Bug 1938156 - vendor abseil-cpp from libwebrtc/third_party 6d7b7fe2c23 r=dbaker

Differential Revision: https://phabricator.services.mozilla.com/D233927
This commit is contained in:
Michael Froman
2025-01-13 21:17:53 +00:00
parent 0e538da443
commit 73bb69af0b
323 changed files with 18809 additions and 6390 deletions

View File

@@ -58,10 +58,12 @@ component("absl") {
group("absl_component_deps") {
if (false) {
public_deps = [
"//third_party/abseil-cpp/absl/algorithm",
"//third_party/abseil-cpp/absl/algorithm:container",
"//third_party/abseil-cpp/absl/base",
"//third_party/abseil-cpp/absl/base:config",
"//third_party/abseil-cpp/absl/base:core_headers",
"//third_party/abseil-cpp/absl/base:log_severity",
"//third_party/abseil-cpp/absl/base:no_destructor",
"//third_party/abseil-cpp/absl/base:nullability",
"//third_party/abseil-cpp/absl/base:prefetch",
@@ -83,7 +85,13 @@ group("absl_component_deps") {
"//third_party/abseil-cpp/absl/hash",
"//third_party/abseil-cpp/absl/log:absl_check",
"//third_party/abseil-cpp/absl/log:absl_log",
"//third_party/abseil-cpp/absl/log:absl_vlog_is_on",
"//third_party/abseil-cpp/absl/log:die_if_null",
"//third_party/abseil-cpp/absl/log:globals",
"//third_party/abseil-cpp/absl/log:initialize",
"//third_party/abseil-cpp/absl/log:log_entry",
"//third_party/abseil-cpp/absl/log:log_sink",
"//third_party/abseil-cpp/absl/log:log_sink_registry",
"//third_party/abseil-cpp/absl/memory",
"//third_party/abseil-cpp/absl/meta:type_traits",
"//third_party/abseil-cpp/absl/numeric:bits",
@@ -122,6 +130,16 @@ group("absl_component_deps") {
]
}
# Dependencies that preferably shouldn't be public in chromium.
public_deps += [
# public in abseil, but deprecated in chromium, yet used.
"//third_party/abseil-cpp/absl/base:dynamic_annotations",
# base/logging.cc uses this non-public absl dependency while there no better
# alternative to inject abort hook. See notes in `base/logging.cc`.
"//third_party/abseil-cpp/absl/base:raw_logging_internal",
]
if (is_component_build) {
public_deps += [ ":absl_full_deps" ]
}
@@ -154,7 +172,16 @@ source_set("absl_full") {
}
if (build_with_chromium) {
visibility = [ "//third_party/fuzztest:*" ]
visibility = [
# Used by some test executables, but not by anything that is a
# part of Chrome.
"//components/optimization_guide/internal/*",
"//third_party/fuzztest:*",
# GoogleTest doesn't actually need absl_full, but this makes gn check
# happier.
"//third_party/googletest:*",
]
}
}
@@ -283,7 +310,10 @@ if (absl_build_tests) {
"absl/base:config_test",
"absl/base:no_destructor_test",
"absl/base:nullability_test",
"absl/base:poison_test",
"absl/base:prefetch_test",
"absl/base:tracing_internal_strong_test",
"absl/base:tracing_internal_weak_test",
"absl/cleanup:cleanup_test",
# TODO(mbonadei): Fix issue with EXPECT_DEATH and uncomment.
@@ -294,6 +324,8 @@ if (absl_build_tests) {
"absl/container:flat_hash_set_test",
"absl/container:hash_function_defaults_test",
"absl/container:inlined_vector_test",
"absl/container:node_hash_map_test",
"absl/container:node_hash_set_test",
"absl/container:node_slot_policy_test",
"absl/container:raw_hash_set_allocator_test",
"absl/container:raw_hash_set_test",
@@ -302,7 +334,11 @@ if (absl_build_tests) {
"absl/crc:crc_cord_state_test",
"absl/crc:crc_memcpy_test",
"absl/crc:non_temporal_memcpy_test",
"absl/debugging:bounded_utf8_length_sequence_test",
"absl/debugging:decode_rust_punycode_test",
"absl/debugging:demangle_rust_test",
"absl/debugging:stacktrace_test",
"absl/debugging:utf8_for_code_point_test",
"absl/flags:flag_test",
"absl/functional:any_invocable_test",
"absl/functional:function_ref_test",
@@ -331,6 +367,9 @@ if (absl_build_tests) {
"absl/numeric:int128_test",
"absl/profiling:exponential_biased_test",
"absl/profiling:periodic_sampler_test",
"absl/random:distributions_test",
"absl/random:mock_distributions_test",
"absl/status:status_matchers_test",
"absl/status:status_test",
"absl/status:statusor_test",
"absl/strings:ascii_test",
@@ -362,10 +401,15 @@ if (absl_build_tests) {
"absl/strings:str_format_test",
"absl/strings:str_replace_test",
"absl/strings:string_view_test",
"absl/synchronization:barrier_test",
"absl/synchronization:graphcycles_test",
"absl/synchronization:kernel_timeout_internal_test",
"absl/synchronization:mutex_test",
"absl/synchronization:per_thread_sem_test",
"absl/synchronization:waiter_test",
"absl/time:time_test",
"absl/types:optional_test",
"absl/types:span_test",
"absl/types:variant_test",
"absl/utility:if_constexpr_test",
"//third_party/googletest:gtest_main",

View File

@@ -27,6 +27,8 @@ set(ABSL_INTERNAL_DLL_FILES
"base/internal/low_level_scheduling.h"
"base/internal/nullability_impl.h"
"base/internal/per_thread_tls.h"
"base/internal/poison.cc"
"base/internal/poison.h"
"base/prefetch.h"
"base/internal/pretty_function.h"
"base/internal/raw_logging.cc"
@@ -46,6 +48,8 @@ set(ABSL_INTERNAL_DLL_FILES
"base/internal/thread_identity.h"
"base/internal/throw_delegate.cc"
"base/internal/throw_delegate.h"
"base/internal/tracing.cc"
"base/internal/tracing.h"
"base/internal/tsan_mutex_interface.h"
"base/internal/unaligned_access.h"
"base/internal/unscaledcycleclock.cc"
@@ -65,6 +69,7 @@ set(ABSL_INTERNAL_DLL_FILES
"cleanup/internal/cleanup.h"
"container/btree_map.h"
"container/btree_set.h"
"container/hash_container_defaults.h"
"container/fixed_array.h"
"container/flat_hash_map.h"
"container/flat_hash_set.h"
@@ -120,8 +125,13 @@ set(ABSL_INTERNAL_DLL_FILES
"debugging/symbolize.h"
"debugging/internal/address_is_readable.cc"
"debugging/internal/address_is_readable.h"
"debugging/internal/bounded_utf8_length_sequence.h"
"debugging/internal/decode_rust_punycode.cc"
"debugging/internal/decode_rust_punycode.h"
"debugging/internal/demangle.cc"
"debugging/internal/demangle.h"
"debugging/internal/demangle_rust.cc"
"debugging/internal/demangle_rust.h"
"debugging/internal/elf_mem_image.cc"
"debugging/internal/elf_mem_image.h"
"debugging/internal/examine_stack.cc"
@@ -130,6 +140,8 @@ set(ABSL_INTERNAL_DLL_FILES
"debugging/internal/stack_consumption.h"
"debugging/internal/stacktrace_config.h"
"debugging/internal/symbolize.h"
"debugging/internal/utf8_for_code_point.cc"
"debugging/internal/utf8_for_code_point.h"
"debugging/internal/vdso_support.cc"
"debugging/internal/vdso_support.h"
"functional/any_invocable.h"
@@ -310,7 +322,6 @@ set(ABSL_INTERNAL_DLL_FILES
"strings/internal/string_constant.h"
"strings/internal/stringify_sink.h"
"strings/internal/stringify_sink.cc"
"strings/internal/has_absl_stringify.h"
"strings/has_absl_stringify.h"
"strings/has_ostream_operator.h"
"strings/match.cc"
@@ -436,9 +447,47 @@ set(ABSL_INTERNAL_DLL_FILES
"debugging/leak_check.cc"
)
if(NOT MSVC)
list(APPEND ABSL_INTERNAL_DLL_FILES
"flags/commandlineflag.cc"
"flags/commandlineflag.h"
"flags/config.h"
"flags/declare.h"
"flags/flag.h"
"flags/internal/commandlineflag.cc"
"flags/internal/commandlineflag.h"
"flags/internal/flag.cc"
"flags/internal/flag.h"
"flags/internal/parse.h"
"flags/internal/path_util.h"
"flags/internal/private_handle_accessor.cc"
"flags/internal/private_handle_accessor.h"
"flags/internal/program_name.cc"
"flags/internal/program_name.h"
"flags/internal/registry.h"
"flags/internal/sequence_lock.h"
"flags/internal/usage.cc"
"flags/internal/usage.h"
"flags/marshalling.cc"
"flags/marshalling.h"
"flags/parse.cc"
"flags/parse.h"
"flags/reflection.cc"
"flags/reflection.h"
"flags/usage.cc"
"flags/usage.h"
"flags/usage_config.cc"
"flags/usage_config.h"
"log/flags.cc"
"log/flags.h"
"log/internal/flags.h"
)
endif()
set(ABSL_INTERNAL_DLL_TARGETS
"absl_check"
"absl_log"
"absl_vlog_is_on"
"algorithm"
"algorithm_container"
"any"
@@ -504,6 +553,7 @@ set(ABSL_INTERNAL_DLL_TARGETS
"log_internal_check_op"
"log_internal_conditions"
"log_internal_config"
"log_internal_fnmatch"
"log_internal_format"
"log_internal_globals"
"log_internal_log_impl"
@@ -583,6 +633,7 @@ set(ABSL_INTERNAL_DLL_TARGETS
"strerror"
"strings"
"strings_internal"
"string_view"
"symbolize"
"synchronization"
"thread_pool"
@@ -593,8 +644,30 @@ set(ABSL_INTERNAL_DLL_TARGETS
"type_traits"
"utility"
"variant"
"vlog_config_internal"
"vlog_is_on"
)
if(NOT MSVC)
list(APPEND ABSL_INTERNAL_DLL_TARGETS
"flags"
"flags_commandlineflag"
"flags_commandlineflag_internal"
"flags_config"
"flags_internal"
"flags_marshalling"
"flags_parse"
"flags_path_util"
"flags_private_handle_accessor"
"flags_program_name"
"flags_reflection"
"flags_usage"
"flags_usage_internal"
"log_internal_flags"
"log_flags"
)
endif()
set(ABSL_INTERNAL_TEST_DLL_FILES
"hash/hash_testing.h"
"log/scoped_mock_log.cc"
@@ -607,6 +680,9 @@ set(ABSL_INTERNAL_TEST_DLL_FILES
"random/internal/mock_overload_set.h"
"random/mocking_bit_gen.h"
"random/mock_distributions.h"
"status/status_matchers.h"
"status/internal/status_matchers.cc"
"status/internal/status_matchers.h"
"strings/cordz_test_helpers.h"
"strings/cord_test_helpers.h"
)
@@ -619,6 +695,7 @@ set(ABSL_INTERNAL_TEST_DLL_TARGETS
"random_internal_distribution_test_util"
"random_internal_mock_overload_set"
"scoped_mock_log"
"status_matchers"
)
include(CheckCXXSourceCompiles)
@@ -667,12 +744,7 @@ function(absl_internal_dll_contains)
STRING(REGEX REPLACE "^absl::" "" _target ${ABSL_INTERNAL_DLL_TARGET})
list(FIND
ABSL_INTERNAL_DLL_TARGETS
"${_target}"
_index)
if (${_index} GREATER -1)
if (_target IN_LIST ABSL_INTERNAL_DLL_TARGETS)
set(${ABSL_INTERNAL_DLL_OUTPUT} 1 PARENT_SCOPE)
else()
set(${ABSL_INTERNAL_DLL_OUTPUT} 0 PARENT_SCOPE)
@@ -689,12 +761,7 @@ function(absl_internal_test_dll_contains)
STRING(REGEX REPLACE "^absl::" "" _target ${ABSL_INTERNAL_TEST_DLL_TARGET})
list(FIND
ABSL_INTERNAL_TEST_DLL_TARGETS
"${_target}"
_index)
if (${_index} GREATER -1)
if (_target IN_LIST ABSL_INTERNAL_TEST_DLL_TARGETS)
set(${ABSL_INTERNAL_TEST_DLL_OUTPUT} 1 PARENT_SCOPE)
else()
set(${ABSL_INTERNAL_TEST_DLL_OUTPUT} 0 PARENT_SCOPE)
@@ -746,7 +813,12 @@ function(absl_make_dll)
else()
set(_dll "abseil_dll")
set(_dll_files ${ABSL_INTERNAL_DLL_FILES})
set(_dll_libs "")
set(_dll_libs
Threads::Threads
# TODO(#1495): Use $<LINK_LIBRARY:FRAMEWORK,CoreFoundation> once our
# minimum CMake version >= 3.24
$<$<PLATFORM_ID:Darwin>:-Wl,-framework,CoreFoundation>
)
set(_dll_compile_definitions "")
set(_dll_includes "")
set(_dll_consume "ABSL_CONSUME_DLL")
@@ -764,7 +836,10 @@ function(absl_make_dll)
${_dll_libs}
${ABSL_DEFAULT_LINKOPTS}
)
set_property(TARGET ${_dll} PROPERTY LINKER_LANGUAGE "CXX")
set_target_properties(${_dll} PROPERTIES
LINKER_LANGUAGE "CXX"
SOVERSION ${ABSL_SOVERSION}
)
target_include_directories(
${_dll}
PUBLIC

View File

@@ -186,16 +186,16 @@ function(absl_cc_library)
endif()
endif()
endforeach()
set(skip_next_cflag OFF)
foreach(cflag ${ABSL_CC_LIB_COPTS})
if(skip_next_cflag)
set(skip_next_cflag OFF)
elseif(${cflag} MATCHES "^-Xarch_")
# Strip out the CMake-specific `SHELL:` prefix, which is used to construct
# a group of space-separated options.
# https://cmake.org/cmake/help/v3.30/command/target_compile_options.html#option-de-duplication
string(REGEX REPLACE "^SHELL:" "" cflag "${cflag}")
if(${cflag} MATCHES "^-Xarch_")
# An -Xarch_ flag implies that its successor only applies to the
# specified platform. Filter both of them out before the successor
# reaches the "^-m" filter.
set(skip_next_cflag ON)
elseif(${cflag} MATCHES "^(-Wno|/wd)")
# specified platform. Such option groups are each specified in a single
# `SHELL:`-prefixed string in the COPTS list, which we simply ignore.
elseif(${cflag} MATCHES "^(-Wno-|/wd)")
# These flags are needed to suppress warnings that might fire in our headers.
set(PC_CFLAGS "${PC_CFLAGS} ${cflag}")
elseif(${cflag} MATCHES "^(-W|/w[1234eo])")
@@ -258,6 +258,13 @@ Cflags: -I\${includedir}${PC_CFLAGS}\n")
elseif(_build_type STREQUAL "static" OR _build_type STREQUAL "shared")
add_library(${_NAME} "")
target_sources(${_NAME} PRIVATE ${ABSL_CC_LIB_SRCS} ${ABSL_CC_LIB_HDRS})
if(APPLE)
set_target_properties(${_NAME} PROPERTIES
INSTALL_RPATH "@loader_path")
elseif(UNIX)
set_target_properties(${_NAME} PROPERTIES
INSTALL_RPATH "$ORIGIN")
endif()
target_link_libraries(${_NAME}
PUBLIC ${ABSL_CC_LIB_DEPS}
PRIVATE
@@ -306,7 +313,7 @@ Cflags: -I\${includedir}${PC_CFLAGS}\n")
if(ABSL_ENABLE_INSTALL)
set_target_properties(${_NAME} PROPERTIES
OUTPUT_NAME "absl_${_NAME}"
SOVERSION 0
SOVERSION "${ABSL_SOVERSION}"
)
endif()
else()

View File

@@ -1,4 +1,4 @@
cmake_minimum_required(VERSION 3.10)
cmake_minimum_required(VERSION 3.16)
project(googletest-external NONE)

View File

@@ -39,7 +39,7 @@ section of your executable or of your library.<br>
Here is a short CMakeLists.txt example of an application project using Abseil.
```cmake
cmake_minimum_required(VERSION 3.10)
cmake_minimum_required(VERSION 3.16)
project(my_app_project)
# Pick the C++ standard to compile with.
@@ -62,7 +62,7 @@ will control Abseil library targets) is set to at least that minimum. For
example:
```cmake
cmake_minimum_required(VERSION 3.10)
cmake_minimum_required(VERSION 3.16)
project(my_lib_project)
# Leave C++ standard up to the root application, so set it only if this is the

View File

@@ -15,7 +15,7 @@
# A simple CMakeLists.txt for testing cmake installation
cmake_minimum_required(VERSION 3.10)
cmake_minimum_required(VERSION 3.16)
project(absl_cmake_testing CXX)
add_executable(simple simple.cc)

View File

@@ -22,7 +22,8 @@ set -euox pipefail
absl_dir=/abseil-cpp
absl_build_dir=/buildfs
googletest_builddir=/googletest_builddir
project_dir="${absl_dir}"/CMake/install_test_project
googletest_archive="googletest-${ABSL_GOOGLETEST_VERSION}.tar.gz"
project_dir="${absl_dir}/CMake/install_test_project"
project_build_dir=/buildfs/project-build
build_shared_libs="OFF"
@@ -33,9 +34,9 @@ fi
# Build and install GoogleTest
mkdir "${googletest_builddir}"
pushd "${googletest_builddir}"
curl -L "${ABSL_GOOGLETEST_DOWNLOAD_URL}" --output "${ABSL_GOOGLETEST_COMMIT}".zip
unzip "${ABSL_GOOGLETEST_COMMIT}".zip
pushd "googletest-${ABSL_GOOGLETEST_COMMIT}"
curl -L "${ABSL_GOOGLETEST_DOWNLOAD_URL}" --output "${googletest_archive}"
tar -xz -f "${googletest_archive}"
pushd "googletest-${ABSL_GOOGLETEST_VERSION}"
mkdir build
pushd build
cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS="${build_shared_libs}" ..

View File

@@ -15,43 +15,8 @@
#
# https://github.com/google/oss-policies-info/blob/main/foundational-cxx-support-matrix.md
# As of 2022-09-06, CMake 3.10 is the minimum supported version.
cmake_minimum_required(VERSION 3.10)
# Compiler id for Apple Clang is now AppleClang.
if (POLICY CMP0025)
cmake_policy(SET CMP0025 NEW)
endif (POLICY CMP0025)
# if command can use IN_LIST
if (POLICY CMP0057)
cmake_policy(SET CMP0057 NEW)
endif (POLICY CMP0057)
# Project version variables are the empty string if version is unspecified
if (POLICY CMP0048)
cmake_policy(SET CMP0048 NEW)
endif (POLICY CMP0048)
# Honor the GTest_ROOT variable if specified
if (POLICY CMP0074)
cmake_policy(SET CMP0074 NEW)
endif (POLICY CMP0074)
# option() honor variables
if (POLICY CMP0077)
cmake_policy(SET CMP0077 NEW)
endif (POLICY CMP0077)
# Allow the user to specify the MSVC runtime
if (POLICY CMP0091)
cmake_policy(SET CMP0091 NEW)
endif (POLICY CMP0091)
# try_compile() honors the CMAKE_CXX_STANDARD value
if (POLICY CMP0067)
cmake_policy(SET CMP0067 NEW)
endif (POLICY CMP0067)
# As of 2024-07-01, CMake 3.16 is the minimum supported version.
cmake_minimum_required(VERSION 3.16)
# Allow the user to specify the CMAKE_MSVC_DEBUG_INFORMATION_FORMAT
if (POLICY CMP0141)
@@ -59,6 +24,7 @@ if (POLICY CMP0141)
endif (POLICY CMP0141)
project(absl LANGUAGES CXX)
set(ABSL_SOVERSION 0)
include(CTest)
# Output directory is correct by default for most build setups. However, when
@@ -75,6 +41,10 @@ else()
option(ABSL_ENABLE_INSTALL "Enable install rule" ON)
endif()
set(CMAKE_INSTALL_RPATH "$ORIGIN")
set(CMAKE_INSTALL_RPATH_USE_LINK_PATH ON)
set(CMAKE_BUILD_RPATH_USE_ORIGIN ON)
option(ABSL_PROPAGATE_CXX_STD
"Use CMake C++ standard meta features (e.g. cxx_std_14) that propagate to targets that link to Abseil"
OFF) # TODO: Default to ON for CMake 3.8 and greater.
@@ -91,6 +61,15 @@ list(APPEND CMAKE_MODULE_PATH
${CMAKE_CURRENT_LIST_DIR}/absl/copts
)
option(ABSL_MSVC_STATIC_RUNTIME
"Link static runtime libraries"
OFF)
if(ABSL_MSVC_STATIC_RUNTIME)
set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>")
else()
set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>DLL")
endif()
include(CMakePackageConfigHelpers)
include(GNUInstallDirs)
include(AbseilDll)
@@ -149,6 +128,14 @@ set(ABSL_LOCAL_GOOGLETEST_DIR "/usr/src/googletest" CACHE PATH
"If ABSL_USE_GOOGLETEST_HEAD is OFF and ABSL_GOOGLETEST_URL is not set, specifies the directory of a local GoogleTest checkout."
)
option(ABSL_BUILD_MONOLITHIC_SHARED_LIBS
"Build Abseil as a single shared library (always enabled for Windows)"
OFF
)
if(NOT BUILD_SHARED_LIBS AND ABSL_BUILD_MONOLITHIC_SHARED_LIBS)
message(WARNING "Not building a shared library because BUILD_SHARED_LIBS is not set. Ignoring ABSL_BUILD_MONOLITHIC_SHARED_LIBS.")
endif()
if((BUILD_TESTING AND ABSL_BUILD_TESTING) OR ABSL_BUILD_TEST_HELPERS)
if (ABSL_USE_EXTERNAL_GOOGLETEST)
if (ABSL_FIND_GOOGLETEST)
@@ -271,7 +258,7 @@ if(ABSL_ENABLE_INSTALL)
ABSL_INTERNAL_OPTIONS_H_PINNED
"${ABSL_INTERNAL_OPTIONS_H_CONTENTS}")
file(WRITE "${CMAKE_BINARY_DIR}/options-pinned.h" "${ABSL_INTERNAL_OPTIONS_H_PINNED}")
file(GENERATE OUTPUT "${CMAKE_BINARY_DIR}/options-pinned.h" CONTENT "${ABSL_INTERNAL_OPTIONS_H_PINNED}")
install(FILES "${CMAKE_BINARY_DIR}/options-pinned.h"
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/absl/base

View File

@@ -35,8 +35,8 @@ bazel_dep(name = "google_benchmark",
dev_dependency = True)
bazel_dep(name = "googletest",
version = "1.14.0.bcr.1",
version = "1.15.2",
repo_name = "com_google_googletest")
bazel_dep(name = "platforms",
version = "0.0.8")
version = "0.0.10")

View File

@@ -3,8 +3,8 @@ Short Name: absl
URL: https://github.com/abseil/abseil-cpp
License: Apache 2.0
License File: LICENSE
Version: 0
Revision: c0bec1a74864cf6a685ea226478b451120379fbd
Version: N/A
Revision: 69c46839620967c6ffb99b656174d1c544e60a50
Security Critical: yes
Shipped: yes

View File

@@ -1,4 +1,4 @@
# ./mach python dom/media/webrtc/third_party_build/vendor-libwebrtc.py --from-local .moz-vendoring/abseil-cpp --commit mozpatches abseil-cpp
abseil-cpp updated from .moz-vendoring/abseil-cpp commit mozpatches on 2025-01-09T19:49:05.257540+00:00.
abseil-cpp updated from .moz-vendoring/abseil-cpp commit mozpatches on 2025-01-10T02:10:03.058591+00:00.
# base of latest vendoring
ba05f98ec72
6d7b7fe2c23

View File

@@ -20,20 +20,21 @@ load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
# GoogleTest/GoogleMock framework. Used by most unit-tests.
http_archive(
name = "com_google_googletest",
sha256 = "8ad598c73ad796e0d8280b082cebd82a630d73e73cd3c70057938a6501bba5d7",
strip_prefix = "googletest-1.14.0",
# Keep this URL in sync with ABSL_GOOGLETEST_COMMIT in ci/cmake_common.sh and
# ci/windows_msvc_cmake.bat.
urls = ["https://github.com/google/googletest/archive/refs/tags/v1.14.0.tar.gz"],
name = "com_google_googletest",
sha256 = "7b42b4d6ed48810c5362c265a17faebe90dc2373c885e5216439d37927f02926",
strip_prefix = "googletest-1.15.2",
# Keep this URL in sync with the version in ci/cmake_common.sh and
# ci/windows_msvc_cmake.bat.
urls = ["https://github.com/google/googletest/releases/download/v1.15.2/googletest-1.15.2.tar.gz"],
)
# RE2 (the regular expression library used by GoogleTest)
http_archive(
name = "com_googlesource_code_re2",
sha256 = "828341ad08524618a626167bd320b0c2acc97bd1c28eff693a9ea33a7ed2a85f",
strip_prefix = "re2-2023-11-01",
urls = ["https://github.com/google/re2/releases/download/2023-11-01/re2-2023-11-01.zip"],
sha256 = "eb2df807c781601c14a260a507a5bb4509be1ee626024cb45acbd57cb9d4032b",
strip_prefix = "re2-2024-07-02",
urls = ["https://github.com/google/re2/releases/download/2024-07-02/re2-2024-07-02.tar.gz"],
repo_mapping = {"@abseil-cpp": "@com_google_absl"},
)
# Google benchmark.
@@ -46,14 +47,17 @@ http_archive(
# Bazel Skylib.
http_archive(
name = "bazel_skylib",
sha256 = "cd55a062e763b9349921f0f5db8c3933288dc8ba4f76dd9416aac68acee3cb94",
urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/1.5.0/bazel-skylib-1.5.0.tar.gz"],
name = "bazel_skylib",
sha256 = "cd55a062e763b9349921f0f5db8c3933288dc8ba4f76dd9416aac68acee3cb94",
urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/1.5.0/bazel-skylib-1.5.0.tar.gz"],
)
# Bazel platform rules.
http_archive(
name = "platforms",
sha256 = "8150406605389ececb6da07cbcb509d5637a3ab9a24bc69b1101531367d89d74",
urls = ["https://github.com/bazelbuild/platforms/releases/download/0.0.8/platforms-0.0.8.tar.gz"],
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/platforms/releases/download/0.0.10/platforms-0.0.10.tar.gz",
"https://github.com/bazelbuild/platforms/releases/download/0.0.10/platforms-0.0.10.tar.gz",
],
sha256 = "218efe8ee736d26a3572663b374a253c012b716d8af0c07e842e82f238a0a7ee",
)

View File

@@ -86,6 +86,8 @@ template("absl_source_set") {
"//libassistant/*",
# Not built into Chrome.
# TODO(crbug.com/357147248): Remove after these targets depend
# on absl_full instead.
"//components/optimization_guide/internal/*",
]
} else {

View File

@@ -36,9 +36,9 @@ add_subdirectory(time)
add_subdirectory(types)
add_subdirectory(utility)
if (${ABSL_BUILD_DLL})
if (ABSL_BUILD_DLL)
absl_make_dll()
if (${ABSL_BUILD_TEST_HELPERS})
if ((BUILD_TESTING AND ABSL_BUILD_TESTING) OR ABSL_BUILD_TEST_HELPERS)
absl_make_dll(TEST ON)
endif()
endif()

View File

@@ -44,9 +44,14 @@ Pod::Spec.new do |s|
'ALWAYS_SEARCH_USER_PATHS' => 'NO',
}
s.ios.deployment_target = '9.0'
s.osx.deployment_target = '10.10'
s.osx.deployment_target = '10.11'
s.tvos.deployment_target = '9.0'
s.watchos.deployment_target = '2.0'
s.subspec 'xcprivacy' do |ss|
ss.resource_bundles = {
ss.module_name => 'PrivacyInfo.xcprivacy',
}
end
"""
# Rule object representing the rule of Bazel BUILD.
@@ -191,6 +196,12 @@ def write_podspec_rule(f, rule, depth):
name = get_spec_name(dep.replace(":", "/"))
f.write("{indent}{var}.dependency '{dep}'\n".format(
indent=indent, var=spec_var, dep=name))
# Writes dependency to xcprivacy
f.write(
"{indent}{var}.dependency '{dep}'\n".format(
indent=indent, var=spec_var, dep="abseil/xcprivacy"
)
)
def write_indented_list(f, leading, values):

View File

@@ -65,6 +65,7 @@ cc_library(
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":algorithm",
"//absl/base:config",
"//absl/base:core_headers",
"//absl/base:nullability",
"//absl/meta:type_traits",
@@ -79,6 +80,7 @@ cc_test(
deps = [
":container",
"//absl/base",
"//absl/base:config",
"//absl/base:core_headers",
"//absl/memory",
"//absl/types:span",

View File

@@ -13,8 +13,9 @@ absl_source_set("container") {
public = [ "container.h" ]
deps = [
":algorithm",
"//third_party/abseil-cpp/absl/base:nullability",
"//third_party/abseil-cpp/absl/base:config",
"//third_party/abseil-cpp/absl/base:core_headers",
"//third_party/abseil-cpp/absl/base:nullability",
"//third_party/abseil-cpp/absl/meta:type_traits",
]
}
@@ -32,6 +33,7 @@ absl_test("container_test") {
deps = [
":container",
"//third_party/abseil-cpp/absl/base",
"//third_party/abseil-cpp/absl/base:config",
"//third_party/abseil-cpp/absl/base:core_headers",
"//third_party/abseil-cpp/absl/memory",
"//third_party/abseil-cpp/absl/types:span",

View File

@@ -48,6 +48,7 @@ absl_cc_library(
${ABSL_DEFAULT_COPTS}
DEPS
absl::algorithm
absl::config
absl::core_headers
absl::meta
absl::nullability
@@ -64,6 +65,7 @@ absl_cc_test(
DEPS
absl::algorithm_container
absl::base
absl::config
absl::core_headers
absl::memory
absl::span

View File

@@ -53,8 +53,8 @@ using std::rotate;
// n = (`last` - `first`) comparisons. A linear search over short containers
// may be faster than a binary search, even when the container is sorted.
template <typename InputIterator, typename EqualityComparable>
bool linear_search(InputIterator first, InputIterator last,
const EqualityComparable& value) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool linear_search(
InputIterator first, InputIterator last, const EqualityComparable& value) {
return std::find(first, last, value) != last;
}

View File

@@ -14,11 +14,9 @@
#include "absl/algorithm/algorithm.h"
#include <algorithm>
#include <list>
#include <array>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
@@ -47,4 +45,16 @@ TEST_F(LinearSearchTest, linear_searchConst) {
absl::linear_search(const_container->begin(), const_container->end(), 4));
}
#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L
TEST_F(LinearSearchTest, Constexpr) {
static constexpr std::array<int, 3> kArray = {1, 2, 3};
static_assert(absl::linear_search(kArray.begin(), kArray.end(), 3));
static_assert(!absl::linear_search(kArray.begin(), kArray.end(), 4));
}
#endif // defined(ABSL_INTERNAL_CPLUSPLUS_LANG) &&
// ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L
} // namespace

View File

@@ -44,6 +44,7 @@
#include <cassert>
#include <iterator>
#include <numeric>
#include <random>
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
@@ -51,6 +52,7 @@
#include <vector>
#include "absl/algorithm/algorithm.h"
#include "absl/base/config.h"
#include "absl/base/macros.h"
#include "absl/base/nullability.h"
#include "absl/meta/type_traits.h"
@@ -92,17 +94,17 @@ using ContainerPointerType =
// using std::end;
// std::foo(begin(c), end(c));
// becomes
// std::foo(container_algorithm_internal::begin(c),
// container_algorithm_internal::end(c));
// std::foo(container_algorithm_internal::c_begin(c),
// container_algorithm_internal::c_end(c));
// These are meant for internal use only.
template <typename C>
ContainerIter<C> c_begin(C& c) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17 ContainerIter<C> c_begin(C& c) {
return begin(c);
}
template <typename C>
ContainerIter<C> c_end(C& c) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17 ContainerIter<C> c_end(C& c) {
return end(c);
}
@@ -130,7 +132,8 @@ struct IsUnorderedContainer<std::unordered_set<Key, Hash, KeyEqual, Allocator>>
// Container-based version of absl::linear_search() for performing a linear
// search within a container.
template <typename C, typename EqualityComparable>
bool c_linear_search(const C& c, EqualityComparable&& value) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_linear_search(
const C& c, EqualityComparable&& value) {
return linear_search(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<EqualityComparable>(value));
@@ -145,8 +148,9 @@ bool c_linear_search(const C& c, EqualityComparable&& value) {
// Container-based version of the <iterator> `std::distance()` function to
// return the number of elements within a container.
template <typename C>
container_algorithm_internal::ContainerDifferenceType<const C> c_distance(
const C& c) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
container_algorithm_internal::ContainerDifferenceType<const C>
c_distance(const C& c) {
return std::distance(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c));
}
@@ -160,7 +164,7 @@ container_algorithm_internal::ContainerDifferenceType<const C> c_distance(
// Container-based version of the <algorithm> `std::all_of()` function to
// test if all elements within a container satisfy a condition.
template <typename C, typename Pred>
bool c_all_of(const C& c, Pred&& pred) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_all_of(const C& c, Pred&& pred) {
return std::all_of(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<Pred>(pred));
@@ -171,7 +175,7 @@ bool c_all_of(const C& c, Pred&& pred) {
// Container-based version of the <algorithm> `std::any_of()` function to
// test if any element in a container fulfills a condition.
template <typename C, typename Pred>
bool c_any_of(const C& c, Pred&& pred) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_any_of(const C& c, Pred&& pred) {
return std::any_of(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<Pred>(pred));
@@ -182,7 +186,7 @@ bool c_any_of(const C& c, Pred&& pred) {
// Container-based version of the <algorithm> `std::none_of()` function to
// test if no elements in a container fulfill a condition.
template <typename C, typename Pred>
bool c_none_of(const C& c, Pred&& pred) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_none_of(const C& c, Pred&& pred) {
return std::none_of(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<Pred>(pred));
@@ -193,7 +197,8 @@ bool c_none_of(const C& c, Pred&& pred) {
// Container-based version of the <algorithm> `std::for_each()` function to
// apply a function to a container's elements.
template <typename C, typename Function>
decay_t<Function> c_for_each(C&& c, Function&& f) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 decay_t<Function> c_for_each(C&& c,
Function&& f) {
return std::for_each(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<Function>(f));
@@ -204,18 +209,33 @@ decay_t<Function> c_for_each(C&& c, Function&& f) {
// Container-based version of the <algorithm> `std::find()` function to find
// the first element containing the passed value within a container value.
template <typename C, typename T>
container_algorithm_internal::ContainerIter<C> c_find(C& c, T&& value) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<C>
c_find(C& c, T&& value) {
return std::find(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<T>(value));
}
// c_contains()
//
// Container-based version of the <algorithm> `std::ranges::contains()` C++23
// function to search a container for a value.
template <typename Sequence, typename T>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_contains(const Sequence& sequence,
T&& value) {
return absl::c_find(sequence, std::forward<T>(value)) !=
container_algorithm_internal::c_end(sequence);
}
// c_find_if()
//
// Container-based version of the <algorithm> `std::find_if()` function to find
// the first element in a container matching the given condition.
template <typename C, typename Pred>
container_algorithm_internal::ContainerIter<C> c_find_if(C& c, Pred&& pred) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<C>
c_find_if(C& c, Pred&& pred) {
return std::find_if(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<Pred>(pred));
@@ -226,8 +246,9 @@ container_algorithm_internal::ContainerIter<C> c_find_if(C& c, Pred&& pred) {
// Container-based version of the <algorithm> `std::find_if_not()` function to
// find the first element in a container not matching the given condition.
template <typename C, typename Pred>
container_algorithm_internal::ContainerIter<C> c_find_if_not(C& c,
Pred&& pred) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<C>
c_find_if_not(C& c, Pred&& pred) {
return std::find_if_not(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<Pred>(pred));
@@ -238,8 +259,9 @@ container_algorithm_internal::ContainerIter<C> c_find_if_not(C& c,
// Container-based version of the <algorithm> `std::find_end()` function to
// find the last subsequence within a container.
template <typename Sequence1, typename Sequence2>
container_algorithm_internal::ContainerIter<Sequence1> c_find_end(
Sequence1& sequence, Sequence2& subsequence) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<Sequence1>
c_find_end(Sequence1& sequence, Sequence2& subsequence) {
return std::find_end(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
container_algorithm_internal::c_begin(subsequence),
@@ -249,8 +271,10 @@ container_algorithm_internal::ContainerIter<Sequence1> c_find_end(
// Overload of c_find_end() for using a predicate evaluation other than `==` as
// the function's test condition.
template <typename Sequence1, typename Sequence2, typename BinaryPredicate>
container_algorithm_internal::ContainerIter<Sequence1> c_find_end(
Sequence1& sequence, Sequence2& subsequence, BinaryPredicate&& pred) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<Sequence1>
c_find_end(Sequence1& sequence, Sequence2& subsequence,
BinaryPredicate&& pred) {
return std::find_end(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
container_algorithm_internal::c_begin(subsequence),
@@ -264,8 +288,9 @@ container_algorithm_internal::ContainerIter<Sequence1> c_find_end(
// find the first element within the container that is also within the options
// container.
template <typename C1, typename C2>
container_algorithm_internal::ContainerIter<C1> c_find_first_of(C1& container,
C2& options) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<C1>
c_find_first_of(C1& container, C2& options) {
return std::find_first_of(container_algorithm_internal::c_begin(container),
container_algorithm_internal::c_end(container),
container_algorithm_internal::c_begin(options),
@@ -275,8 +300,9 @@ container_algorithm_internal::ContainerIter<C1> c_find_first_of(C1& container,
// Overload of c_find_first_of() for using a predicate evaluation other than
// `==` as the function's test condition.
template <typename C1, typename C2, typename BinaryPredicate>
container_algorithm_internal::ContainerIter<C1> c_find_first_of(
C1& container, C2& options, BinaryPredicate&& pred) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<C1>
c_find_first_of(C1& container, C2& options, BinaryPredicate&& pred) {
return std::find_first_of(container_algorithm_internal::c_begin(container),
container_algorithm_internal::c_end(container),
container_algorithm_internal::c_begin(options),
@@ -289,8 +315,9 @@ container_algorithm_internal::ContainerIter<C1> c_find_first_of(
// Container-based version of the <algorithm> `std::adjacent_find()` function to
// find equal adjacent elements within a container.
template <typename Sequence>
container_algorithm_internal::ContainerIter<Sequence> c_adjacent_find(
Sequence& sequence) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<Sequence>
c_adjacent_find(Sequence& sequence) {
return std::adjacent_find(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence));
}
@@ -298,8 +325,9 @@ container_algorithm_internal::ContainerIter<Sequence> c_adjacent_find(
// Overload of c_adjacent_find() for using a predicate evaluation other than
// `==` as the function's test condition.
template <typename Sequence, typename BinaryPredicate>
container_algorithm_internal::ContainerIter<Sequence> c_adjacent_find(
Sequence& sequence, BinaryPredicate&& pred) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<Sequence>
c_adjacent_find(Sequence& sequence, BinaryPredicate&& pred) {
return std::adjacent_find(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
std::forward<BinaryPredicate>(pred));
@@ -310,8 +338,9 @@ container_algorithm_internal::ContainerIter<Sequence> c_adjacent_find(
// Container-based version of the <algorithm> `std::count()` function to count
// values that match within a container.
template <typename C, typename T>
container_algorithm_internal::ContainerDifferenceType<const C> c_count(
const C& c, T&& value) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerDifferenceType<const C>
c_count(const C& c, T&& value) {
return std::count(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<T>(value));
@@ -322,8 +351,9 @@ container_algorithm_internal::ContainerDifferenceType<const C> c_count(
// Container-based version of the <algorithm> `std::count_if()` function to
// count values matching a condition within a container.
template <typename C, typename Pred>
container_algorithm_internal::ContainerDifferenceType<const C> c_count_if(
const C& c, Pred&& pred) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerDifferenceType<const C>
c_count_if(const C& c, Pred&& pred) {
return std::count_if(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<Pred>(pred));
@@ -335,8 +365,9 @@ container_algorithm_internal::ContainerDifferenceType<const C> c_count_if(
// return the first element where two ordered containers differ. Applies `==` to
// the first N elements of `c1` and `c2`, where N = min(size(c1), size(c2)).
template <typename C1, typename C2>
container_algorithm_internal::ContainerIterPairType<C1, C2> c_mismatch(C1& c1,
C2& c2) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIterPairType<C1, C2>
c_mismatch(C1& c1, C2& c2) {
return std::mismatch(container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
container_algorithm_internal::c_begin(c2),
@@ -347,8 +378,9 @@ container_algorithm_internal::ContainerIterPairType<C1, C2> c_mismatch(C1& c1,
// the function's test condition. Applies `pred`to the first N elements of `c1`
// and `c2`, where N = min(size(c1), size(c2)).
template <typename C1, typename C2, typename BinaryPredicate>
container_algorithm_internal::ContainerIterPairType<C1, C2> c_mismatch(
C1& c1, C2& c2, BinaryPredicate pred) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIterPairType<C1, C2>
c_mismatch(C1& c1, C2& c2, BinaryPredicate pred) {
return std::mismatch(container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
container_algorithm_internal::c_begin(c2),
@@ -360,7 +392,7 @@ container_algorithm_internal::ContainerIterPairType<C1, C2> c_mismatch(
// Container-based version of the <algorithm> `std::equal()` function to
// test whether two containers are equal.
template <typename C1, typename C2>
bool c_equal(const C1& c1, const C2& c2) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_equal(const C1& c1, const C2& c2) {
return std::equal(container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
container_algorithm_internal::c_begin(c2),
@@ -370,7 +402,8 @@ bool c_equal(const C1& c1, const C2& c2) {
// Overload of c_equal() for using a predicate evaluation other than `==` as
// the function's test condition.
template <typename C1, typename C2, typename BinaryPredicate>
bool c_equal(const C1& c1, const C2& c2, BinaryPredicate&& pred) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_equal(const C1& c1, const C2& c2,
BinaryPredicate&& pred) {
return std::equal(container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
container_algorithm_internal::c_begin(c2),
@@ -383,7 +416,8 @@ bool c_equal(const C1& c1, const C2& c2, BinaryPredicate&& pred) {
// Container-based version of the <algorithm> `std::is_permutation()` function
// to test whether a container is a permutation of another.
template <typename C1, typename C2>
bool c_is_permutation(const C1& c1, const C2& c2) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_is_permutation(const C1& c1,
const C2& c2) {
return std::is_permutation(container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
container_algorithm_internal::c_begin(c2),
@@ -393,7 +427,8 @@ bool c_is_permutation(const C1& c1, const C2& c2) {
// Overload of c_is_permutation() for using a predicate evaluation other than
// `==` as the function's test condition.
template <typename C1, typename C2, typename BinaryPredicate>
bool c_is_permutation(const C1& c1, const C2& c2, BinaryPredicate&& pred) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_is_permutation(
const C1& c1, const C2& c2, BinaryPredicate&& pred) {
return std::is_permutation(container_algorithm_internal::c_begin(c1),
container_algorithm_internal::c_end(c1),
container_algorithm_internal::c_begin(c2),
@@ -406,8 +441,9 @@ bool c_is_permutation(const C1& c1, const C2& c2, BinaryPredicate&& pred) {
// Container-based version of the <algorithm> `std::search()` function to search
// a container for a subsequence.
template <typename Sequence1, typename Sequence2>
container_algorithm_internal::ContainerIter<Sequence1> c_search(
Sequence1& sequence, Sequence2& subsequence) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<Sequence1>
c_search(Sequence1& sequence, Sequence2& subsequence) {
return std::search(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
container_algorithm_internal::c_begin(subsequence),
@@ -417,8 +453,10 @@ container_algorithm_internal::ContainerIter<Sequence1> c_search(
// Overload of c_search() for using a predicate evaluation other than
// `==` as the function's test condition.
template <typename Sequence1, typename Sequence2, typename BinaryPredicate>
container_algorithm_internal::ContainerIter<Sequence1> c_search(
Sequence1& sequence, Sequence2& subsequence, BinaryPredicate&& pred) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<Sequence1>
c_search(Sequence1& sequence, Sequence2& subsequence,
BinaryPredicate&& pred) {
return std::search(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
container_algorithm_internal::c_begin(subsequence),
@@ -426,13 +464,35 @@ container_algorithm_internal::ContainerIter<Sequence1> c_search(
std::forward<BinaryPredicate>(pred));
}
// c_contains_subrange()
//
// Container-based version of the <algorithm> `std::ranges::contains_subrange()`
// C++23 function to search a container for a subsequence.
template <typename Sequence1, typename Sequence2>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_contains_subrange(
    Sequence1& sequence, Sequence2& subsequence) {
  // The subsequence is present exactly when c_search() locates a match before
  // running off the end of `sequence`.
  const auto not_found = container_algorithm_internal::c_end(sequence);
  return absl::c_search(sequence, subsequence) != not_found;
}
// Overload of c_contains_subrange() accepting a binary predicate to use in
// place of `==` as the function's test condition.
template <typename Sequence1, typename Sequence2, typename BinaryPredicate>
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 bool c_contains_subrange(
    Sequence1& sequence, Sequence2& subsequence, BinaryPredicate&& pred) {
  // Mirrors the predicate-less overload: a successful c_search() means the
  // subsequence occurs somewhere inside `sequence`.
  const auto not_found = container_algorithm_internal::c_end(sequence);
  return absl::c_search(sequence, subsequence,
                        std::forward<BinaryPredicate>(pred)) != not_found;
}
// c_search_n()
//
// Container-based version of the <algorithm> `std::search_n()` function to
// search a container for the first sequence of N elements.
template <typename Sequence, typename Size, typename T>
container_algorithm_internal::ContainerIter<Sequence> c_search_n(
Sequence& sequence, Size count, T&& value) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<Sequence>
c_search_n(Sequence& sequence, Size count, T&& value) {
return std::search_n(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence), count,
std::forward<T>(value));
@@ -442,8 +502,10 @@ container_algorithm_internal::ContainerIter<Sequence> c_search_n(
// `==` as the function's test condition.
template <typename Sequence, typename Size, typename T,
typename BinaryPredicate>
container_algorithm_internal::ContainerIter<Sequence> c_search_n(
Sequence& sequence, Size count, T&& value, BinaryPredicate&& pred) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
container_algorithm_internal::ContainerIter<Sequence>
c_search_n(Sequence& sequence, Size count, T&& value,
BinaryPredicate&& pred) {
return std::search_n(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence), count,
std::forward<T>(value),
@@ -1500,8 +1562,9 @@ c_is_heap_until(RandomAccessContainer& sequence, LessThan&& comp) {
// to return an iterator pointing to the element with the smallest value, using
// `operator<` to make the comparisons.
template <typename Sequence>
container_algorithm_internal::ContainerIter<Sequence> c_min_element(
Sequence& sequence) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
container_algorithm_internal::ContainerIter<Sequence>
c_min_element(Sequence& sequence) {
return std::min_element(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence));
}
@@ -1509,8 +1572,9 @@ container_algorithm_internal::ContainerIter<Sequence> c_min_element(
// Overload of c_min_element() for performing a `comp` comparison other than
// `operator<`.
template <typename Sequence, typename LessThan>
container_algorithm_internal::ContainerIter<Sequence> c_min_element(
Sequence& sequence, LessThan&& comp) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
container_algorithm_internal::ContainerIter<Sequence>
c_min_element(Sequence& sequence, LessThan&& comp) {
return std::min_element(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
std::forward<LessThan>(comp));
@@ -1522,8 +1586,9 @@ container_algorithm_internal::ContainerIter<Sequence> c_min_element(
// to return an iterator pointing to the element with the largest value, using
// `operator<` to make the comparisons.
template <typename Sequence>
container_algorithm_internal::ContainerIter<Sequence> c_max_element(
Sequence& sequence) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
container_algorithm_internal::ContainerIter<Sequence>
c_max_element(Sequence& sequence) {
return std::max_element(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence));
}
@@ -1531,8 +1596,9 @@ container_algorithm_internal::ContainerIter<Sequence> c_max_element(
// Overload of c_max_element() for performing a `comp` comparison other than
// `operator<`.
template <typename Sequence, typename LessThan>
container_algorithm_internal::ContainerIter<Sequence> c_max_element(
Sequence& sequence, LessThan&& comp) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
container_algorithm_internal::ContainerIter<Sequence>
c_max_element(Sequence& sequence, LessThan&& comp) {
return std::max_element(container_algorithm_internal::c_begin(sequence),
container_algorithm_internal::c_end(sequence),
std::forward<LessThan>(comp));
@@ -1545,8 +1611,9 @@ container_algorithm_internal::ContainerIter<Sequence> c_max_element(
// smallest and largest values, respectively, using `operator<` to make the
// comparisons.
template <typename C>
container_algorithm_internal::ContainerIterPairType<C, C> c_minmax_element(
C& c) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
container_algorithm_internal::ContainerIterPairType<C, C>
c_minmax_element(C& c) {
return std::minmax_element(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c));
}
@@ -1554,8 +1621,9 @@ container_algorithm_internal::ContainerIterPairType<C, C> c_minmax_element(
// Overload of c_minmax_element() for performing `comp` comparisons other than
// `operator<`.
template <typename C, typename LessThan>
container_algorithm_internal::ContainerIterPairType<C, C> c_minmax_element(
C& c, LessThan&& comp) {
ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
container_algorithm_internal::ContainerIterPairType<C, C>
c_minmax_element(C& c, LessThan&& comp) {
return std::minmax_element(container_algorithm_internal::c_begin(c),
container_algorithm_internal::c_end(c),
std::forward<LessThan>(comp));

View File

@@ -15,6 +15,7 @@
#include "absl/algorithm/container.h"
#include <algorithm>
#include <array>
#include <functional>
#include <initializer_list>
#include <iterator>
@@ -31,6 +32,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/casts.h"
#include "absl/base/config.h"
#include "absl/base/macros.h"
#include "absl/memory/memory.h"
#include "absl/types/span.h"
@@ -113,6 +115,11 @@ TEST_F(NonMutatingTest, FindReturnsCorrectType) {
absl::c_find(absl::implicit_cast<const std::list<int>&>(sequence_), 3);
}
// c_contains() reports membership: 3 is in the fixture container, 4 is not.
// (assumes `container_` holds {1, 2, 3} — TODO confirm against the fixture.)
TEST_F(NonMutatingTest, Contains) {
  EXPECT_TRUE(absl::c_contains(container_, 3));
  EXPECT_FALSE(absl::c_contains(container_, 4));
}
// Smoke test: c_find_if() compiles and runs over the fixture container.
TEST_F(NonMutatingTest, FindIf) { absl::c_find_if(container_, Predicate); }
TEST_F(NonMutatingTest, FindIfNot) {
@@ -305,6 +312,17 @@ TEST_F(NonMutatingTest, SearchWithPredicate) {
absl::c_search(vector_, sequence_, BinPredicate);
}
// c_contains_subrange() should succeed across different container kinds
// (presumably the fixtures hold equal contents — verify against the fixture).
TEST_F(NonMutatingTest, ContainsSubrange) {
  EXPECT_TRUE(absl::c_contains_subrange(sequence_, vector_));
  EXPECT_TRUE(absl::c_contains_subrange(vector_, sequence_));
  EXPECT_TRUE(absl::c_contains_subrange(array_, sequence_));
}
// Same checks as above, with an explicit equality predicate instead of `==`.
TEST_F(NonMutatingTest, ContainsSubrangeWithPredicate) {
  EXPECT_TRUE(absl::c_contains_subrange(sequence_, vector_, Equals));
  EXPECT_TRUE(absl::c_contains_subrange(vector_, sequence_, Equals));
}
// Smoke test: c_search_n() compiles and runs.
TEST_F(NonMutatingTest, SearchN) { absl::c_search_n(sequence_, 3, 1); }
TEST_F(NonMutatingTest, SearchNWithPredicate) {
@@ -1144,4 +1162,258 @@ TEST(MutatingTest, PermutationOperations) {
EXPECT_EQ(initial, permuted);
}
#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
// Compile-time (static_assert) coverage for the algorithms annotated as
// constexpr-capable; these tests are guarded by a C++17 language-version check.
TEST(ConstexprTest, Distance) {
  // Works at compile time with constexpr containers.
  static_assert(absl::c_distance(std::array<int, 3>()) == 3);
}
TEST(ConstexprTest, MinElement) {
  constexpr std::array<int, 3> kArray = {1, 2, 3};
  static_assert(*absl::c_min_element(kArray) == 1);
}
TEST(ConstexprTest, MinElementWithPredicate) {
  // With std::greater, the "minimum" is the largest value.
  constexpr std::array<int, 3> kArray = {1, 2, 3};
  static_assert(*absl::c_min_element(kArray, std::greater<int>()) == 3);
}
TEST(ConstexprTest, MaxElement) {
  constexpr std::array<int, 3> kArray = {1, 2, 3};
  static_assert(*absl::c_max_element(kArray) == 3);
}
TEST(ConstexprTest, MaxElementWithPredicate) {
  // With std::greater, the "maximum" is the smallest value.
  constexpr std::array<int, 3> kArray = {1, 2, 3};
  static_assert(*absl::c_max_element(kArray, std::greater<int>()) == 1);
}
TEST(ConstexprTest, MinMaxElement) {
  static constexpr std::array<int, 3> kArray = {1, 2, 3};
  constexpr auto kMinMaxPair = absl::c_minmax_element(kArray);
  static_assert(*kMinMaxPair.first == 1);
  static_assert(*kMinMaxPair.second == 3);
}
TEST(ConstexprTest, MinMaxElementWithPredicate) {
  // Comparator inversion swaps which end of the pair holds min vs. max.
  static constexpr std::array<int, 3> kArray = {1, 2, 3};
  constexpr auto kMinMaxPair =
      absl::c_minmax_element(kArray, std::greater<int>());
  static_assert(*kMinMaxPair.first == 3);
  static_assert(*kMinMaxPair.second == 1);
}
#endif // defined(ABSL_INTERNAL_CPLUSPLUS_LANG) &&
// ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L
// Compile-time (static_assert) coverage for algorithms that become constexpr
// under C++20; these tests are guarded by a C++20 language-version check.
TEST(ConstexprTest, LinearSearch) {
  static constexpr std::array<int, 3> kArray = {1, 2, 3};
  static_assert(absl::c_linear_search(kArray, 3));
  static_assert(!absl::c_linear_search(kArray, 4));
}
TEST(ConstexprTest, AllOf) {
  static constexpr std::array<int, 3> kArray = {1, 2, 3};
  static_assert(!absl::c_all_of(kArray, [](int x) { return x > 1; }));
  static_assert(absl::c_all_of(kArray, [](int x) { return x > 0; }));
}
TEST(ConstexprTest, AnyOf) {
  static constexpr std::array<int, 3> kArray = {1, 2, 3};
  static_assert(absl::c_any_of(kArray, [](int x) { return x > 2; }));
  static_assert(!absl::c_any_of(kArray, [](int x) { return x > 5; }));
}
TEST(ConstexprTest, NoneOf) {
  static constexpr std::array<int, 3> kArray = {1, 2, 3};
  static_assert(!absl::c_none_of(kArray, [](int x) { return x > 2; }));
  static_assert(absl::c_none_of(kArray, [](int x) { return x > 5; }));
}
TEST(ConstexprTest, ForEach) {
  // Mutating algorithm exercised inside a constexpr immediately-invoked
  // lambda; the result is checked against a brace-deduced std::array.
  static constexpr std::array<int, 3> kArray = [] {
    std::array<int, 3> array = {1, 2, 3};
    absl::c_for_each(array, [](int& x) { x += 1; });
    return array;
  }();
  static_assert(kArray == std::array{2, 3, 4});
}
TEST(ConstexprTest, Find) {
  static constexpr std::array<int, 3> kArray = {1, 2, 3};
  static_assert(absl::c_find(kArray, 1) == kArray.begin());
  static_assert(absl::c_find(kArray, 4) == kArray.end());
}
TEST(ConstexprTest, Contains) {
  static constexpr std::array<int, 3> kArray = {1, 2, 3};
  static_assert(absl::c_contains(kArray, 1));
  static_assert(!absl::c_contains(kArray, 4));
}
TEST(ConstexprTest, FindIf) {
  static constexpr std::array<int, 3> kArray = {1, 2, 3};
  static_assert(absl::c_find_if(kArray, [](int x) { return x > 2; }) ==
                kArray.begin() + 2);
  static_assert(absl::c_find_if(kArray, [](int x) { return x > 5; }) ==
                kArray.end());
}
TEST(ConstexprTest, FindIfNot) {
  static constexpr std::array<int, 3> kArray = {1, 2, 3};
  static_assert(absl::c_find_if_not(kArray, [](int x) { return x > 1; }) ==
                kArray.begin());
  static_assert(absl::c_find_if_not(kArray, [](int x) { return x > 0; }) ==
                kArray.end());
}
TEST(ConstexprTest, FindEnd) {
  // {2, 3} occurs twice; c_find_end must return the LAST occurrence.
  static constexpr std::array<int, 5> kHaystack = {1, 2, 3, 2, 3};
  static constexpr std::array<int, 2> kNeedle = {2, 3};
  static_assert(absl::c_find_end(kHaystack, kNeedle) == kHaystack.begin() + 3);
}
TEST(ConstexprTest, FindFirstOf) {
  static constexpr std::array<int, 3> kArray = {1, 2, 3};
  static_assert(absl::c_find_first_of(kArray, kArray) == kArray.begin());
}
TEST(ConstexprTest, AdjacentFind) {
  static constexpr std::array<int, 4> kArray = {1, 2, 2, 3};
  static_assert(absl::c_adjacent_find(kArray) == kArray.begin() + 1);
}
TEST(ConstexprTest, AdjacentFindWithPredicate) {
  // With std::less, the first strictly-increasing adjacent pair matches.
  static constexpr std::array<int, 3> kArray = {1, 2, 3};
  static_assert(absl::c_adjacent_find(kArray, std::less<int>()) ==
                kArray.begin());
}
TEST(ConstexprTest, Count) {
  static constexpr std::array<int, 3> kArray = {1, 2, 3};
  static_assert(absl::c_count(kArray, 1) == 1);
  static_assert(absl::c_count(kArray, 2) == 1);
  static_assert(absl::c_count(kArray, 3) == 1);
  static_assert(absl::c_count(kArray, 4) == 0);
}
TEST(ConstexprTest, CountIf) {
  static constexpr std::array<int, 3> kArray = {1, 2, 3};
  static_assert(absl::c_count_if(kArray, [](int x) { return x > 0; }) == 3);
  static_assert(absl::c_count_if(kArray, [](int x) { return x > 1; }) == 2);
}
// Compile-time coverage continued: pairwise-comparison algorithms. Each test
// uses one matching array (kArray2) and one fully-differing array (kArray3).
TEST(ConstexprTest, Mismatch) {
  static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
  static constexpr std::array<int, 3> kArray2 = {1, 2, 3};
  static constexpr std::array<int, 3> kArray3 = {2, 3, 4};
  static_assert(absl::c_mismatch(kArray1, kArray2) ==
                std::pair{kArray1.end(), kArray2.end()});
  static_assert(absl::c_mismatch(kArray1, kArray3) ==
                std::pair{kArray1.begin(), kArray3.begin()});
}
TEST(ConstexprTest, MismatchWithPredicate) {
  // not_equal_to inverts the match: identical arrays "mismatch" immediately.
  static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
  static constexpr std::array<int, 3> kArray2 = {1, 2, 3};
  static constexpr std::array<int, 3> kArray3 = {2, 3, 4};
  static_assert(absl::c_mismatch(kArray1, kArray2, std::not_equal_to<int>()) ==
                std::pair{kArray1.begin(), kArray2.begin()});
  static_assert(absl::c_mismatch(kArray1, kArray3, std::not_equal_to<int>()) ==
                std::pair{kArray1.end(), kArray3.end()});
}
TEST(ConstexprTest, Equal) {
  static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
  static constexpr std::array<int, 3> kArray2 = {1, 2, 3};
  static constexpr std::array<int, 3> kArray3 = {2, 3, 4};
  static_assert(absl::c_equal(kArray1, kArray2));
  static_assert(!absl::c_equal(kArray1, kArray3));
}
TEST(ConstexprTest, EqualWithPredicate) {
  static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
  static constexpr std::array<int, 3> kArray2 = {1, 2, 3};
  static constexpr std::array<int, 3> kArray3 = {2, 3, 4};
  static_assert(!absl::c_equal(kArray1, kArray2, std::not_equal_to<int>()));
  static_assert(absl::c_equal(kArray1, kArray3, std::not_equal_to<int>()));
}
TEST(ConstexprTest, IsPermutation) {
  static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
  static constexpr std::array<int, 3> kArray2 = {3, 2, 1};
  static constexpr std::array<int, 3> kArray3 = {2, 3, 4};
  static_assert(absl::c_is_permutation(kArray1, kArray2));
  static_assert(!absl::c_is_permutation(kArray1, kArray3));
}
TEST(ConstexprTest, IsPermutationWithPredicate) {
  static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
  static constexpr std::array<int, 3> kArray2 = {3, 2, 1};
  static constexpr std::array<int, 3> kArray3 = {2, 3, 4};
  static_assert(absl::c_is_permutation(kArray1, kArray2, std::equal_to<int>()));
  static_assert(
      !absl::c_is_permutation(kArray1, kArray3, std::equal_to<int>()));
}
TEST(ConstexprTest, Search) {
  static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
  static constexpr std::array<int, 3> kArray2 = {1, 2, 3};
  static constexpr std::array<int, 3> kArray3 = {2, 3, 4};
  static_assert(absl::c_search(kArray1, kArray2) == kArray1.begin());
  static_assert(absl::c_search(kArray1, kArray3) == kArray1.end());
}
TEST(ConstexprTest, SearchWithPredicate) {
  static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
  static constexpr std::array<int, 3> kArray2 = {1, 2, 3};
  static constexpr std::array<int, 3> kArray3 = {2, 3, 4};
  static_assert(absl::c_search(kArray1, kArray2, std::not_equal_to<int>()) ==
                kArray1.end());
  static_assert(absl::c_search(kArray1, kArray3, std::not_equal_to<int>()) ==
                kArray1.begin());
}
TEST(ConstexprTest, ContainsSubrange) {
  static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
  static constexpr std::array<int, 3> kArray2 = {1, 2, 3};
  static constexpr std::array<int, 3> kArray3 = {2, 3, 4};
  static_assert(absl::c_contains_subrange(kArray1, kArray2));
  static_assert(!absl::c_contains_subrange(kArray1, kArray3));
}
TEST(ConstexprTest, ContainsSubrangeWithPredicate) {
  static constexpr std::array<int, 3> kArray1 = {1, 2, 3};
  static constexpr std::array<int, 3> kArray2 = {1, 2, 3};
  static constexpr std::array<int, 3> kArray3 = {2, 3, 4};
  static_assert(
      !absl::c_contains_subrange(kArray1, kArray2, std::not_equal_to<>()));
  static_assert(
      absl::c_contains_subrange(kArray1, kArray3, std::not_equal_to<>()));
}
TEST(ConstexprTest, SearchN) {
  static constexpr std::array<int, 4> kArray = {1, 2, 2, 3};
  static_assert(absl::c_search_n(kArray, 1, 1) == kArray.begin());
  static_assert(absl::c_search_n(kArray, 2, 2) == kArray.begin() + 1);
  static_assert(absl::c_search_n(kArray, 1, 4) == kArray.end());
}
TEST(ConstexprTest, SearchNWithPredicate) {
  // not_equal_to: a run of `count` elements all differing from `value`.
  static constexpr std::array<int, 4> kArray = {1, 2, 2, 3};
  static_assert(absl::c_search_n(kArray, 1, 1, std::not_equal_to<int>()) ==
                kArray.begin() + 1);
  static_assert(absl::c_search_n(kArray, 2, 2, std::not_equal_to<int>()) ==
                kArray.end());
  static_assert(absl::c_search_n(kArray, 1, 4, std::not_equal_to<int>()) ==
                kArray.begin());
}
#endif // defined(ABSL_INTERNAL_CPLUSPLUS_LANG) &&
// ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L
} // namespace

View File

@@ -74,7 +74,10 @@ cc_library(
hdrs = ["no_destructor.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [":config"],
deps = [
":config",
":nullability",
],
)
cc_library(
@@ -84,6 +87,7 @@ cc_library(
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":config",
":core_headers",
"//absl/meta:type_traits",
],
@@ -333,6 +337,18 @@ cc_test(
],
)
# Verifies that headers in :config and :core_headers are consumable from a
# plain C translation unit (srcs is a .c file). Skipped on WASM targets.
cc_test(
    name = "c_header_test",
    srcs = ["c_header_test.c"],
    tags = [
        "no_test_wasm",
    ],
    deps = [
        ":config",
        ":core_headers",
    ],
)
cc_library(
name = "throw_delegate",
srcs = ["internal/throw_delegate.cc"],
@@ -847,6 +863,41 @@ cc_test(
],
)
# Internal poison helper (internal/poison.{h,cc}); visible only inside absl.
# Depends on :malloc_internal in addition to the base config/core headers.
cc_library(
    name = "poison",
    srcs = [
        "internal/poison.cc",
    ],
    hdrs = ["internal/poison.h"],
    copts = ABSL_DEFAULT_COPTS,
    linkopts = ABSL_DEFAULT_LINKOPTS,
    visibility = [
        "//absl:__subpackages__",
    ],
    deps = [
        ":config",
        ":core_headers",
        ":malloc_internal",
    ],
)

# Unit test for :poison, built against googletest.
cc_test(
    name = "poison_test",
    size = "small",
    timeout = "short",
    srcs = [
        "internal/poison_test.cc",
    ],
    copts = ABSL_TEST_COPTS,
    linkopts = ABSL_DEFAULT_LINKOPTS,
    deps = [
        ":config",
        ":poison",
        "@com_google_googletest//:gtest",
        "@com_google_googletest//:gtest_main",
    ],
)
cc_test(
name = "unique_small_name_test",
size = "small",
@@ -875,3 +926,44 @@ cc_test(
"@com_google_googletest//:gtest_main",
],
)
# Internal tracing hooks (internal/tracing.{h,cc}); visible only inside absl.
cc_library(
    name = "tracing_internal",
    srcs = ["internal/tracing.cc"],
    hdrs = ["internal/tracing.h"],
    copts = ABSL_DEFAULT_COPTS,
    linkopts = ABSL_DEFAULT_LINKOPTS,
    visibility = [
        "//absl:__subpackages__",
    ],
    deps = [
        "//absl/base:config",
        "//absl/base:core_headers",
    ],
)

# Exercises the weak-symbol (no strong override) path of the tracing hooks.
cc_test(
    name = "tracing_internal_weak_test",
    srcs = ["internal/tracing_weak_test.cc"],
    copts = ABSL_TEST_COPTS,
    linkopts = ABSL_DEFAULT_LINKOPTS,
    deps = [
        ":tracing_internal",
        "@com_google_googletest//:gtest",
        "@com_google_googletest//:gtest_main",
    ],
)

# Exercises the strong-override path of the tracing hooks.
cc_test(
    name = "tracing_internal_strong_test",
    srcs = ["internal/tracing_strong_test.cc"],
    copts = ABSL_TEST_COPTS,
    linkopts = ABSL_DEFAULT_LINKOPTS,
    deps = [
        ":config",
        ":core_headers",
        ":tracing_internal",
        "@com_google_googletest//:gtest",
        "@com_google_googletest//:gtest_main",
    ],
)

View File

@@ -6,7 +6,7 @@ import("//third_party/abseil-cpp/absl.gni")
absl_source_set("atomic_hook") {
public = [ "internal/atomic_hook.h" ]
public_deps = [
deps = [
":config",
":core_headers",
]
@@ -15,14 +15,14 @@ absl_source_set("atomic_hook") {
absl_source_set("errno_saver") {
public = [ "internal/errno_saver.h" ]
public_deps = [ ":config" ]
deps = [ ":config" ]
visibility = [ "//third_party/abseil-cpp/absl/*" ]
}
absl_source_set("log_severity") {
sources = [ "log_severity.cc" ]
public = [ "log_severity.h" ]
public_deps = [
deps = [
":config",
":core_headers",
]
@@ -30,13 +30,17 @@ absl_source_set("log_severity") {
absl_source_set("no_destructor") {
public = [ "no_destructor.h" ]
deps = [ ":config" ]
deps = [
":config",
":nullability",
]
}
absl_source_set("nullability") {
sources = [ "internal/nullability_impl.h" ]
public = [ "nullability.h" ]
deps = [
":config",
":core_headers",
"//third_party/abseil-cpp/absl/meta:type_traits",
]
@@ -45,14 +49,17 @@ absl_source_set("nullability") {
absl_source_set("raw_logging_internal") {
sources = [ "internal/raw_logging.cc" ]
public = [ "internal/raw_logging.h" ]
public_deps = [
deps = [
":atomic_hook",
":config",
":core_headers",
":errno_saver",
":log_severity",
]
visibility = [ "//third_party/abseil-cpp/absl/*" ]
visibility = [
"//third_party/abseil-cpp:absl_component_deps",
"//third_party/abseil-cpp/absl/*",
]
if (moz_webrtc_build) {
public_deps -= [ ":errno_saver" ]
}
@@ -100,7 +107,10 @@ absl_source_set("dynamic_annotations") {
# Abseil's dynamic annotations are only visible inside Abseil because
# their usage is deprecated in Chromium (see README.chromium for more info).
visibility = [ "//third_party/abseil-cpp/absl/*" ]
visibility = [
"//third_party/abseil-cpp:absl_component_deps",
"//third_party/abseil-cpp/absl/*",
]
deps = [
":config",
":core_headers",
@@ -116,7 +126,7 @@ absl_source_set("core_headers") {
"port.h",
"thread_annotations.h",
]
public_deps = [ ":config" ]
deps = [ ":config" ]
}
absl_source_set("malloc_internal") {
@@ -125,7 +135,7 @@ absl_source_set("malloc_internal") {
"internal/direct_mmap.h",
"internal/low_level_alloc.h",
]
public_deps = [
deps = [
":base",
":base_internal",
":config",
@@ -143,7 +153,7 @@ absl_source_set("base_internal") {
"internal/invoke.h",
"internal/scheduling_mode.h",
]
public_deps = [
deps = [
":config",
"//third_party/abseil-cpp/absl/meta:type_traits",
]
@@ -174,7 +184,7 @@ absl_source_set("base") {
# TODO(mbonadei): The bazel file has:
# "-DEFAULTLIB:advapi32.lib"
# understand if this is needed here as well.
public_deps = [
deps = [
":atomic_hook",
":base_internal",
":config",
@@ -189,10 +199,20 @@ absl_source_set("base") {
]
}
# This should be an executable() more than a test.
# It cannot be enabled because it defines its own main() function.
# absl_test("c_header_test") {
# sources = ["c_header_test.c"]
# deps = [
# ":config",
# ":core_headers",
# ]
# }
absl_source_set("throw_delegate") {
sources = [ "internal/throw_delegate.cc" ]
public = [ "internal/throw_delegate.h" ]
public_deps = [
deps = [
":config",
":raw_logging_internal",
]
@@ -202,7 +222,7 @@ absl_source_set("throw_delegate") {
absl_source_set("exception_testing") {
testonly = true
public = [ "internal/exception_testing.h" ]
public_deps = [ ":config" ]
deps = [ ":config" ]
visibility = [ "//third_party/abseil-cpp/absl/*" ]
}
@@ -256,7 +276,7 @@ absl_source_set("endian") {
"internal/endian.h",
"internal/unaligned_access.h",
]
public_deps = [
deps = [
":base",
":config",
":core_headers",
@@ -268,16 +288,18 @@ absl_source_set("scoped_set_env") {
testonly = true
public = [ "internal/scoped_set_env.h" ]
sources = [ "internal/scoped_set_env.cc" ]
public_deps = [ ":config" ]
deps = [ ":raw_logging_internal" ]
deps = [
":config",
":raw_logging_internal",
]
visibility = [ "//third_party/abseil-cpp/absl/*" ]
}
absl_source_set("strerror") {
sources = [ "internal/strerror.cc" ]
public = [ "internal/strerror.h" ]
public_deps = [ ":config" ]
deps = [
":config",
":core_headers",
":errno_saver",
]
@@ -289,7 +311,7 @@ absl_source_set("strerror") {
absl_source_set("fast_type_id") {
public = [ "internal/fast_type_id.h" ]
public_deps = [ ":config" ]
deps = [ ":config" ]
visibility = [ "//third_party/abseil-cpp/absl/*" ]
}
@@ -306,6 +328,47 @@ absl_test("prefetch_test") {
deps = [ ":prefetch" ]
}
absl_source_set("poison") {
public = [ "internal/poison.h" ]
sources = [ "internal/poison.cc" ]
deps = [
":config",
":core_headers",
":malloc_internal",
]
}
absl_test("poison_test") {
sources = [ "internal/poison_test.cc" ]
deps = [
":config",
":poison",
]
}
absl_source_set("tracing_internal") {
public = [ "internal/tracing.h" ]
sources = [ "internal/tracing.cc" ]
deps = [
":config",
":core_headers",
]
}
absl_test("tracing_internal_weak_test") {
sources = [ "internal/tracing_weak_test.cc" ]
deps = [ ":tracing_internal" ]
}
absl_test("tracing_internal_strong_test") {
sources = [ "internal/tracing_strong_test.cc" ]
deps = [
":config",
":core_headers",
":tracing_internal",
]
}
absl_test("config_test") {
sources = [ "config_test.cc" ]
deps = [

View File

@@ -62,6 +62,7 @@ absl_cc_library(
"no_destructor.h"
DEPS
absl::config
absl::nullability
COPTS
${ABSL_DEFAULT_COPTS}
)
@@ -74,6 +75,7 @@ absl_cc_library(
SRCS
"internal/nullability_impl.h"
DEPS
absl::config
absl::core_headers
absl::type_traits
COPTS
@@ -735,3 +737,72 @@ absl_cc_test(
absl::optional
GTest::gtest_main
)
absl_cc_library(
NAME
poison
SRCS
"internal/poison.cc"
HDRS
"internal/poison.h"
COPTS
${ABSL_DEFAULT_COPTS}
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
DEPS
absl::config
absl::core_headers
absl::malloc_internal
)
absl_cc_test(
NAME
poison_test
SRCS
"internal/poison_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::config
absl::poison
GTest::gtest_main
)
absl_cc_library(
NAME
tracing_internal
HDRS
"internal/tracing.h"
SRCS
"internal/tracing.cc"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::base
)
absl_cc_test(
NAME
tracing_internal_weak_test
SRCS
"internal/tracing_weak_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::base
absl::tracing_internal
GTest::gtest_main
)
absl_cc_test(
NAME
tracing_internal_strong_test
SRCS
"internal/tracing_strong_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::base
absl::tracing_internal
GTest::gtest_main
)

View File

@@ -133,12 +133,14 @@
// Tags a function as weak for the purposes of compilation and linking.
// Weak attributes did not work properly in LLVM's Windows backend before
// 9.0.0, so disable them there. See https://bugs.llvm.org/show_bug.cgi?id=37598
// for further information.
// for further information. Weak attributes do not work across DLL boundary.
// The MinGW compiler doesn't complain about the weak attribute until the link
// step, presumably because Windows doesn't use ELF binaries.
#if (ABSL_HAVE_ATTRIBUTE(weak) || \
(defined(__GNUC__) && !defined(__clang__))) && \
(!defined(_WIN32) || (defined(__clang__) && __clang_major__ >= 9)) && \
#if (ABSL_HAVE_ATTRIBUTE(weak) || \
(defined(__GNUC__) && !defined(__clang__))) && \
(!defined(_WIN32) || \
(defined(__clang__) && __clang_major__ >= 9 && \
!defined(ABSL_BUILD_DLL) && !defined(ABSL_CONSUME_DLL))) && \
!defined(__MINGW32__)
#undef ABSL_ATTRIBUTE_WEAK
#define ABSL_ATTRIBUTE_WEAK __attribute__((weak))
@@ -195,6 +197,9 @@
// ABSL_ATTRIBUTE_NORETURN
//
// Tells the compiler that a given function never returns.
//
// Deprecated: Prefer the `[[noreturn]]` attribute standardized by C++11 over
// this macro.
#if ABSL_HAVE_ATTRIBUTE(noreturn) || (defined(__GNUC__) && !defined(__clang__))
#define ABSL_ATTRIBUTE_NORETURN __attribute__((noreturn))
#elif defined(_MSC_VER)
@@ -702,6 +707,11 @@
_Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
#define ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING \
_Pragma("GCC diagnostic pop")
#elif defined(_MSC_VER)
#define ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING \
_Pragma("warning(push)") _Pragma("warning(disable: 4996)")
#define ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING \
_Pragma("warning(pop)")
#else
#define ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
#define ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
@@ -808,14 +818,43 @@
//
// See also the upstream documentation:
// https://clang.llvm.org/docs/AttributeReference.html#lifetimebound
// https://learn.microsoft.com/en-us/cpp/code-quality/c26816?view=msvc-170
#if ABSL_HAVE_CPP_ATTRIBUTE(clang::lifetimebound)
#define ABSL_ATTRIBUTE_LIFETIME_BOUND [[clang::lifetimebound]]
#elif ABSL_HAVE_CPP_ATTRIBUTE(msvc::lifetimebound)
#define ABSL_ATTRIBUTE_LIFETIME_BOUND [[msvc::lifetimebound]]
#elif ABSL_HAVE_ATTRIBUTE(lifetimebound)
#define ABSL_ATTRIBUTE_LIFETIME_BOUND __attribute__((lifetimebound))
#else
#define ABSL_ATTRIBUTE_LIFETIME_BOUND
#endif
// ABSL_INTERNAL_ATTRIBUTE_VIEW indicates that a type acts like a view i.e. a
// raw (non-owning) pointer. This enables diagnoses similar to those enabled by
// ABSL_ATTRIBUTE_LIFETIME_BOUND.
//
// See the following links for details:
// https://reviews.llvm.org/D64448
// https://lists.llvm.org/pipermail/cfe-dev/2018-November/060355.html
#if ABSL_HAVE_CPP_ATTRIBUTE(gsl::Pointer)
#define ABSL_INTERNAL_ATTRIBUTE_VIEW [[gsl::Pointer]]
#else
#define ABSL_INTERNAL_ATTRIBUTE_VIEW
#endif
// ABSL_INTERNAL_ATTRIBUTE_OWNER indicates that a type acts like a smart
// (owning) pointer. This enables diagnoses similar to those enabled by
// ABSL_ATTRIBUTE_LIFETIME_BOUND.
//
// See the following links for details:
// https://reviews.llvm.org/D64448
// https://lists.llvm.org/pipermail/cfe-dev/2018-November/060355.html
#if ABSL_HAVE_CPP_ATTRIBUTE(gsl::Owner)
#define ABSL_INTERNAL_ATTRIBUTE_OWNER [[gsl::Owner]]
#else
#define ABSL_INTERNAL_ATTRIBUTE_OWNER
#endif
// ABSL_ATTRIBUTE_TRIVIAL_ABI
// Indicates that a type is "trivially relocatable" -- meaning it can be
// relocated without invoking the constructor/destructor, using a form of move

View File

@@ -0,0 +1,30 @@
// Copyright 2024 The Abseil Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifdef __cplusplus
#error This is a C compile test
#endif
// This test ensures that headers that are included in legacy C code are
// compatible with C. Abseil is a C++ library. We do not desire to expand C
// compatibility or keep C compatibility forever. This test only exists to
// ensure C compatibility until it is no longer required. Do not add new code
// that requires C compatibility.
#include "absl/base/attributes.h" // IWYU pragma: keep
#include "absl/base/config.h" // IWYU pragma: keep
#include "absl/base/optimization.h" // IWYU pragma: keep
#include "absl/base/policy_checks.h" // IWYU pragma: keep
#include "absl/base/port.h" // IWYU pragma: keep
int main() { return 0; }

View File

@@ -231,12 +231,11 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#endif
// ABSL_HAVE_TLS is defined to 1 when __thread should be supported.
// We assume __thread is supported on Linux or Asylo when compiled with Clang or
// We assume __thread is supported on Linux when compiled with Clang or
// compiled against libstdc++ with _GLIBCXX_HAVE_TLS defined.
#ifdef ABSL_HAVE_TLS
#error ABSL_HAVE_TLS cannot be directly set
#elif (defined(__linux__) || defined(__ASYLO__)) && \
(defined(__clang__) || defined(_GLIBCXX_HAVE_TLS))
#elif (defined(__linux__)) && (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS))
#define ABSL_HAVE_TLS 1
#endif
@@ -275,52 +274,17 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE 1
#endif
// ABSL_HAVE_THREAD_LOCAL
//
// DEPRECATED - `thread_local` is available on all supported platforms.
// Checks whether C++11's `thread_local` storage duration specifier is
// supported.
#ifdef ABSL_HAVE_THREAD_LOCAL
#error ABSL_HAVE_THREAD_LOCAL cannot be directly set
#elif defined(__APPLE__)
// Notes:
// * Xcode's clang did not support `thread_local` until version 8, and
// even then not for all iOS < 9.0.
// * Xcode 9.3 started disallowing `thread_local` for 32-bit iOS simulator
// targeting iOS 9.x.
// * Xcode 10 moves the deployment target check for iOS < 9.0 to link time
// making ABSL_HAVE_FEATURE unreliable there.
//
#if ABSL_HAVE_FEATURE(cxx_thread_local) && \
!(TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0)
#else
#define ABSL_HAVE_THREAD_LOCAL 1
#endif
#else // !defined(__APPLE__)
#define ABSL_HAVE_THREAD_LOCAL 1
#endif
// There are platforms for which TLS should not be used even though the compiler
// makes it seem like it's supported (Android NDK < r12b for example).
// This is primarily because of linker problems and toolchain misconfiguration:
// Abseil does not intend to support this indefinitely. Currently, the newest
// toolchain that we intend to support that requires this behavior is the
// r11 NDK - allowing for a 5 year support window on that means this option
// is likely to be removed around June of 2021.
// TLS isn't supported until NDK r12b per
// https://developer.android.com/ndk/downloads/revision_history.html
// Since NDK r16, `__NDK_MAJOR__` and `__NDK_MINOR__` are defined in
// <android/ndk-version.h>. For NDK < r16, users should define these macros,
// e.g. `-D__NDK_MAJOR__=11 -D__NKD_MINOR__=0` for NDK r11.
#if defined(__ANDROID__) && defined(__clang__)
#if __has_include(<android/ndk-version.h>)
#include <android/ndk-version.h>
#endif // __has_include(<android/ndk-version.h>)
#if defined(__ANDROID__) && defined(__clang__) && defined(__NDK_MAJOR__) && \
defined(__NDK_MINOR__) && \
((__NDK_MAJOR__ < 12) || ((__NDK_MAJOR__ == 12) && (__NDK_MINOR__ < 1)))
#undef ABSL_HAVE_TLS
#undef ABSL_HAVE_THREAD_LOCAL
#endif
#endif // defined(__ANDROID__) && defined(__clang__)
// ABSL_HAVE_INTRINSIC_INT128
//
@@ -414,9 +378,9 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
defined(_AIX) || defined(__ros__) || defined(__native_client__) || \
defined(__asmjs__) || defined(__EMSCRIPTEN__) || defined(__Fuchsia__) || \
defined(__sun) || defined(__ASYLO__) || defined(__myriad2__) || \
defined(__HAIKU__) || defined(__OpenBSD__) || defined(__NetBSD__) || \
defined(__QNX__) || defined(__VXWORKS__) || defined(__hexagon__)
defined(__sun) || defined(__myriad2__) || defined(__HAIKU__) || \
defined(__OpenBSD__) || defined(__NetBSD__) || defined(__QNX__) || \
defined(__VXWORKS__) || defined(__hexagon__)
#define ABSL_HAVE_MMAP 1
#endif
@@ -900,7 +864,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#error ABSL_INTERNAL_HAS_CXA_DEMANGLE cannot be directly set
#elif defined(OS_ANDROID) && (defined(__i386__) || defined(__x86_64__))
#define ABSL_INTERNAL_HAS_CXA_DEMANGLE 0
#elif defined(__GNUC__) && !defined(__mips__)
#elif defined(__GNUC__)
#define ABSL_INTERNAL_HAS_CXA_DEMANGLE 1
#elif defined(__clang__) && !defined(_MSC_VER)
#define ABSL_INTERNAL_HAS_CXA_DEMANGLE 1
@@ -962,7 +926,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
// https://llvm.org/docs/CompileCudaWithLLVM.html#detecting-clang-vs-nvcc-from-code
#ifdef ABSL_INTERNAL_HAVE_ARM_NEON
#error ABSL_INTERNAL_HAVE_ARM_NEON cannot be directly set
#elif defined(__ARM_NEON) && !defined(__CUDA_ARCH__)
#elif defined(__ARM_NEON) && !(defined(__NVCC__) && defined(__CUDACC__))
#define ABSL_INTERNAL_HAVE_ARM_NEON 1
#endif
@@ -977,6 +941,27 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
#define ABSL_HAVE_CONSTANT_EVALUATED 1
#endif
// ABSL_INTERNAL_CONSTEXPR_SINCE_CXXYY is used to conditionally define constexpr
// for different C++ versions.
//
// These macros are an implementation detail and will be unconditionally removed
// once the minimum supported C++ version catches up to a given version.
//
// For this reason, this symbol is considered INTERNAL and code outside of
// Abseil must not use it.
#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
#define ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17 constexpr
#else
#define ABSL_INTERNAL_CONSTEXPR_SINCE_CXX17
#endif
#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L
#define ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20 constexpr
#else
#define ABSL_INTERNAL_CONSTEXPR_SINCE_CXX20
#endif
// ABSL_INTERNAL_EMSCRIPTEN_VERSION combines Emscripten's three version macros
// into an integer that can be compared against.
#ifdef ABSL_INTERNAL_EMSCRIPTEN_VERSION

View File

@@ -249,25 +249,9 @@ ABSL_INTERNAL_END_EXTERN_C
#else // !defined(ABSL_HAVE_MEMORY_SANITIZER)
// TODO(rogeeff): remove this branch
#ifdef ABSL_HAVE_THREAD_SANITIZER
#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
do { \
(void)(address); \
(void)(size); \
} while (0)
#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
do { \
(void)(address); \
(void)(size); \
} while (0)
#else
#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) // empty
#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) // empty
#endif
#endif // ABSL_HAVE_MEMORY_SANITIZER
// -------------------------------------------------------------------------

View File

@@ -19,10 +19,11 @@
#include <type_traits>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/meta/type_traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace nullability_internal {
// `IsNullabilityCompatible` checks whether its first argument is a class
@@ -101,6 +102,7 @@ using NullabilityUnknownImpl
= T;
} // namespace nullability_internal
ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_BASE_INTERNAL_NULLABILITY_IMPL_H_

View File

@@ -0,0 +1,84 @@
// Copyright 2024 The Abseil Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/internal/poison.h"
#include <cstdlib>
#include "absl/base/config.h"
#include "absl/base/internal/direct_mmap.h"
#ifndef _WIN32
#include <unistd.h>
#endif
#if defined(ABSL_HAVE_ADDRESS_SANITIZER)
#include <sanitizer/asan_interface.h>
#elif defined(ABSL_HAVE_MEMORY_SANITIZER)
#include <sanitizer/msan_interface.h>
#elif defined(ABSL_HAVE_MMAP)
#include <sys/mman.h>
#endif
#if defined(_WIN32)
#include <windows.h>
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
namespace {
size_t GetPageSize() {
#ifdef _WIN32
SYSTEM_INFO system_info;
GetSystemInfo(&system_info);
return system_info.dwPageSize;
#elif defined(__wasm__) || defined(__asmjs__) || defined(__hexagon__)
return getpagesize();
#else
return static_cast<size_t>(sysconf(_SC_PAGESIZE));
#endif
}
} // namespace
void* InitializePoisonedPointerInternal() {
const size_t block_size = GetPageSize();
#if defined(ABSL_HAVE_ADDRESS_SANITIZER)
void* data = malloc(block_size);
ASAN_POISON_MEMORY_REGION(data, block_size);
#elif defined(ABSL_HAVE_MEMORY_SANITIZER)
void* data = malloc(block_size);
__msan_poison(data, block_size);
#elif defined(ABSL_HAVE_MMAP)
void* data = DirectMmap(nullptr, block_size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (data == MAP_FAILED) return GetBadPointerInternal();
#elif defined(_WIN32)
void* data = VirtualAlloc(nullptr, block_size, MEM_RESERVE | MEM_COMMIT,
PAGE_NOACCESS);
if (data == nullptr) return GetBadPointerInternal();
#else
return GetBadPointerInternal();
#endif
// Return the middle of the block so that dereferences before and after the
// pointer will both crash.
return static_cast<char*>(data) + block_size / 2;
}
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl

View File

@@ -0,0 +1,59 @@
// Copyright 2024 The Abseil Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_BASE_INTERNAL_POISON_H_
#define ABSL_BASE_INTERNAL_POISON_H_
#include <cstdint>
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
inline void* GetBadPointerInternal() {
// A likely bad pointer. Pointers are required to have high bits that are all
// zero or all one for certain 64-bit CPUs. This pointer value will hopefully
// cause a crash on dereference and also be clearly recognizable as invalid.
constexpr uint64_t kBadPtr = 0xBAD0BAD0BAD0BAD0;
auto ret = reinterpret_cast<void*>(static_cast<uintptr_t>(kBadPtr));
#ifndef _MSC_VER // MSVC doesn't support inline asm with `volatile`.
// Try to prevent the compiler from optimizing out the undefined behavior.
asm volatile("" : : "r"(ret) :); // NOLINT
#endif
return ret;
}
void* InitializePoisonedPointerInternal();
inline void* get_poisoned_pointer() {
#if defined(NDEBUG) && !defined(ABSL_HAVE_ADDRESS_SANITIZER) && \
!defined(ABSL_HAVE_MEMORY_SANITIZER)
// In optimized non-sanitized builds, avoid the function-local static because
// of the codegen and runtime cost.
return GetBadPointerInternal();
#else
// Non-optimized builds may use more robust implementation. Note that we can't
// use a static global because Chromium doesn't allow non-constinit globals.
static void* ptr = InitializePoisonedPointerInternal();
return ptr;
#endif
}
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_BASE_INTERNAL_POISON_H_

View File

@@ -0,0 +1,41 @@
// Copyright 2024 The Abseil Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/internal/poison.h"
#include <iostream>
#include "gtest/gtest.h"
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
namespace {
TEST(PoisonTest, CrashesOnDereference) {
#ifdef __ANDROID__
GTEST_SKIP() << "On Android, poisoned pointer dereference times out instead "
"of crashing.";
#endif
int* poisoned_ptr = static_cast<int*>(get_poisoned_pointer());
EXPECT_DEATH_IF_SUPPORTED(std::cout << *poisoned_ptr, "");
EXPECT_DEATH_IF_SUPPORTED(std::cout << *(poisoned_ptr - 10), "");
EXPECT_DEATH_IF_SUPPORTED(std::cout << *(poisoned_ptr + 10), "");
}
} // namespace
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl

View File

@@ -89,7 +89,8 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock {
// acquisition was successful. If the lock was not acquired, false is
// returned. If this SpinLock is free at the time of the call, TryLock
// will return true with high probability.
inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
ABSL_MUST_USE_RESULT inline bool TryLock()
ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
bool res = TryLockImpl();
ABSL_TSAN_MUTEX_POST_LOCK(
@@ -120,7 +121,7 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock {
// Determine if the lock is held. When the lock is held by the invoking
// thread, true will always be returned. Intended to be used as
// CHECK(lock.IsHeld()).
inline bool IsHeld() const {
ABSL_MUST_USE_RESULT inline bool IsHeld() const {
return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
}
@@ -202,6 +203,15 @@ class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock {
// Corresponding locker object that arranges to acquire a spinlock for
// the duration of a C++ scope.
//
// TODO(b/176172494): Use only [[nodiscard]] when baseline is raised.
// TODO(b/6695610): Remove forward declaration when #ifdef is no longer needed.
#if ABSL_HAVE_CPP_ATTRIBUTE(nodiscard)
class [[nodiscard]] SpinLockHolder;
#else
class ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_TRIVIAL_ABI SpinLockHolder;
#endif
class ABSL_SCOPED_LOCKABLE SpinLockHolder {
public:
inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)

View File

@@ -46,6 +46,10 @@
#include <rtems.h>
#endif
#if defined(__Fuchsia__)
#include <zircon/process.h>
#endif
#include <string.h>
#include <cassert>
@@ -461,6 +465,16 @@ pid_t GetTID() {
return reinterpret_cast<pid_t>(thread);
}
#elif defined(__Fuchsia__)
pid_t GetTID() {
// Use our thread handle as the TID, which should be unique within this
// process (but may not be globally unique). The handle value was chosen over
// a kernel object ID (KOID) because zx_handle_t (32-bits) can be cast to a
// pid_t type without loss of precision, but a zx_koid_t (64-bits) cannot.
return static_cast<pid_t>(zx_thread_self());
}
#else
// Fallback implementation of `GetTID` using `pthread_self`.

View File

@@ -130,7 +130,11 @@ struct PerThreadSynch {
};
// The instances of this class are allocated in NewThreadIdentity() with an
// alignment of PerThreadSynch::kAlignment.
// alignment of PerThreadSynch::kAlignment and never destroyed. Initialization
// should happen in OneTimeInitThreadIdentity().
//
// Instances may be reused by new threads - fields should be reset in
// ResetThreadIdentityBetweenReuse().
//
// NOTE: The layout of fields in this structure is critical, please do not
// add, remove, or modify the field placements without fully auditing the

View File

@@ -0,0 +1,39 @@
// Copyright 2024 The Abseil Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/base/internal/tracing.h"
#include "absl/base/attributes.h"
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
extern "C" {
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceWait)(
const void*, ObjectKind) {}
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceContinue)(
const void*, ObjectKind) {}
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceSignal)(
const void*, ObjectKind) {}
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceObserved)(
const void*, ObjectKind) {}
} // extern "C"
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl

View File

@@ -0,0 +1,81 @@
// Copyright 2024 The Abseil Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_BASE_INTERNAL_TRACING_H_
#define ABSL_BASE_INTERNAL_TRACING_H_
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
// Well known Abseil object types that have causality.
enum class ObjectKind { kUnknown, kBlockingCounter, kNotification };
// `TraceWait` and `TraceContinue` record the start and end of a potentially
// blocking wait operation on `object`. `object` typically represents a higher
// level synchronization object such as `absl::Notification`.
void TraceWait(const void* object, ObjectKind kind);
void TraceContinue(const void* object, ObjectKind kind);
// `TraceSignal` records a signal on `object`.
void TraceSignal(const void* object, ObjectKind kind);
// `TraceObserved` records the non-blocking observation of a signaled object.
void TraceObserved(const void* object, ObjectKind kind);
// ---------------------------------------------------------------------------
// Weak implementation detail:
//
// We define the weak API as extern "C": in some build configurations we pass
// `--detect-odr-violations` to the gold linker. This causes it to flag weak
// symbol overrides as ODR violations. Because ODR only applies to C++ and not
// C, `--detect-odr-violations` ignores symbols not mangled with C++ names.
// By changing our extension points to be extern "C", we dodge this check.
// ---------------------------------------------------------------------------
extern "C" {
void ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceWait)(const void* object,
ObjectKind kind);
void ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceContinue)(const void* object,
ObjectKind kind);
void ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceSignal)(const void* object,
ObjectKind kind);
void ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceObserved)(const void* object,
ObjectKind kind);
} // extern "C"
inline void TraceWait(const void* object, ObjectKind kind) {
ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceWait)(object, kind);
}
inline void TraceContinue(const void* object, ObjectKind kind) {
ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceContinue)(object, kind);
}
inline void TraceSignal(const void* object, ObjectKind kind) {
ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceSignal)(object, kind);
}
inline void TraceObserved(const void* object, ObjectKind kind) {
ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceObserved)(object, kind);
}
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_BASE_INTERNAL_TRACING_H_

View File

@@ -0,0 +1,117 @@
// Copyright 2024 The Abseil Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <tuple>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/tracing.h"
#if ABSL_HAVE_ATTRIBUTE_WEAK
namespace {
using ::testing::ElementsAre;
using ::absl::base_internal::ObjectKind;
enum Function { kWait, kContinue, kSignal, kObserved };
using Record = std::tuple<Function, const void*, ObjectKind>;
thread_local std::vector<Record>* tls_records = nullptr;
} // namespace
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {
// Strong extern "C" implementation.
extern "C" {
void ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceWait)(const void* object,
ObjectKind kind) {
if (tls_records != nullptr) {
tls_records->push_back({kWait, object, kind});
}
}
void ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceContinue)(const void* object,
ObjectKind kind) {
if (tls_records != nullptr) {
tls_records->push_back({kContinue, object, kind});
}
}
void ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceSignal)(const void* object,
ObjectKind kind) {
if (tls_records != nullptr) {
tls_records->push_back({kSignal, object, kind});
}
}
void ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceObserved)(const void* object,
ObjectKind kind) {
if (tls_records != nullptr) {
tls_records->push_back({kObserved, object, kind});
}
}
} // extern "C"
} // namespace base_internal
ABSL_NAMESPACE_END
} // namespace absl
namespace {
TEST(TracingInternal, InvokesStrongFunctionWithNullptr) {
std::vector<Record> records;
tls_records = &records;
auto kind = absl::base_internal::ObjectKind::kUnknown;
absl::base_internal::TraceWait(nullptr, kind);
absl::base_internal::TraceContinue(nullptr, kind);
absl::base_internal::TraceSignal(nullptr, kind);
absl::base_internal::TraceObserved(nullptr, kind);
tls_records = nullptr;
EXPECT_THAT(records, ElementsAre(Record{kWait, nullptr, kind},
Record{kContinue, nullptr, kind},
Record{kSignal, nullptr, kind},
Record{kObserved, nullptr, kind}));
}
TEST(TracingInternal, InvokesStrongFunctionWithObjectAddress) {
int object = 0;
std::vector<Record> records;
tls_records = &records;
auto kind = absl::base_internal::ObjectKind::kUnknown;
absl::base_internal::TraceWait(&object, kind);
absl::base_internal::TraceContinue(&object, kind);
absl::base_internal::TraceSignal(&object, kind);
absl::base_internal::TraceObserved(&object, kind);
tls_records = nullptr;
EXPECT_THAT(records, ElementsAre(Record{kWait, &object, kind},
Record{kContinue, &object, kind},
Record{kSignal, &object, kind},
Record{kObserved, &object, kind}));
}
} // namespace
#endif // ABSL_HAVE_ATTRIBUTE_WEAK

View File

@@ -0,0 +1,34 @@
// Copyright 2024 The Abseil Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "gtest/gtest.h"
#include "absl/base/internal/tracing.h"
namespace {
TEST(TracingInternal, HasDefaultImplementation) {
auto kind = absl::base_internal::ObjectKind::kUnknown;
absl::base_internal::TraceWait(nullptr, kind);
absl::base_internal::TraceContinue(nullptr, kind);
absl::base_internal::TraceSignal(nullptr, kind);
absl::base_internal::TraceObserved(nullptr, kind);
int object = 0;
absl::base_internal::TraceWait(&object, kind);
absl::base_internal::TraceContinue(&object, kind);
absl::base_internal::TraceSignal(&object, kind);
absl::base_internal::TraceObserved(&object, kind);
}
} // namespace

View File

@@ -121,18 +121,6 @@ double UnscaledCycleClock::Frequency() {
return aarch64_timer_frequency;
}
#elif defined(__riscv)
int64_t UnscaledCycleClock::Now() {
int64_t virtual_timer_value;
asm volatile("rdcycle %0" : "=r"(virtual_timer_value));
return virtual_timer_value;
}
double UnscaledCycleClock::Frequency() {
return base_internal::NominalCPUFrequency();
}
#elif defined(_M_IX86) || defined(_M_X64)
#pragma intrinsic(__rdtsc)

View File

@@ -21,8 +21,8 @@
// The following platforms have an implementation of a hardware counter.
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
defined(__powerpc__) || defined(__ppc__) || defined(__riscv) || \
defined(_M_IX86) || (defined(_M_X64) && !defined(_M_ARM64EC))
defined(__powerpc__) || defined(__ppc__) || defined(_M_IX86) || \
(defined(_M_X64) && !defined(_M_ARM64EC))
#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1
#else
#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0
@@ -53,8 +53,8 @@
#if ABSL_USE_UNSCALED_CYCLECLOCK
// This macro can be used to test if UnscaledCycleClock::Frequency()
// is NominalCPUFrequency() on a particular platform.
#if (defined(__i386__) || defined(__x86_64__) || defined(__riscv) || \
defined(_M_IX86) || defined(_M_X64))
#if (defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || \
defined(_M_X64))
#define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
#endif
#endif

View File

@@ -174,4 +174,16 @@ ABSL_NAMESPACE_END
#define ABSL_DEPRECATE_AND_INLINE()
#endif
// Requires the compiler to prove that the size of the given object is at least
// the expected amount.
#if ABSL_HAVE_ATTRIBUTE(diagnose_if) && ABSL_HAVE_BUILTIN(__builtin_object_size)
#define ABSL_INTERNAL_NEED_MIN_SIZE(Obj, N) \
__attribute__((diagnose_if(__builtin_object_size(Obj, 0) < N, \
"object size provably too small " \
"(this would corrupt memory)", \
"error")))
#else
#define ABSL_INTERNAL_NEED_MIN_SIZE(Obj, N)
#endif
#endif // ABSL_BASE_MACROS_H_

View File

@@ -21,14 +21,13 @@
// such an object survives during program exit (and can be safely accessed at
// any time).
//
// Objects of such type, if constructed safely and under the right conditions,
// provide two main benefits over other alternatives:
//
// * Global objects not normally allowed due to concerns of destruction order
// (i.e. no "complex globals") can be safely allowed, provided that such
// objects can be constant initialized.
// * Function scope static objects can be optimized to avoid heap allocation,
// pointer chasing, and allow lazy construction.
// absl::NoDestructor<T> is useful when when a variable has static storage
// duration but its type has a non-trivial destructor. Global constructors are
// not recommended because of the C++'s static initialization order fiasco (See
// https://en.cppreference.com/w/cpp/language/siof). Global destructors are not
// allowed due to similar concerns about destruction ordering. Using
// absl::NoDestructor<T> as a function-local static prevents both of these
// issues.
//
// See below for complete details.
@@ -41,6 +40,7 @@
#include <utility>
#include "absl/base/config.h"
#include "absl/base/nullability.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -49,8 +49,8 @@ ABSL_NAMESPACE_BEGIN
//
// NoDestructor<T> is a wrapper around an object of type T that behaves as an
// object of type T but never calls T's destructor. NoDestructor<T> makes it
// safer and/or more efficient to use such objects in static storage contexts:
// as global or function scope static variables.
// safer and/or more efficient to use such objects in static storage contexts,
// ideally as function scope static variables.
//
// An instance of absl::NoDestructor<T> has similar type semantics to an
// instance of T:
@@ -61,9 +61,6 @@ ABSL_NAMESPACE_BEGIN
// `->`, `*`, and `get()`.
// (Note that `const NoDestructor<T>` works like a pointer to const `T`.)
//
// An object of type NoDestructor<T> should be defined in static storage:
// as either a global static object, or as a function scope static variable.
//
// Additionally, NoDestructor<T> provides the following benefits:
//
// * Never calls T's destructor for the object
@@ -71,24 +68,7 @@ ABSL_NAMESPACE_BEGIN
// lazily constructed.
//
// An object of type NoDestructor<T> is "trivially destructible" in the notion
// that its destructor is never run. Provided that an object of this type can be
// safely initialized and does not need to be cleaned up on program shutdown,
// NoDestructor<T> allows you to define global static variables, since Google's
// C++ style guide ban on such objects doesn't apply to objects that are
// trivially destructible.
//
// Usage as Global Static Variables
//
// NoDestructor<T> allows declaration of a global object with a non-trivial
// constructor in static storage without needing to add a destructor.
// However, such objects still need to worry about initialization order, so
// such objects should be const initialized:
//
// // Global or namespace scope.
// constinit absl::NoDestructor<MyRegistry> reg{"foo", "bar", 8008};
//
// Note that if your object already has a trivial destructor, you don't need to
// use NoDestructor<T>.
// that its destructor is never run.
//
// Usage as Function Scope Static Variables
//
@@ -114,6 +94,21 @@ ABSL_NAMESPACE_BEGIN
// return *x;
// }
//
// Usage as Global Static Variables
//
// NoDestructor<T> allows declaration of a global object of type T that has a
// non-trivial destructor since its destructor is never run. However, such
// objects still need to worry about initialization order, so such use is not
// recommended, strongly discouraged by the Google C++ Style Guide, and outright
// banned in Chromium.
// See https://google.github.io/styleguide/cppguide.html#Static_and_Global_Variables
//
// // Global or namespace scope.
// absl::NoDestructor<MyRegistry> reg{"foo", "bar", 8008};
//
// Note that if your object already has a trivial destructor, you don't need to
// use NoDestructor<T>.
//
template <typename T>
class NoDestructor {
public:
@@ -140,11 +135,11 @@ class NoDestructor {
// Pretend to be a smart pointer to T with deep constness.
// Never returns a null pointer.
T& operator*() { return *get(); }
T* operator->() { return get(); }
T* get() { return impl_.get(); }
absl::Nonnull<T*> operator->() { return get(); }
absl::Nonnull<T*> get() { return impl_.get(); }
const T& operator*() const { return *get(); }
const T* operator->() const { return get(); }
const T* get() const { return impl_.get(); }
absl::Nonnull<const T*> operator->() const { return get(); }
absl::Nonnull<const T*> get() const { return impl_.get(); }
private:
class DirectImpl {
@@ -152,8 +147,8 @@ class NoDestructor {
template <typename... Args>
explicit constexpr DirectImpl(Args&&... args)
: value_(std::forward<Args>(args)...) {}
const T* get() const { return &value_; }
T* get() { return &value_; }
absl::Nonnull<const T*> get() const { return &value_; }
absl::Nonnull<T*> get() { return &value_; }
private:
T value_;
@@ -165,14 +160,14 @@ class NoDestructor {
explicit PlacementImpl(Args&&... args) {
new (&space_) T(std::forward<Args>(args)...);
}
const T* get() const {
absl::Nonnull<const T*> get() const {
return Launder(reinterpret_cast<const T*>(&space_));
}
T* get() { return Launder(reinterpret_cast<T*>(&space_)); }
absl::Nonnull<T*> get() { return Launder(reinterpret_cast<T*>(&space_)); }
private:
template <typename P>
static P* Launder(P* p) {
static absl::Nonnull<P*> Launder(absl::Nonnull<P*> p) {
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606L
return std::launder(p);
#elif ABSL_HAVE_BUILTIN(__builtin_launder)

View File

@@ -158,14 +158,16 @@
#ifndef ABSL_BASE_NULLABILITY_H_
#define ABSL_BASE_NULLABILITY_H_
#include "absl/base/config.h"
#include "absl/base/internal/nullability_impl.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
// absl::Nonnull
//
// The indicated pointer is never null. It is the responsibility of the provider
// of this pointer across an API boundary to ensure that the pointer is never be
// of this pointer across an API boundary to ensure that the pointer is never
// set to null. Consumers of this pointer across an API boundary may safely
// dereference the pointer.
//
@@ -206,9 +208,9 @@ using Nullable = nullability_internal::NullableImpl<T>;
// migrated into one of the above two nullability states: `Nonnull<T>` or
// `Nullable<T>`.
//
// NOTE: Because this annotation is the global default state, pointers without
// any annotation are assumed to have "unknown" semantics. This assumption is
// designed to minimize churn and reduce clutter within the codebase.
// NOTE: Because this annotation is the global default state, unannotated
// pointers are assumed to have "unknown" semantics. This assumption is designed
// to minimize churn and reduce clutter within the codebase.
//
// Example:
//
@@ -227,6 +229,7 @@ using Nullable = nullability_internal::NullableImpl<T>;
template <typename T>
using NullabilityUnknown = nullability_internal::NullabilityUnknownImpl<T>;
ABSL_NAMESPACE_END
} // namespace absl
// ABSL_NULLABILITY_COMPATIBLE

View File

@@ -18,12 +18,23 @@
// -----------------------------------------------------------------------------
//
// This header file defines portable macros for performance optimization.
//
// This header is included in both C++ code and legacy C code and thus must
// remain compatible with both C and C++. C compatibility will be removed if
// the legacy code is removed or converted to C++. Do not include this header in
// new code that requires C compatibility or assume C compatibility will remain
// indefinitely.
#ifndef ABSL_BASE_OPTIMIZATION_H_
#define ABSL_BASE_OPTIMIZATION_H_
#include <assert.h>
#ifdef __cplusplus
// Included for std::unreachable()
#include <utility>
#endif // __cplusplus
#include "absl/base/config.h"
#include "absl/base/options.h"

View File

@@ -135,9 +135,7 @@ void PrefetchToLocalCacheNta(const void* addr);
//
void PrefetchToLocalCacheForWrite(const void* addr);
#if (ABSL_HAVE_BUILTIN(__builtin_prefetch) && \
!(defined(_MSC_VER) && defined(_M_ARM64))) || \
defined(__GNUC__)
#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__)
#define ABSL_HAVE_PREFETCH 1

View File

@@ -48,7 +48,7 @@ class FunctorClass {
explicit FunctorClass(Callback callback) : callback_(std::move(callback)) {}
FunctorClass(FunctorClass&& other)
: callback_(absl::exchange(other.callback_, Callback())) {}
: callback_(std::exchange(other.callback_, Callback())) {}
FunctorClass(const FunctorClass&) = delete;

View File

@@ -126,6 +126,7 @@ cc_library(
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":compressed_tuple",
"//absl/base:base_internal",
"//absl/base:config",
"//absl/base:core_headers",
"//absl/memory",
@@ -247,11 +248,11 @@ cc_library(
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":container_memory",
":hash_function_defaults",
":hash_container_defaults",
":raw_hash_map",
"//absl/algorithm:container",
"//absl/base:core_headers",
"//absl/memory",
"//absl/meta:type_traits",
],
)
@@ -264,11 +265,13 @@ cc_test(
deps = [
":flat_hash_map",
":hash_generator_testing",
":hash_policy_testing",
":test_allocator",
":unordered_map_constructor_test",
":unordered_map_lookup_test",
":unordered_map_members_test",
":unordered_map_modifiers_test",
"//absl/base:config",
"//absl/log:check",
"//absl/meta:type_traits",
"//absl/types:any",
@@ -284,11 +287,12 @@ cc_library(
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":container_memory",
":hash_function_defaults",
":hash_container_defaults",
":raw_hash_set",
"//absl/algorithm:container",
"//absl/base:core_headers",
"//absl/memory",
"//absl/meta:type_traits",
],
)
@@ -323,12 +327,13 @@ cc_library(
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":container_memory",
":hash_function_defaults",
":hash_container_defaults",
":node_slot_policy",
":raw_hash_map",
"//absl/algorithm:container",
"//absl/base:core_headers",
"//absl/memory",
"//absl/meta:type_traits",
],
)
@@ -339,13 +344,14 @@ cc_test(
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = ["no_test_loonix"],
deps = [
":hash_generator_testing",
":hash_policy_testing",
":node_hash_map",
":tracked",
":unordered_map_constructor_test",
":unordered_map_lookup_test",
":unordered_map_members_test",
":unordered_map_modifiers_test",
"//absl/base:config",
"@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
@@ -358,12 +364,13 @@ cc_library(
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":container_memory",
":hash_function_defaults",
":hash_container_defaults",
":node_slot_policy",
":raw_hash_set",
"//absl/algorithm:container",
"//absl/base:core_headers",
"//absl/memory",
"//absl/meta:type_traits",
],
)
@@ -374,11 +381,15 @@ cc_test(
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = ["no_test_loonix"],
deps = [
":hash_generator_testing",
":hash_policy_testing",
":node_hash_set",
":unordered_set_constructor_test",
":unordered_set_lookup_test",
":unordered_set_members_test",
":unordered_set_modifiers_test",
"//absl/base:config",
"//absl/memory",
"@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
@@ -432,6 +443,17 @@ cc_library(
],
)
cc_library(
name = "hash_container_defaults",
hdrs = ["hash_container_defaults.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_function_defaults",
"//absl/base:config",
],
)
cc_test(
name = "hash_function_defaults_test",
srcs = ["internal/hash_function_defaults_test.cc"],
@@ -695,17 +717,22 @@ cc_test(
":hash_policy_testing",
":hashtable_debug",
":hashtablez_sampler",
":node_hash_set",
":raw_hash_set",
":test_allocator",
":test_instance_tracker",
"//absl/base",
"//absl/base:config",
"//absl/base:core_headers",
"//absl/base:prefetch",
"//absl/functional:function_ref",
"//absl/hash",
"//absl/log",
"//absl/log:check",
"//absl/memory",
"//absl/meta:type_traits",
"//absl/strings",
"//absl/types:optional",
"@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
@@ -724,6 +751,7 @@ cc_binary(
":hash_function_defaults",
":raw_hash_set",
"//absl/base:raw_logging_internal",
"//absl/random",
"//absl/strings:str_format",
"@com_github_google_benchmark//:benchmark_main",
],
@@ -1002,6 +1030,7 @@ cc_library(
":compressed_tuple",
":container_memory",
":layout",
"//absl/base:config",
"//absl/base:core_headers",
"//absl/base:raw_logging_internal",
"//absl/base:throw_delegate",
@@ -1010,7 +1039,6 @@ cc_library(
"//absl/strings",
"//absl/strings:cord",
"//absl/types:compare",
"//absl/utility",
],
)

View File

@@ -39,6 +39,7 @@ absl_source_set("inlined_vector_internal") {
public = [ "internal/inlined_vector.h" ]
deps = [
":compressed_tuple",
"//third_party/abseil-cpp/absl/base:base_internal",
"//third_party/abseil-cpp/absl/base:config",
"//third_party/abseil-cpp/absl/base:core_headers",
"//third_party/abseil-cpp/absl/memory",
@@ -97,11 +98,11 @@ absl_source_set("flat_hash_map") {
public = [ "flat_hash_map.h" ]
deps = [
":container_memory",
":hash_function_defaults",
":hash_container_defaults",
":raw_hash_map",
"//third_party/abseil-cpp/absl/algorithm:container",
"//third_party/abseil-cpp/absl/base:core_headers",
"//third_party/abseil-cpp/absl/memory",
"//third_party/abseil-cpp/absl/meta:type_traits",
]
}
@@ -110,11 +111,13 @@ absl_test("flat_hash_map_test") {
deps = [
":flat_hash_map",
":hash_generator_testing",
":hash_policy_testing",
":test_allocator",
":unordered_map_constructor_test",
":unordered_map_lookup_test",
":unordered_map_members_test",
":unordered_map_modifiers_test",
"//third_party/abseil-cpp/absl/base:config",
"//third_party/abseil-cpp/absl/log:check",
"//third_party/abseil-cpp/absl/meta:type_traits",
"//third_party/abseil-cpp/absl/types:any",
@@ -125,11 +128,12 @@ absl_source_set("flat_hash_set") {
public = [ "flat_hash_set.h" ]
deps = [
":container_memory",
":hash_function_defaults",
":hash_container_defaults",
":raw_hash_set",
"//third_party/abseil-cpp/absl/algorithm:container",
"//third_party/abseil-cpp/absl/base:core_headers",
"//third_party/abseil-cpp/absl/memory",
"//third_party/abseil-cpp/absl/meta:type_traits",
]
}
@@ -155,12 +159,27 @@ absl_source_set("node_hash_map") {
public = [ "node_hash_map.h" ]
deps = [
":container_memory",
":hash_function_defaults",
":hash_container_defaults",
":node_slot_policy",
":raw_hash_map",
"//third_party/abseil-cpp/absl/algorithm:container",
"//third_party/abseil-cpp/absl/base:core_headers",
"//third_party/abseil-cpp/absl/memory",
"//third_party/abseil-cpp/absl/meta:type_traits",
]
}
absl_test("node_hash_map_test") {
sources = [ "node_hash_map_test.cc" ]
deps = [
":hash_policy_testing",
":node_hash_map",
":tracked",
":unordered_map_constructor_test",
":unordered_map_lookup_test",
":unordered_map_members_test",
":unordered_map_modifiers_test",
"//third_party/abseil-cpp/absl/base:config",
]
}
@@ -168,12 +187,28 @@ absl_source_set("node_hash_set") {
public = [ "node_hash_set.h" ]
deps = [
":container_memory",
":hash_function_defaults",
":hash_container_defaults",
":node_slot_policy",
":raw_hash_set",
"//third_party/abseil-cpp/absl/algorithm:container",
"//third_party/abseil-cpp/absl/base:core_headers",
"//third_party/abseil-cpp/absl/memory",
"//third_party/abseil-cpp/absl/meta:type_traits",
]
}
absl_test("node_hash_set_test") {
sources = [ "node_hash_set_test.cc" ]
deps = [
":hash_generator_testing",
":hash_policy_testing",
":node_hash_set",
":unordered_set_constructor_test",
":unordered_set_lookup_test",
":unordered_set_members_test",
":unordered_set_modifiers_test",
"//third_party/abseil-cpp/absl/base:config",
"//third_party/abseil-cpp/absl/memory",
]
}
@@ -213,6 +248,15 @@ absl_source_set("hash_function_defaults") {
]
}
absl_source_set("hash_container_defaults") {
public = [ "hash_container_defaults.h" ]
visibility = [ "//third_party/abseil-cpp/absl/container:*" ]
deps = [
":hash_function_defaults",
"//third_party/abseil-cpp/absl/base:config",
]
}
absl_test("hash_function_defaults_test") {
sources = [ "internal/hash_function_defaults_test.cc" ]
deps = [
@@ -370,17 +414,22 @@ absl_test("raw_hash_set_test") {
":hash_policy_testing",
":hashtable_debug",
":hashtablez_sampler",
":node_hash_set",
":raw_hash_set",
":test_allocator",
":test_instance_tracker",
"//third_party/abseil-cpp/absl/base",
"//third_party/abseil-cpp/absl/base:config",
"//third_party/abseil-cpp/absl/base:core_headers",
"//third_party/abseil-cpp/absl/base:prefetch",
"//third_party/abseil-cpp/absl/functional:function_ref",
"//third_party/abseil-cpp/absl/hash",
"//third_party/abseil-cpp/absl/log",
"//third_party/abseil-cpp/absl/log:check",
"//third_party/abseil-cpp/absl/memory",
"//third_party/abseil-cpp/absl/meta:type_traits",
"//third_party/abseil-cpp/absl/strings",
"//third_party/abseil-cpp/absl/types:optional",
]
}
@@ -528,6 +577,7 @@ absl_source_set("btree") {
":compressed_tuple",
":container_memory",
":layout",
"//third_party/abseil-cpp/absl/base:config",
"//third_party/abseil-cpp/absl/base:core_headers",
"//third_party/abseil-cpp/absl/base:raw_logging_internal",
"//third_party/abseil-cpp/absl/base:throw_delegate",
@@ -536,7 +586,6 @@ absl_source_set("btree") {
"//third_party/abseil-cpp/absl/strings:cord",
"//third_party/abseil-cpp/absl/strings:string_view",
"//third_party/abseil-cpp/absl/types:compare",
"//third_party/abseil-cpp/absl/utility",
]
}

View File

@@ -27,10 +27,11 @@ absl_cc_library(
LINKOPTS
${ABSL_DEFAULT_LINKOPTS}
DEPS
absl::container_common
absl::common_policy_traits
absl::compare
absl::compressed_tuple
absl::config
absl::container_common
absl::container_memory
absl::cord
absl::core_headers
@@ -40,7 +41,6 @@ absl_cc_library(
absl::strings
absl::throw_delegate
absl::type_traits
absl::utility
)
# Internal-only target, do not depend on directly.
@@ -176,6 +176,7 @@ absl_cc_library(
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::base_internal
absl::compressed_tuple
absl::config
absl::core_headers
@@ -288,10 +289,10 @@ absl_cc_library(
DEPS
absl::container_memory
absl::core_headers
absl::hash_function_defaults
absl::hash_container_defaults
absl::raw_hash_map
absl::algorithm_container
absl::memory
absl::type_traits
PUBLIC
)
@@ -305,8 +306,10 @@ absl_cc_test(
DEPS
absl::any
absl::check
absl::config
absl::flat_hash_map
absl::hash_generator_testing
absl::hash_policy_testing
absl::test_allocator
absl::type_traits
absl::unordered_map_constructor_test
@@ -325,11 +328,12 @@ absl_cc_library(
${ABSL_DEFAULT_COPTS}
DEPS
absl::container_memory
absl::hash_function_defaults
absl::hash_container_defaults
absl::raw_hash_set
absl::algorithm_container
absl::core_headers
absl::memory
absl::type_traits
PUBLIC
)
@@ -367,11 +371,12 @@ absl_cc_library(
DEPS
absl::container_memory
absl::core_headers
absl::hash_function_defaults
absl::hash_container_defaults
absl::node_slot_policy
absl::raw_hash_map
absl::algorithm_container
absl::memory
absl::type_traits
PUBLIC
)
@@ -383,7 +388,8 @@ absl_cc_test(
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::hash_generator_testing
absl::config
absl::hash_policy_testing
absl::node_hash_map
absl::tracked
absl::unordered_map_constructor_test
@@ -403,11 +409,12 @@ absl_cc_library(
DEPS
absl::container_memory
absl::core_headers
absl::hash_function_defaults
absl::hash_container_defaults
absl::node_slot_policy
absl::raw_hash_set
absl::algorithm_container
absl::memory
absl::type_traits
PUBLIC
)
@@ -421,7 +428,10 @@ absl_cc_test(
"-DUNORDERED_SET_CXX17"
DEPS
absl::hash_generator_testing
absl::hash_policy_testing
absl::memory
absl::node_hash_set
absl::type_traits
absl::unordered_set_constructor_test
absl::unordered_set_lookup_test
absl::unordered_set_members_test
@@ -429,6 +439,19 @@ absl_cc_test(
GTest::gmock_main
)
absl_cc_library(
NAME
hash_container_defaults
HDRS
"hash_container_defaults.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::config
absl::hash_function_defaults
PUBLIC
)
# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
@@ -753,11 +776,13 @@ absl_cc_test(
${ABSL_TEST_COPTS}
DEPS
absl::base
absl::check
absl::config
absl::container_memory
absl::core_headers
absl::flat_hash_map
absl::flat_hash_set
absl::function_ref
absl::hash
absl::hash_function_defaults
absl::hash_policy_testing
@@ -765,10 +790,13 @@ absl_cc_test(
absl::hashtablez_sampler
absl::log
absl::memory
absl::node_hash_set
absl::optional
absl::prefetch
absl::raw_hash_set
absl::strings
absl::test_allocator
absl::test_instance_tracker
absl::type_traits
GTest::gmock_main
)

View File

@@ -49,6 +49,8 @@
//
// Another API difference is that btree iterators can be subtracted, and this
// is faster than using std::distance.
//
// B-tree maps are not exception-safe.
#ifndef ABSL_CONTAINER_BTREE_MAP_H_
#define ABSL_CONTAINER_BTREE_MAP_H_
@@ -85,7 +87,7 @@ struct map_params;
//
template <typename Key, typename Value, typename Compare = std::less<Key>,
typename Alloc = std::allocator<std::pair<const Key, Value>>>
class btree_map
class ABSL_INTERNAL_ATTRIBUTE_OWNER btree_map
: public container_internal::btree_map_container<
container_internal::btree<container_internal::map_params<
Key, Value, Compare, Alloc, /*TargetNodeSize=*/256,
@@ -523,7 +525,7 @@ typename btree_map<K, V, C, A>::size_type erase_if(
//
template <typename Key, typename Value, typename Compare = std::less<Key>,
typename Alloc = std::allocator<std::pair<const Key, Value>>>
class btree_multimap
class ABSL_INTERNAL_ATTRIBUTE_OWNER btree_multimap
: public container_internal::btree_multimap_container<
container_internal::btree<container_internal::map_params<
Key, Value, Compare, Alloc, /*TargetNodeSize=*/256,

View File

@@ -48,10 +48,13 @@
//
// Another API difference is that btree iterators can be subtracted, and this
// is faster than using std::distance.
//
// B-tree sets are not exception-safe.
#ifndef ABSL_CONTAINER_BTREE_SET_H_
#define ABSL_CONTAINER_BTREE_SET_H_
#include "absl/base/attributes.h"
#include "absl/container/internal/btree.h" // IWYU pragma: export
#include "absl/container/internal/btree_container.h" // IWYU pragma: export
@@ -86,7 +89,7 @@ struct set_params;
//
template <typename Key, typename Compare = std::less<Key>,
typename Alloc = std::allocator<Key>>
class btree_set
class ABSL_INTERNAL_ATTRIBUTE_OWNER btree_set
: public container_internal::btree_set_container<
container_internal::btree<container_internal::set_params<
Key, Compare, Alloc, /*TargetNodeSize=*/256,
@@ -442,7 +445,7 @@ typename btree_set<K, C, A>::size_type erase_if(btree_set<K, C, A> &set,
//
template <typename Key, typename Compare = std::less<Key>,
typename Alloc = std::allocator<Key>>
class btree_multiset
class ABSL_INTERNAL_ATTRIBUTE_OWNER btree_multiset
: public container_internal::btree_multiset_container<
container_internal::btree<container_internal::set_params<
Key, Compare, Alloc, /*TargetNodeSize=*/256,

View File

@@ -26,21 +26,24 @@
//
// In most cases, your default choice for a hash map should be a map of type
// `flat_hash_map`.
//
// `flat_hash_map` is not exception-safe.
#ifndef ABSL_CONTAINER_FLAT_HASH_MAP_H_
#define ABSL_CONTAINER_FLAT_HASH_MAP_H_
#include <cstddef>
#include <new>
#include <memory>
#include <type_traits>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/base/macros.h"
#include "absl/container/hash_container_defaults.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
#include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -119,13 +122,13 @@ struct FlatHashMapPolicy;
// if (result != ducks.end()) {
// std::cout << "Result: " << result->second << std::endl;
// }
template <class K, class V,
class Hash = absl::container_internal::hash_default_hash<K>,
class Eq = absl::container_internal::hash_default_eq<K>,
template <class K, class V, class Hash = DefaultHashContainerHash<K>,
class Eq = DefaultHashContainerEq<K>,
class Allocator = std::allocator<std::pair<const K, V>>>
class flat_hash_map : public absl::container_internal::raw_hash_map<
absl::container_internal::FlatHashMapPolicy<K, V>,
Hash, Eq, Allocator> {
class ABSL_INTERNAL_ATTRIBUTE_OWNER flat_hash_map
: public absl::container_internal::raw_hash_map<
absl::container_internal::FlatHashMapPolicy<K, V>, Hash, Eq,
Allocator> {
using Base = typename flat_hash_map::raw_hash_map;
public:
@@ -423,8 +426,7 @@ class flat_hash_map : public absl::container_internal::raw_hash_map<
// flat_hash_map::swap(flat_hash_map& other)
//
// Exchanges the contents of this `flat_hash_map` with those of the `other`
// flat hash map, avoiding invocation of any move, copy, or swap operations on
// individual elements.
// flat hash map.
//
// All iterators and references on the `flat_hash_map` remain valid, excepting
// for the past-the-end iterator, which is invalidated.
@@ -571,6 +573,53 @@ typename flat_hash_map<K, V, H, E, A>::size_type erase_if(
return container_internal::EraseIf(pred, &c);
}
// swap(flat_hash_map<>, flat_hash_map<>)
//
// Swaps the contents of two `flat_hash_map` containers.
//
// NOTE: we need to define this function template in order for
// `flat_hash_set::swap` to be called instead of `std::swap`. Even though we
// have `swap(raw_hash_set&, raw_hash_set&)` defined, that function requires a
// derived-to-base conversion, whereas `std::swap` is a function template so
// `std::swap` will be preferred by compiler.
template <typename K, typename V, typename H, typename E, typename A>
void swap(flat_hash_map<K, V, H, E, A>& x,
flat_hash_map<K, V, H, E, A>& y) noexcept(noexcept(x.swap(y))) {
x.swap(y);
}
namespace container_internal {
// c_for_each_fast(flat_hash_map<>, Function)
//
// Container-based version of the <algorithm> `std::for_each()` function to
// apply a function to a container's elements.
// There is no guarantees on the order of the function calls.
// Erasure and/or insertion of elements in the function is not allowed.
template <typename K, typename V, typename H, typename E, typename A,
typename Function>
decay_t<Function> c_for_each_fast(const flat_hash_map<K, V, H, E, A>& c,
Function&& f) {
container_internal::ForEach(f, &c);
return f;
}
template <typename K, typename V, typename H, typename E, typename A,
typename Function>
decay_t<Function> c_for_each_fast(flat_hash_map<K, V, H, E, A>& c,
Function&& f) {
container_internal::ForEach(f, &c);
return f;
}
template <typename K, typename V, typename H, typename E, typename A,
typename Function>
decay_t<Function> c_for_each_fast(flat_hash_map<K, V, H, E, A>&& c,
Function&& f) {
container_internal::ForEach(f, &c);
return f;
}
} // namespace container_internal
namespace container_internal {
template <class K, class V>

View File

@@ -16,12 +16,16 @@
#include <cstddef>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/hash_policy_testing.h"
#include "absl/container/internal/test_allocator.h"
#include "absl/container/internal/unordered_map_constructor_test.h"
#include "absl/container/internal/unordered_map_lookup_test.h"
@@ -41,6 +45,7 @@ using ::testing::_;
using ::testing::IsEmpty;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
using ::testing::UnorderedElementsAreArray;
// Check that absl::flat_hash_map works in a global constructor.
struct BeforeMain {
@@ -303,6 +308,58 @@ TEST(FlatHashMap, EraseIf) {
}
}
TEST(FlatHashMap, CForEach) {
flat_hash_map<int, int> m;
std::vector<std::pair<int, int>> expected;
for (int i = 0; i < 100; ++i) {
{
SCOPED_TRACE("mutable object iteration");
std::vector<std::pair<int, int>> v;
absl::container_internal::c_for_each_fast(
m, [&v](std::pair<const int, int>& p) { v.push_back(p); });
EXPECT_THAT(v, UnorderedElementsAreArray(expected));
}
{
SCOPED_TRACE("const object iteration");
std::vector<std::pair<int, int>> v;
const flat_hash_map<int, int>& cm = m;
absl::container_internal::c_for_each_fast(
cm, [&v](const std::pair<const int, int>& p) { v.push_back(p); });
EXPECT_THAT(v, UnorderedElementsAreArray(expected));
}
{
SCOPED_TRACE("const object iteration");
std::vector<std::pair<int, int>> v;
absl::container_internal::c_for_each_fast(
flat_hash_map<int, int>(m),
[&v](std::pair<const int, int>& p) { v.push_back(p); });
EXPECT_THAT(v, UnorderedElementsAreArray(expected));
}
m[i] = i;
expected.emplace_back(i, i);
}
}
TEST(FlatHashMap, CForEachMutate) {
flat_hash_map<int, int> s;
std::vector<std::pair<int, int>> expected;
for (int i = 0; i < 100; ++i) {
std::vector<std::pair<int, int>> v;
absl::container_internal::c_for_each_fast(
s, [&v](std::pair<const int, int>& p) {
v.push_back(p);
p.second++;
});
EXPECT_THAT(v, UnorderedElementsAreArray(expected));
for (auto& p : expected) {
p.second++;
}
EXPECT_THAT(s, UnorderedElementsAreArray(expected));
s[i] = i;
expected.emplace_back(i, i);
}
}
// This test requires std::launder for mutable key access in node handles.
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
TEST(FlatHashMap, NodeHandleMutableKeyAccess) {
@@ -363,6 +420,38 @@ TEST(FlatHashMap, FlatHashMapPolicyDestroyReturnsTrue) {
std::allocator<char>>(nullptr, nullptr))()));
}
struct InconsistentHashEqType {
InconsistentHashEqType(int v1, int v2) : v1(v1), v2(v2) {}
template <typename H>
friend H AbslHashValue(H h, InconsistentHashEqType t) {
return H::combine(std::move(h), t.v1);
}
bool operator==(InconsistentHashEqType t) const { return v2 == t.v2; }
int v1, v2;
};
TEST(Iterator, InconsistentHashEqFunctorsValidation) {
if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
absl::flat_hash_map<InconsistentHashEqType, int> m;
for (int i = 0; i < 10; ++i) m[{i, i}] = 1;
// We need to insert multiple times to guarantee that we get the assertion
// because it's possible for the hash to collide with the inserted element
// that has v2==0. In those cases, the new element won't be inserted.
auto insert_conflicting_elems = [&] {
for (int i = 100; i < 20000; ++i) {
EXPECT_EQ((m[{i, 0}]), 1);
}
};
const char* crash_message = "hash/eq functors are inconsistent.";
#if defined(__arm__) || defined(__aarch64__)
// On ARM, the crash message is garbled so don't expect a specific message.
crash_message = "";
#endif
EXPECT_DEATH_IF_SUPPORTED(insert_conflicting_elems(), crash_message);
}
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END

View File

@@ -26,6 +26,9 @@
//
// In most cases, your default choice for a hash set should be a set of type
// `flat_hash_set`.
//
// `flat_hash_set` is not exception-safe.
#ifndef ABSL_CONTAINER_FLAT_HASH_SET_H_
#define ABSL_CONTAINER_FLAT_HASH_SET_H_
@@ -35,11 +38,13 @@
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/base/attributes.h"
#include "absl/base/macros.h"
#include "absl/container/hash_container_defaults.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -114,10 +119,10 @@ struct FlatHashSetPolicy;
// if (ducks.contains("dewey")) {
// std::cout << "We found dewey!" << std::endl;
// }
template <class T, class Hash = absl::container_internal::hash_default_hash<T>,
class Eq = absl::container_internal::hash_default_eq<T>,
template <class T, class Hash = DefaultHashContainerHash<T>,
class Eq = DefaultHashContainerEq<T>,
class Allocator = std::allocator<T>>
class flat_hash_set
class ABSL_INTERNAL_ATTRIBUTE_OWNER flat_hash_set
: public absl::container_internal::raw_hash_set<
absl::container_internal::FlatHashSetPolicy<T>, Hash, Eq, Allocator> {
using Base = typename flat_hash_set::raw_hash_set;
@@ -355,8 +360,7 @@ class flat_hash_set
// flat_hash_set::swap(flat_hash_set& other)
//
// Exchanges the contents of this `flat_hash_set` with those of the `other`
// flat hash set, avoiding invocation of any move, copy, or swap operations on
// individual elements.
// flat hash set.
//
// All iterators and references on the `flat_hash_set` remain valid, excepting
// for the past-the-end iterator, which is invalidated.
@@ -473,6 +477,48 @@ typename flat_hash_set<T, H, E, A>::size_type erase_if(
return container_internal::EraseIf(pred, &c);
}
// swap(flat_hash_set<>, flat_hash_set<>)
//
// Swaps the contents of two `flat_hash_set` containers.
//
// NOTE: we need to define this function template in order for
// `flat_hash_set::swap` to be called instead of `std::swap`. Even though we
// have `swap(raw_hash_set&, raw_hash_set&)` defined, that function requires a
// derived-to-base conversion, whereas `std::swap` is a function template so
// `std::swap` will be preferred by compiler.
template <typename T, typename H, typename E, typename A>
void swap(flat_hash_set<T, H, E, A>& x,
flat_hash_set<T, H, E, A>& y) noexcept(noexcept(x.swap(y))) {
return x.swap(y);
}
namespace container_internal {
// c_for_each_fast(flat_hash_set<>, Function)
//
// Container-based version of the <algorithm> `std::for_each()` function to
// apply a function to a container's elements.
// There is no guarantees on the order of the function calls.
// Erasure and/or insertion of elements in the function is not allowed.
template <typename T, typename H, typename E, typename A, typename Function>
decay_t<Function> c_for_each_fast(const flat_hash_set<T, H, E, A>& c,
Function&& f) {
container_internal::ForEach(f, &c);
return f;
}
template <typename T, typename H, typename E, typename A, typename Function>
decay_t<Function> c_for_each_fast(flat_hash_set<T, H, E, A>& c, Function&& f) {
container_internal::ForEach(f, &c);
return f;
}
template <typename T, typename H, typename E, typename A, typename Function>
decay_t<Function> c_for_each_fast(flat_hash_set<T, H, E, A>&& c, Function&& f) {
container_internal::ForEach(f, &c);
return f;
}
} // namespace container_internal
namespace container_internal {
template <class T>

View File

@@ -181,15 +181,46 @@ TEST(FlatHashSet, EraseIf) {
}
}
class PoisonInline {
TEST(FlatHashSet, CForEach) {
using ValueType = std::pair<int, int>;
flat_hash_set<ValueType> s;
std::vector<ValueType> expected;
for (int i = 0; i < 100; ++i) {
{
SCOPED_TRACE("mutable object iteration");
std::vector<ValueType> v;
absl::container_internal::c_for_each_fast(
s, [&v](const ValueType& p) { v.push_back(p); });
ASSERT_THAT(v, UnorderedElementsAreArray(expected));
}
{
SCOPED_TRACE("const object iteration");
std::vector<ValueType> v;
const flat_hash_set<ValueType>& cs = s;
absl::container_internal::c_for_each_fast(
cs, [&v](const ValueType& p) { v.push_back(p); });
ASSERT_THAT(v, UnorderedElementsAreArray(expected));
}
{
SCOPED_TRACE("temporary object iteration");
std::vector<ValueType> v;
absl::container_internal::c_for_each_fast(
flat_hash_set<ValueType>(s),
[&v](const ValueType& p) { v.push_back(p); });
ASSERT_THAT(v, UnorderedElementsAreArray(expected));
}
s.emplace(i, i);
expected.emplace_back(i, i);
}
}
class PoisonSoo {
int64_t data_;
public:
explicit PoisonInline(int64_t d) : data_(d) {
SanitizerPoisonObject(&data_);
}
PoisonInline(const PoisonInline& that) : PoisonInline(*that) {}
~PoisonInline() { SanitizerUnpoisonObject(&data_); }
explicit PoisonSoo(int64_t d) : data_(d) { SanitizerPoisonObject(&data_); }
PoisonSoo(const PoisonSoo& that) : PoisonSoo(*that) {}
~PoisonSoo() { SanitizerUnpoisonObject(&data_); }
int64_t operator*() const {
SanitizerUnpoisonObject(&data_);
@@ -198,45 +229,56 @@ class PoisonInline {
return ret;
}
template <typename H>
friend H AbslHashValue(H h, const PoisonInline& pi) {
friend H AbslHashValue(H h, const PoisonSoo& pi) {
return H::combine(std::move(h), *pi);
}
bool operator==(const PoisonInline& rhs) const { return **this == *rhs; }
bool operator==(const PoisonSoo& rhs) const { return **this == *rhs; }
};
// Tests that we don't touch the poison_ member of PoisonInline.
TEST(FlatHashSet, PoisonInline) {
PoisonInline a(0), b(1);
{ // basic usage
flat_hash_set<PoisonInline> set;
set.insert(a);
EXPECT_THAT(set, UnorderedElementsAre(a));
set.insert(b);
EXPECT_THAT(set, UnorderedElementsAre(a, b));
set.erase(a);
EXPECT_THAT(set, UnorderedElementsAre(b));
set.rehash(0); // shrink to inline
EXPECT_THAT(set, UnorderedElementsAre(b));
}
{ // test move constructor from inline to inline
flat_hash_set<PoisonInline> set;
set.insert(a);
flat_hash_set<PoisonInline> set2(std::move(set));
EXPECT_THAT(set2, UnorderedElementsAre(a));
}
{ // test move assignment from inline to inline
flat_hash_set<PoisonInline> set, set2;
set.insert(a);
set2 = std::move(set);
EXPECT_THAT(set2, UnorderedElementsAre(a));
}
{ // test alloc move constructor from inline to inline
flat_hash_set<PoisonInline> set;
set.insert(a);
flat_hash_set<PoisonInline> set2(std::move(set),
std::allocator<PoisonInline>());
EXPECT_THAT(set2, UnorderedElementsAre(a));
}
TEST(FlatHashSet, PoisonSooBasic) {
PoisonSoo a(0), b(1);
flat_hash_set<PoisonSoo> set;
set.insert(a);
EXPECT_THAT(set, UnorderedElementsAre(a));
set.insert(b);
EXPECT_THAT(set, UnorderedElementsAre(a, b));
set.erase(a);
EXPECT_THAT(set, UnorderedElementsAre(b));
set.rehash(0); // Shrink to SOO.
EXPECT_THAT(set, UnorderedElementsAre(b));
}
TEST(FlatHashSet, PoisonSooMoveConstructSooToSoo) {
PoisonSoo a(0);
flat_hash_set<PoisonSoo> set;
set.insert(a);
flat_hash_set<PoisonSoo> set2(std::move(set));
EXPECT_THAT(set2, UnorderedElementsAre(a));
}
TEST(FlatHashSet, PoisonSooAllocMoveConstructSooToSoo) {
PoisonSoo a(0);
flat_hash_set<PoisonSoo> set;
set.insert(a);
flat_hash_set<PoisonSoo> set2(std::move(set), std::allocator<PoisonSoo>());
EXPECT_THAT(set2, UnorderedElementsAre(a));
}
TEST(FlatHashSet, PoisonSooMoveAssignFullSooToEmptySoo) {
PoisonSoo a(0);
flat_hash_set<PoisonSoo> set, set2;
set.insert(a);
set2 = std::move(set);
EXPECT_THAT(set2, UnorderedElementsAre(a));
}
TEST(FlatHashSet, PoisonSooMoveAssignFullSooToFullSoo) {
PoisonSoo a(0), b(1);
flat_hash_set<PoisonSoo> set, set2;
set.insert(a);
set2.insert(b);
set2 = std::move(set);
EXPECT_THAT(set2, UnorderedElementsAre(a));
}
TEST(FlatHashSet, FlatHashSetPolicyDestroyReturnsTrue) {

View File

@@ -0,0 +1,45 @@
// Copyright 2024 The Abseil Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_CONTAINER_HASH_CONTAINER_DEFAULTS_H_
#define ABSL_CONTAINER_HASH_CONTAINER_DEFAULTS_H_
#include "absl/base/config.h"
#include "absl/container/internal/hash_function_defaults.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
// DefaultHashContainerHash is a convenience alias for the functor that is used
// by default by Abseil hash-based (unordered) containers for hashing when
// `Hash` type argument is not explicitly specified.
//
// This type alias can be used by generic code that wants to provide more
// flexibility for defining underlying containers.
template <typename T>
using DefaultHashContainerHash = absl::container_internal::hash_default_hash<T>;
// DefaultHashContainerEq is a convenience alias for the functor that is used by
// default by Abseil hash-based (unordered) containers for equality check when
// `Eq` type argument is not explicitly specified.
//
// This type alias can be used by generic code that wants to provide more
// flexibility for defining underlying containers.
template <typename T>
using DefaultHashContainerEq = absl::container_internal::hash_default_eq<T>;
ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_CONTAINER_HASH_CONTAINER_DEFAULTS_H_

View File

@@ -775,7 +775,20 @@ class InlinedVector {
ABSL_HARDENING_ASSERT(pos >= begin());
ABSL_HARDENING_ASSERT(pos < end());
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102329#c2
// It appears that GCC thinks that since `pos` is a const pointer and may
// point to uninitialized memory at this point, a warning should be
// issued. But `pos` is actually only used to compute an array index to
// write to.
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#pragma GCC diagnostic ignored "-Wuninitialized"
#endif
return storage_.Erase(pos, pos + 1);
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
}
// Overload of `InlinedVector::erase(...)` that erases every element in the

View File

@@ -333,6 +333,57 @@ TEST(UniquePtr, Swap) {
}
}
// Erasing from a container of unique pointers should work fine, with no
// leaks, despite the fact that unique pointers are trivially relocatable but
// not trivially destructible.
// TODO(absl-team): Using unique_ptr here is technically correct, but
// a trivially relocatable struct would be less semantically confusing.
TEST(UniquePtr, EraseSingle) {
for (size_t size = 4; size < 16; ++size) {
absl::InlinedVector<std::unique_ptr<size_t>, 8> a;
for (size_t i = 0; i < size; ++i) {
a.push_back(std::make_unique<size_t>(i));
}
a.erase(a.begin());
ASSERT_THAT(a, SizeIs(size - 1));
for (size_t i = 0; i < size - 1; ++i) {
ASSERT_THAT(a[i], Pointee(i + 1));
}
a.erase(a.begin() + 2);
ASSERT_THAT(a, SizeIs(size - 2));
ASSERT_THAT(a[0], Pointee(1));
ASSERT_THAT(a[1], Pointee(2));
for (size_t i = 2; i < size - 2; ++i) {
ASSERT_THAT(a[i], Pointee(i + 2));
}
}
}
// Erasing from a container of unique pointers should work fine, with no
// leaks, despite the fact that unique pointers are trivially relocatable but
// not trivially destructible.
// TODO(absl-team): Using unique_ptr here is technically correct, but
// a trivially relocatable struct would be less semantically confusing.
TEST(UniquePtr, EraseMulti) {
for (size_t size = 5; size < 16; ++size) {
absl::InlinedVector<std::unique_ptr<size_t>, 8> a;
for (size_t i = 0; i < size; ++i) {
a.push_back(std::make_unique<size_t>(i));
}
a.erase(a.begin(), a.begin() + 2);
ASSERT_THAT(a, SizeIs(size - 2));
for (size_t i = 0; i < size - 2; ++i) {
ASSERT_THAT(a[i], Pointee(i + 2));
}
a.erase(a.begin() + 1, a.begin() + 3);
ASSERT_THAT(a, SizeIs(size - 4));
ASSERT_THAT(a[0], Pointee(2));
for (size_t i = 1; i < size - 4; ++i) {
ASSERT_THAT(a[i], Pointee(i + 4));
}
}
}
// At the end of this test loop, the elements between [erase_begin, erase_end)
// should have reference counts == 0, and all others elements should have
// reference counts == 1.

View File

@@ -53,11 +53,11 @@
#include <functional>
#include <iterator>
#include <limits>
#include <new>
#include <string>
#include <type_traits>
#include <utility>
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
#include "absl/container/internal/common.h"
@@ -70,7 +70,6 @@
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "absl/types/compare.h"
#include "absl/utility/utility.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -78,9 +77,10 @@ namespace container_internal {
#ifdef ABSL_BTREE_ENABLE_GENERATIONS
#error ABSL_BTREE_ENABLE_GENERATIONS cannot be directly set
#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
defined(ABSL_HAVE_HWADDRESS_SANITIZER) || \
defined(ABSL_HAVE_MEMORY_SANITIZER)
#elif (defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
defined(ABSL_HAVE_HWADDRESS_SANITIZER) || \
defined(ABSL_HAVE_MEMORY_SANITIZER)) && \
!defined(NDEBUG_SANITIZER) // If defined, performance is important.
// When compiled in sanitizer mode, we add generation integers to the nodes and
// iterators. When iterators are used, we validate that the container has not
// been mutated since the iterator was constructed.
@@ -475,7 +475,7 @@ struct SearchResult {
// useful information.
template <typename V>
struct SearchResult<V, false> {
SearchResult() {}
SearchResult() = default;
explicit SearchResult(V v) : value(v) {}
SearchResult(V v, MatchKind /*match*/) : value(v) {}
@@ -580,14 +580,12 @@ class btree_node {
using layout_type =
absl::container_internal::Layout<btree_node *, uint32_t, field_type,
slot_type, btree_node *>;
using leaf_layout_type = typename layout_type::template WithStaticSizes<
/*parent*/ 1,
/*generation*/ BtreeGenerationsEnabled() ? 1 : 0,
/*position, start, finish, max_count*/ 4>;
constexpr static size_type SizeWithNSlots(size_type n) {
return layout_type(
/*parent*/ 1,
/*generation*/ BtreeGenerationsEnabled() ? 1 : 0,
/*position, start, finish, max_count*/ 4,
/*slots*/ n,
/*children*/ 0)
.AllocSize();
return leaf_layout_type(/*slots*/ n, /*children*/ 0).AllocSize();
}
// A lower bound for the overhead of fields other than slots in a leaf node.
constexpr static size_type MinimumOverhead() {
@@ -619,27 +617,22 @@ class btree_node {
constexpr static size_type kNodeSlots =
kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots;
using internal_layout_type = typename layout_type::template WithStaticSizes<
/*parent*/ 1,
/*generation*/ BtreeGenerationsEnabled() ? 1 : 0,
/*position, start, finish, max_count*/ 4, /*slots*/ kNodeSlots,
/*children*/ kNodeSlots + 1>;
// The node is internal (i.e. is not a leaf node) if and only if `max_count`
// has this value.
constexpr static field_type kInternalNodeMaxCount = 0;
constexpr static layout_type Layout(const size_type slot_count,
const size_type child_count) {
return layout_type(
/*parent*/ 1,
/*generation*/ BtreeGenerationsEnabled() ? 1 : 0,
/*position, start, finish, max_count*/ 4,
/*slots*/ slot_count,
/*children*/ child_count);
}
// Leaves can have less than kNodeSlots values.
constexpr static layout_type LeafLayout(
constexpr static leaf_layout_type LeafLayout(
const size_type slot_count = kNodeSlots) {
return Layout(slot_count, 0);
}
constexpr static layout_type InternalLayout() {
return Layout(kNodeSlots, kNodeSlots + 1);
return leaf_layout_type(slot_count, 0);
}
constexpr static auto InternalLayout() { return internal_layout_type(); }
constexpr static size_type LeafSize(const size_type slot_count = kNodeSlots) {
return LeafLayout(slot_count).AllocSize();
}
@@ -1407,9 +1400,9 @@ class btree {
copy_or_move_values_in_order(other);
}
btree(btree &&other) noexcept
: root_(absl::exchange(other.root_, EmptyNode())),
: root_(std::exchange(other.root_, EmptyNode())),
rightmost_(std::move(other.rightmost_)),
size_(absl::exchange(other.size_, 0u)) {
size_(std::exchange(other.size_, 0u)) {
other.mutable_rightmost() = EmptyNode();
}
btree(btree &&other, const allocator_type &alloc)

View File

@@ -87,11 +87,11 @@ struct Storage {
constexpr Storage() = default;
template <typename V>
explicit constexpr Storage(absl::in_place_t, V&& v)
: value(absl::forward<V>(v)) {}
: value(std::forward<V>(v)) {}
constexpr const T& get() const& { return value; }
T& get() & { return value; }
constexpr const T&& get() const&& { return absl::move(*this).value; }
T&& get() && { return std::move(*this).value; }
constexpr T& get() & { return value; }
constexpr const T&& get() const&& { return std::move(*this).value; }
constexpr T&& get() && { return std::move(*this).value; }
};
template <typename T, size_t I>
@@ -99,13 +99,12 @@ struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<T, I, true> : T {
constexpr Storage() = default;
template <typename V>
explicit constexpr Storage(absl::in_place_t, V&& v)
: T(absl::forward<V>(v)) {}
explicit constexpr Storage(absl::in_place_t, V&& v) : T(std::forward<V>(v)) {}
constexpr const T& get() const& { return *this; }
T& get() & { return *this; }
constexpr const T&& get() const&& { return absl::move(*this); }
T&& get() && { return std::move(*this); }
constexpr T& get() & { return *this; }
constexpr const T&& get() const&& { return std::move(*this); }
constexpr T&& get() && { return std::move(*this); }
};
template <typename D, typename I, bool ShouldAnyUseBase>
@@ -123,7 +122,7 @@ struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
constexpr CompressedTupleImpl() = default;
template <typename... Vs>
explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args)
: Storage<Ts, I>(absl::in_place, absl::forward<Vs>(args))... {}
: Storage<Ts, I>(absl::in_place, std::forward<Vs>(args))... {}
friend CompressedTuple<Ts...>;
};
@@ -135,7 +134,7 @@ struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
constexpr CompressedTupleImpl() = default;
template <typename... Vs>
explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args)
: Storage<Ts, I, false>(absl::in_place, absl::forward<Vs>(args))... {}
: Storage<Ts, I, false>(absl::in_place, std::forward<Vs>(args))... {}
friend CompressedTuple<Ts...>;
};
@@ -234,11 +233,11 @@ class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
bool> = true>
explicit constexpr CompressedTuple(First&& first, Vs&&... base)
: CompressedTuple::CompressedTupleImpl(absl::in_place,
absl::forward<First>(first),
absl::forward<Vs>(base)...) {}
std::forward<First>(first),
std::forward<Vs>(base)...) {}
template <int I>
ElemT<I>& get() & {
constexpr ElemT<I>& get() & {
return StorageT<I>::get();
}
@@ -248,13 +247,13 @@ class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
}
template <int I>
ElemT<I>&& get() && {
constexpr ElemT<I>&& get() && {
return std::move(*this).StorageT<I>::get();
}
template <int I>
constexpr const ElemT<I>&& get() const&& {
return absl::move(*this).StorageT<I>::get();
return std::move(*this).StorageT<I>::get();
}
};

View File

@@ -15,7 +15,11 @@
#include "absl/container/internal/compressed_tuple.h"
#include <memory>
#include <set>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
@@ -27,14 +31,22 @@
// These are declared at global scope purely so that error messages
// are smaller and easier to understand.
enum class CallType { kConstRef, kConstMove };
enum class CallType { kMutableRef, kConstRef, kMutableMove, kConstMove };
template <int>
struct Empty {
constexpr CallType value() & { return CallType::kMutableRef; }
constexpr CallType value() const& { return CallType::kConstRef; }
constexpr CallType value() && { return CallType::kMutableMove; }
constexpr CallType value() const&& { return CallType::kConstMove; }
};
// Unconditionally return an lvalue reference to `t`.
template <typename T>
constexpr T& AsLValue(T&& t) {
return t;
}
template <typename T>
struct NotEmpty {
T value;
@@ -54,6 +66,7 @@ namespace {
using absl::test_internal::CopyableMovableInstance;
using absl::test_internal::InstanceTracker;
using ::testing::Each;
TEST(CompressedTupleTest, Sizeof) {
EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int>));
@@ -70,6 +83,30 @@ TEST(CompressedTupleTest, Sizeof) {
sizeof(CompressedTuple<int, Empty<0>, NotEmpty<double>, Empty<1>>));
}
TEST(CompressedTupleTest, PointerToEmpty) {
auto to_void_ptrs = [](const auto&... objs) {
return std::vector<const void*>{static_cast<const void*>(&objs)...};
};
{
using Tuple = CompressedTuple<int, Empty<0>>;
EXPECT_EQ(sizeof(int), sizeof(Tuple));
Tuple t;
EXPECT_THAT(to_void_ptrs(t.get<1>()), Each(&t));
}
{
using Tuple = CompressedTuple<int, Empty<0>, Empty<1>>;
EXPECT_EQ(sizeof(int), sizeof(Tuple));
Tuple t;
EXPECT_THAT(to_void_ptrs(t.get<1>(), t.get<2>()), Each(&t));
}
{
using Tuple = CompressedTuple<int, Empty<0>, Empty<1>, Empty<2>>;
EXPECT_EQ(sizeof(int), sizeof(Tuple));
Tuple t;
EXPECT_THAT(to_void_ptrs(t.get<1>(), t.get<2>(), t.get<3>()), Each(&t));
}
}
TEST(CompressedTupleTest, OneMoveOnRValueConstructionTemp) {
InstanceTracker tracker;
CompressedTuple<CopyableMovableInstance> x1(CopyableMovableInstance(1));
@@ -346,8 +383,24 @@ TEST(CompressedTupleTest, Constexpr) {
constexpr int value() const { return v; }
int v;
};
constexpr CompressedTuple<int, double, CompressedTuple<int>, Empty<0>> x(
7, 1.25, CompressedTuple<int>(5), {});
using Tuple = CompressedTuple<int, double, CompressedTuple<int>, Empty<0>>;
constexpr int r0 =
AsLValue(Tuple(1, 0.75, CompressedTuple<int>(9), {})).get<0>();
constexpr double r1 =
AsLValue(Tuple(1, 0.75, CompressedTuple<int>(9), {})).get<1>();
constexpr int r2 =
AsLValue(Tuple(1, 0.75, CompressedTuple<int>(9), {})).get<2>().get<0>();
constexpr CallType r3 =
AsLValue(Tuple(1, 0.75, CompressedTuple<int>(9), {})).get<3>().value();
EXPECT_EQ(r0, 1);
EXPECT_EQ(r1, 0.75);
EXPECT_EQ(r2, 9);
EXPECT_EQ(r3, CallType::kMutableRef);
constexpr Tuple x(7, 1.25, CompressedTuple<int>(5), {});
constexpr int x0 = x.get<0>();
constexpr double x1 = x.get<1>();
constexpr int x2 = x.get<2>().get<0>();
@@ -358,6 +411,18 @@ TEST(CompressedTupleTest, Constexpr) {
EXPECT_EQ(x2, 5);
EXPECT_EQ(x3, CallType::kConstRef);
constexpr int m0 = Tuple(5, 0.25, CompressedTuple<int>(3), {}).get<0>();
constexpr double m1 = Tuple(5, 0.25, CompressedTuple<int>(3), {}).get<1>();
constexpr int m2 =
Tuple(5, 0.25, CompressedTuple<int>(3), {}).get<2>().get<0>();
constexpr CallType m3 =
Tuple(5, 0.25, CompressedTuple<int>(3), {}).get<3>().value();
EXPECT_EQ(m0, 5);
EXPECT_EQ(m1, 0.25);
EXPECT_EQ(m2, 3);
EXPECT_EQ(m3, CallType::kMutableMove);
constexpr CompressedTuple<Empty<0>, TrivialStruct, int> trivial = {};
constexpr CallType trivial0 = trivial.get<0>().value();
constexpr int trivial1 = trivial.get<1>().value();
@@ -384,8 +449,8 @@ TEST(CompressedTupleTest, Constexpr) {
#if defined(__clang__)
// An apparent bug in earlier versions of gcc claims these are ambiguous.
constexpr int x2m = absl::move(x.get<2>()).get<0>();
constexpr CallType x3m = absl::move(x).get<3>().value();
constexpr int x2m = std::move(x.get<2>()).get<0>();
constexpr CallType x3m = std::move(x).get<3>().value();
EXPECT_EQ(x2m, 5);
EXPECT_EQ(x3m, CallType::kConstMove);
#endif

View File

@@ -17,6 +17,7 @@
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
#include <new>

View File

@@ -168,6 +168,9 @@ struct hash_policy_traits : common_policy_traits<Policy> {
#endif
}
// Whether small object optimization is enabled. True by default.
static constexpr bool soo_enabled() { return soo_enabled_impl(Rank1{}); }
private:
template <class Hash>
struct HashElement {
@@ -183,6 +186,18 @@ struct hash_policy_traits : common_policy_traits<Policy> {
return Policy::apply(HashElement<Hash>{*static_cast<const Hash*>(hash_fn)},
Policy::element(static_cast<slot_type*>(slot)));
}
// Use go/ranked-overloads for dispatching. Rank1 is preferred.
struct Rank0 {};
struct Rank1 : Rank0 {};
// Use auto -> decltype as an enabler.
template <class P = Policy>
static constexpr auto soo_enabled_impl(Rank1) -> decltype(P::soo_enabled()) {
return P::soo_enabled();
}
static constexpr bool soo_enabled_impl(Rank0) { return true; }
};
} // namespace container_internal

View File

@@ -18,13 +18,18 @@
#include <atomic>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/per_thread_tls.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
#include "absl/base/no_destructor.h"
#include "absl/base/optimization.h"
#include "absl/debugging/stacktrace.h"
#include "absl/memory/memory.h"
#include "absl/profiling/internal/exponential_biased.h"
@@ -73,7 +78,10 @@ HashtablezInfo::HashtablezInfo() = default;
HashtablezInfo::~HashtablezInfo() = default;
void HashtablezInfo::PrepareForSampling(int64_t stride,
size_t inline_element_size_value) {
size_t inline_element_size_value,
size_t key_size_value,
size_t value_size_value,
uint16_t soo_capacity_value) {
capacity.store(0, std::memory_order_relaxed);
size.store(0, std::memory_order_relaxed);
num_erases.store(0, std::memory_order_relaxed);
@@ -93,6 +101,9 @@ void HashtablezInfo::PrepareForSampling(int64_t stride,
depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
/* skip_count= */ 0);
inline_element_size = inline_element_size_value;
key_size = key_size_value;
value_size = value_size_value;
soo_capacity = soo_capacity_value;
}
static bool ShouldForceSampling() {
@@ -116,12 +127,13 @@ static bool ShouldForceSampling() {
}
HashtablezInfo* SampleSlow(SamplingState& next_sample,
size_t inline_element_size) {
size_t inline_element_size, size_t key_size,
size_t value_size, uint16_t soo_capacity) {
if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
next_sample.next_sample = 1;
const int64_t old_stride = exchange(next_sample.sample_stride, 1);
HashtablezInfo* result =
GlobalHashtablezSampler().Register(old_stride, inline_element_size);
HashtablezInfo* result = GlobalHashtablezSampler().Register(
old_stride, inline_element_size, key_size, value_size, soo_capacity);
return result;
}
@@ -151,10 +163,12 @@ HashtablezInfo* SampleSlow(SamplingState& next_sample,
// that case.
if (first) {
if (ABSL_PREDICT_TRUE(--next_sample.next_sample > 0)) return nullptr;
return SampleSlow(next_sample, inline_element_size);
return SampleSlow(next_sample, inline_element_size, key_size, value_size,
soo_capacity);
}
return GlobalHashtablezSampler().Register(old_stride, inline_element_size);
return GlobalHashtablezSampler().Register(old_stride, inline_element_size,
key_size, value_size, soo_capacity);
#endif
}

View File

@@ -40,15 +40,20 @@
#define ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/per_thread_tls.h"
#include "absl/base/optimization.h"
#include "absl/base/thread_annotations.h"
#include "absl/profiling/internal/sample_recorder.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/utility/utility.h"
namespace absl {
@@ -67,7 +72,9 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
// Puts the object into a clean state, fills in the logically `const` members,
// blocking for any readers that are currently sampling the object.
void PrepareForSampling(int64_t stride, size_t inline_element_size_value)
void PrepareForSampling(int64_t stride, size_t inline_element_size_value,
size_t key_size, size_t value_size,
uint16_t soo_capacity_value)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
// These fields are mutated by the various Record* APIs and need to be
@@ -91,8 +98,15 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
static constexpr int kMaxStackDepth = 64;
absl::Time create_time;
int32_t depth;
// The SOO capacity for this table in elements (not bytes). Note that sampled
// tables are never SOO because we need to store the infoz handle on the heap.
// Tables that would be SOO if not sampled should have: soo_capacity > 0 &&
// size <= soo_capacity && max_reserve <= soo_capacity.
uint16_t soo_capacity;
void* stack[kMaxStackDepth];
size_t inline_element_size; // How big is the slot?
size_t inline_element_size; // How big is the slot in bytes?
size_t key_size; // sizeof(key_type)
size_t value_size; // sizeof(value_type)
};
void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length);
@@ -117,7 +131,8 @@ struct SamplingState {
};
HashtablezInfo* SampleSlow(SamplingState& next_sample,
size_t inline_element_size);
size_t inline_element_size, size_t key_size,
size_t value_size, uint16_t soo_capacity);
void UnsampleSlow(HashtablezInfo* info);
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
@@ -204,16 +219,19 @@ class HashtablezInfoHandle {
extern ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample;
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
// Returns an RAII sampling handle that manages registration and unregistation
// with the global sampler.
// Returns a sampling handle.
inline HashtablezInfoHandle Sample(
size_t inline_element_size ABSL_ATTRIBUTE_UNUSED) {
ABSL_ATTRIBUTE_UNUSED size_t inline_element_size,
ABSL_ATTRIBUTE_UNUSED size_t key_size,
ABSL_ATTRIBUTE_UNUSED size_t value_size,
ABSL_ATTRIBUTE_UNUSED uint16_t soo_capacity) {
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
if (ABSL_PREDICT_TRUE(--global_next_sample.next_sample > 0)) {
return HashtablezInfoHandle(nullptr);
}
return HashtablezInfoHandle(
SampleSlow(global_next_sample, inline_element_size));
return HashtablezInfoHandle(SampleSlow(global_next_sample,
inline_element_size, key_size,
value_size, soo_capacity));
#else
return HashtablezInfoHandle(nullptr);
#endif // !ABSL_PER_THREAD_TLS

View File

@@ -15,8 +15,12 @@
#include "absl/container/internal/hashtablez_sampler.h"
#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <random>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
@@ -67,7 +71,11 @@ std::vector<size_t> GetSizes(HashtablezSampler* s) {
HashtablezInfo* Register(HashtablezSampler* s, size_t size) {
const int64_t test_stride = 123;
const size_t test_element_size = 17;
auto* info = s->Register(test_stride, test_element_size);
const size_t test_key_size = 3;
const size_t test_value_size = 5;
auto* info =
s->Register(test_stride, test_element_size, /*key_size=*/test_key_size,
/*value_size=*/test_value_size, /*soo_capacity=*/0);
assert(info != nullptr);
info->size.store(size);
return info;
@@ -77,9 +85,15 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
absl::Time test_start = absl::Now();
const int64_t test_stride = 123;
const size_t test_element_size = 17;
const size_t test_key_size = 15;
const size_t test_value_size = 13;
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
info.PrepareForSampling(test_stride, test_element_size);
info.PrepareForSampling(test_stride, test_element_size,
/*key_size=*/test_key_size,
/*value_size=*/test_value_size,
/*soo_capacity_value=*/1);
EXPECT_EQ(info.capacity.load(), 0);
EXPECT_EQ(info.size.load(), 0);
@@ -94,6 +108,9 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
EXPECT_GE(info.create_time, test_start);
EXPECT_EQ(info.weight, test_stride);
EXPECT_EQ(info.inline_element_size, test_element_size);
EXPECT_EQ(info.key_size, test_key_size);
EXPECT_EQ(info.value_size, test_value_size);
EXPECT_EQ(info.soo_capacity, 1);
info.capacity.store(1, std::memory_order_relaxed);
info.size.store(1, std::memory_order_relaxed);
@@ -106,7 +123,10 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
info.max_reserve.store(1, std::memory_order_relaxed);
info.create_time = test_start - absl::Hours(20);
info.PrepareForSampling(test_stride * 2, test_element_size);
info.PrepareForSampling(test_stride * 2, test_element_size,
/*key_size=*/test_key_size,
/*value_size=*/test_value_size,
/*soo_capacity_value=*/0);
EXPECT_EQ(info.capacity.load(), 0);
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 0);
@@ -119,7 +139,10 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
EXPECT_EQ(info.max_reserve.load(), 0);
EXPECT_EQ(info.weight, 2 * test_stride);
EXPECT_EQ(info.inline_element_size, test_element_size);
EXPECT_EQ(info.key_size, test_key_size);
EXPECT_EQ(info.value_size, test_value_size);
EXPECT_GE(info.create_time, test_start);
EXPECT_EQ(info.soo_capacity, 0);
}
TEST(HashtablezInfoTest, RecordStorageChanged) {
@@ -127,7 +150,13 @@ TEST(HashtablezInfoTest, RecordStorageChanged) {
absl::MutexLock l(&info.init_mu);
const int64_t test_stride = 21;
const size_t test_element_size = 19;
info.PrepareForSampling(test_stride, test_element_size);
const size_t test_key_size = 17;
const size_t test_value_size = 15;
info.PrepareForSampling(test_stride, test_element_size,
/*key_size=*/test_key_size,
/*value_size=*/test_value_size,
/*soo_capacity_value=*/0);
RecordStorageChangedSlow(&info, 17, 47);
EXPECT_EQ(info.size.load(), 17);
EXPECT_EQ(info.capacity.load(), 47);
@@ -141,7 +170,13 @@ TEST(HashtablezInfoTest, RecordInsert) {
absl::MutexLock l(&info.init_mu);
const int64_t test_stride = 25;
const size_t test_element_size = 23;
info.PrepareForSampling(test_stride, test_element_size);
const size_t test_key_size = 21;
const size_t test_value_size = 19;
info.PrepareForSampling(test_stride, test_element_size,
/*key_size=*/test_key_size,
/*value_size=*/test_value_size,
/*soo_capacity_value=*/0);
EXPECT_EQ(info.max_probe_length.load(), 0);
RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
EXPECT_EQ(info.max_probe_length.load(), 6);
@@ -163,9 +198,15 @@ TEST(HashtablezInfoTest, RecordInsert) {
TEST(HashtablezInfoTest, RecordErase) {
const int64_t test_stride = 31;
const size_t test_element_size = 29;
const size_t test_key_size = 27;
const size_t test_value_size = 25;
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
info.PrepareForSampling(test_stride, test_element_size);
info.PrepareForSampling(test_stride, test_element_size,
/*key_size=*/test_key_size,
/*value_size=*/test_value_size,
/*soo_capacity_value=*/1);
EXPECT_EQ(info.num_erases.load(), 0);
EXPECT_EQ(info.size.load(), 0);
RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
@@ -174,14 +215,23 @@ TEST(HashtablezInfoTest, RecordErase) {
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 1);
EXPECT_EQ(info.inline_element_size, test_element_size);
EXPECT_EQ(info.key_size, test_key_size);
EXPECT_EQ(info.value_size, test_value_size);
EXPECT_EQ(info.soo_capacity, 1);
}
TEST(HashtablezInfoTest, RecordRehash) {
const int64_t test_stride = 33;
const size_t test_element_size = 31;
const size_t test_key_size = 29;
const size_t test_value_size = 27;
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
info.PrepareForSampling(test_stride, test_element_size);
info.PrepareForSampling(test_stride, test_element_size,
/*key_size=*/test_key_size,
/*value_size=*/test_value_size,
/*soo_capacity_value=*/0);
RecordInsertSlow(&info, 0x1, 0);
RecordInsertSlow(&info, 0x2, kProbeLength);
RecordInsertSlow(&info, 0x4, kProbeLength);
@@ -201,6 +251,9 @@ TEST(HashtablezInfoTest, RecordRehash) {
EXPECT_EQ(info.num_erases.load(), 0);
EXPECT_EQ(info.num_rehashes.load(), 1);
EXPECT_EQ(info.inline_element_size, test_element_size);
EXPECT_EQ(info.key_size, test_key_size);
EXPECT_EQ(info.value_size, test_value_size);
EXPECT_EQ(info.soo_capacity, 0);
}
TEST(HashtablezInfoTest, RecordReservation) {
@@ -208,7 +261,14 @@ TEST(HashtablezInfoTest, RecordReservation) {
absl::MutexLock l(&info.init_mu);
const int64_t test_stride = 35;
const size_t test_element_size = 33;
info.PrepareForSampling(test_stride, test_element_size);
const size_t test_key_size = 31;
const size_t test_value_size = 29;
info.PrepareForSampling(test_stride, test_element_size,
/*key_size=*/test_key_size,
/*value_size=*/test_value_size,
/*soo_capacity_value=*/0);
RecordReservationSlow(&info, 3);
EXPECT_EQ(info.max_reserve.load(), 3);
@@ -224,12 +284,19 @@ TEST(HashtablezInfoTest, RecordReservation) {
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
TEST(HashtablezSamplerTest, SmallSampleParameter) {
const size_t test_element_size = 31;
const size_t test_key_size = 33;
const size_t test_value_size = 35;
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(100);
for (int i = 0; i < 1000; ++i) {
SamplingState next_sample = {0, 0};
HashtablezInfo* sample = SampleSlow(next_sample, test_element_size);
HashtablezInfo* sample =
SampleSlow(next_sample, test_element_size,
/*key_size=*/test_key_size, /*value_size=*/test_value_size,
/*soo_capacity=*/0);
EXPECT_GT(next_sample.next_sample, 0);
EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride);
EXPECT_NE(sample, nullptr);
@@ -239,12 +306,17 @@ TEST(HashtablezSamplerTest, SmallSampleParameter) {
TEST(HashtablezSamplerTest, LargeSampleParameter) {
const size_t test_element_size = 31;
const size_t test_key_size = 33;
const size_t test_value_size = 35;
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(std::numeric_limits<int32_t>::max());
for (int i = 0; i < 1000; ++i) {
SamplingState next_sample = {0, 0};
HashtablezInfo* sample = SampleSlow(next_sample, test_element_size);
HashtablezInfo* sample =
SampleSlow(next_sample, test_element_size,
/*key_size=*/test_key_size, /*value_size=*/test_value_size,
/*soo_capacity=*/0);
EXPECT_GT(next_sample.next_sample, 0);
EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride);
EXPECT_NE(sample, nullptr);
@@ -254,13 +326,20 @@ TEST(HashtablezSamplerTest, LargeSampleParameter) {
TEST(HashtablezSamplerTest, Sample) {
const size_t test_element_size = 31;
const size_t test_key_size = 33;
const size_t test_value_size = 35;
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(100);
int64_t num_sampled = 0;
int64_t total = 0;
double sample_rate = 0.0;
for (int i = 0; i < 1000000; ++i) {
HashtablezInfoHandle h = Sample(test_element_size);
HashtablezInfoHandle h =
Sample(test_element_size,
/*key_size=*/test_key_size, /*value_size=*/test_value_size,
/*soo_capacity=*/0);
++total;
if (h.IsSampled()) {
++num_sampled;
@@ -275,7 +354,12 @@ TEST(HashtablezSamplerTest, Handle) {
auto& sampler = GlobalHashtablezSampler();
const int64_t test_stride = 41;
const size_t test_element_size = 39;
HashtablezInfoHandle h(sampler.Register(test_stride, test_element_size));
const size_t test_key_size = 37;
const size_t test_value_size = 35;
HashtablezInfoHandle h(sampler.Register(test_stride, test_element_size,
/*key_size=*/test_key_size,
/*value_size=*/test_value_size,
/*soo_capacity=*/0));
auto* info = HashtablezInfoHandlePeer::GetInfo(&h);
info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed);
@@ -351,18 +435,28 @@ TEST(HashtablezSamplerTest, MultiThreaded) {
for (int i = 0; i < 10; ++i) {
const int64_t sampling_stride = 11 + i % 3;
const size_t elt_size = 10 + i % 2;
pool.Schedule([&sampler, &stop, sampling_stride, elt_size]() {
const size_t key_size = 12 + i % 4;
const size_t value_size = 13 + i % 5;
pool.Schedule([&sampler, &stop, sampling_stride, elt_size, key_size,
value_size]() {
std::random_device rd;
std::mt19937 gen(rd());
std::vector<HashtablezInfo*> infoz;
while (!stop.HasBeenNotified()) {
if (infoz.empty()) {
infoz.push_back(sampler.Register(sampling_stride, elt_size));
infoz.push_back(sampler.Register(sampling_stride, elt_size,
/*key_size=*/key_size,
/*value_size=*/value_size,
/*soo_capacity=*/0));
}
switch (std::uniform_int_distribution<>(0, 2)(gen)) {
case 0: {
infoz.push_back(sampler.Register(sampling_stride, elt_size));
infoz.push_back(sampler.Register(sampling_stride, elt_size,
/*key_size=*/key_size,
/*value_size=*/value_size,
/*soo_capacity=*/0));
break;
}
case 1: {

View File

@@ -27,6 +27,7 @@
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/identity.h"
#include "absl/base/macros.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/memory/memory.h"
@@ -82,16 +83,6 @@ using IsMoveAssignOk = std::is_move_assignable<ValueType<A>>;
template <typename A>
using IsSwapOk = absl::type_traits_internal::IsSwappable<ValueType<A>>;
template <typename T>
struct TypeIdentity {
using type = T;
};
// Used for function arguments in template functions to prevent ADL by forcing
// callers to explicitly specify the template parameter.
template <typename T>
using NoTypeDeduction = typename TypeIdentity<T>::type;
template <typename A, bool IsTriviallyDestructible =
absl::is_trivially_destructible<ValueType<A>>::value>
struct DestroyAdapter;
@@ -139,7 +130,7 @@ struct MallocAdapter {
};
template <typename A, typename ValueAdapter>
void ConstructElements(NoTypeDeduction<A>& allocator,
void ConstructElements(absl::internal::type_identity_t<A>& allocator,
Pointer<A> construct_first, ValueAdapter& values,
SizeType<A> construct_size) {
for (SizeType<A> i = 0; i < construct_size; ++i) {
@@ -893,16 +884,30 @@ auto Storage<T, N, A>::Erase(ConstIterator<A> from,
std::distance(ConstIterator<A>(storage_view.data), from));
SizeType<A> erase_end_index = erase_index + erase_size;
IteratorValueAdapter<A, MoveIterator<A>> move_values(
MoveIterator<A>(storage_view.data + erase_end_index));
// Fast path: if the value type is trivially relocatable and we know
// the allocator doesn't do anything fancy, then we know it is legal for us to
// simply destroy the elements in the "erasure window" (which cannot throw)
// and then memcpy downward to close the window.
if (absl::is_trivially_relocatable<ValueType<A>>::value &&
std::is_nothrow_destructible<ValueType<A>>::value &&
std::is_same<A, std::allocator<ValueType<A>>>::value) {
DestroyAdapter<A>::DestroyElements(
GetAllocator(), storage_view.data + erase_index, erase_size);
std::memmove(
reinterpret_cast<char*>(storage_view.data + erase_index),
reinterpret_cast<const char*>(storage_view.data + erase_end_index),
(storage_view.size - erase_end_index) * sizeof(ValueType<A>));
} else {
IteratorValueAdapter<A, MoveIterator<A>> move_values(
MoveIterator<A>(storage_view.data + erase_end_index));
AssignElements<A>(storage_view.data + erase_index, move_values,
storage_view.size - erase_end_index);
DestroyAdapter<A>::DestroyElements(
GetAllocator(), storage_view.data + (storage_view.size - erase_size),
erase_size);
AssignElements<A>(storage_view.data + erase_index, move_values,
storage_view.size - erase_end_index);
DestroyAdapter<A>::DestroyElements(
GetAllocator(), storage_view.data + (storage_view.size - erase_size),
erase_size);
}
SubtractSize(erase_size);
return Iterator<A>(storage_view.data + erase_index);
}

View File

@@ -81,9 +81,30 @@
// }
//
// The layout we used above combines fixed-size with dynamically-sized fields.
// This is quite common. Layout is optimized for this use case and generates
// optimal code. All computations that can be performed at compile time are
// indeed performed at compile time.
// This is quite common. Layout is optimized for this use case and attempts to
// generate optimal code. To help the compiler do that in more cases, you can
// specify the fixed sizes using `WithStaticSizes`. This ensures that all
// computations that can be performed at compile time are indeed performed at
// compile time. Note that sometimes the `template` keyword is needed. E.g.:
//
// using SL = L::template WithStaticSizes<1, 1>;
//
// void Use(unsigned char* p) {
// // First, extract N and M.
// // Using `prefix` we can access the first three arrays but not more.
// //
// // More details: The first element always has offset 0. `SL`
// // has offsets for the second and third array based on sizes of
// // the first and second array, specified via `WithStaticSizes`.
// constexpr auto prefix = SL::Partial();
// size_t n = *prefix.Pointer<0>(p);
// size_t m = *prefix.Pointer<1>(p);
//
// // Now we can get a pointer to the final payload.
// const SL layout(n, m);
// double* a = layout.Pointer<double>(p);
// int* b = layout.Pointer<int>(p);
// }
//
// Efficiency tip: The order of fields matters. In `Layout<T1, ..., TN>` try to
// ensure that `alignof(T1) >= ... >= alignof(TN)`. This way you'll have no
@@ -107,7 +128,7 @@
// CompactString(const char* s = "") {
// const size_t size = strlen(s);
// // size_t[1] followed by char[size + 1].
// const L layout(1, size + 1);
// const L layout(size + 1);
// p_.reset(new unsigned char[layout.AllocSize()]);
// // If running under ASAN, mark the padding bytes, if any, to catch
// // memory errors.
@@ -125,14 +146,13 @@
//
// const char* c_str() const {
// // Equivalent to reinterpret_cast<char*>(p.get() + sizeof(size_t)).
// // The argument in Partial(1) specifies that we have size_t[1] in front
// // of the characters.
// return L::Partial(1).Pointer<char>(p_.get());
// return L::Partial().Pointer<char>(p_.get());
// }
//
// private:
// // Our heap allocation contains a size_t followed by an array of chars.
// using L = Layout<size_t, char>;
// // Our heap allocation contains a single size_t followed by an array of
// // chars.
// using L = Layout<size_t, char>::WithStaticSizes<1>;
// std::unique_ptr<unsigned char[]> p_;
// };
//
@@ -146,11 +166,12 @@
//
// The interface exported by this file consists of:
// - class `Layout<>` and its public members.
// - The public members of class `internal_layout::LayoutImpl<>`. That class
// isn't intended to be used directly, and its name and template parameter
// list are internal implementation details, but the class itself provides
// most of the functionality in this file. See comments on its members for
// detailed documentation.
// - The public members of classes `internal_layout::LayoutWithStaticSizes<>`
// and `internal_layout::LayoutImpl<>`. Those classes aren't intended to be
// used directly, and their name and template parameter list are internal
// implementation details, but the classes themselves provide most of the
// functionality in this file. See comments on their members for detailed
// documentation.
//
// `Layout<T1,... Tn>::Partial(count1,..., countm)` (where `m` <= `n`) returns a
// `LayoutImpl<>` object. `Layout<T1,..., Tn> layout(count1,..., countn)`
@@ -164,13 +185,14 @@
#include <stddef.h>
#include <stdint.h>
#include <ostream>
#include <array>
#include <string>
#include <tuple>
#include <type_traits>
#include <typeinfo>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/debugging/internal/demangle.h"
#include "absl/meta/type_traits.h"
@@ -209,9 +231,6 @@ struct NotAligned<const Aligned<T, N>> {
template <size_t>
using IntToSize = size_t;
template <class>
using TypeToSize = size_t;
template <class T>
struct Type : NotAligned<T> {
using type = T;
@@ -308,7 +327,8 @@ using IsLegalElementType = std::integral_constant<
!std::is_volatile<typename Type<T>::type>::value &&
adl_barrier::IsPow2(AlignOf<T>::value)>;
template <class Elements, class SizeSeq, class OffsetSeq>
template <class Elements, class StaticSizeSeq, class RuntimeSizeSeq,
class SizeSeq, class OffsetSeq>
class LayoutImpl;
// Public base class of `Layout` and the result type of `Layout::Partial()`.
@@ -316,31 +336,49 @@ class LayoutImpl;
// `Elements...` contains all template arguments of `Layout` that created this
// instance.
//
// `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is the number of arguments
// passed to `Layout::Partial()` or `Layout::Layout()`.
// `StaticSizeSeq...` is an index_sequence containing the sizes specified at
// compile-time.
//
// `RuntimeSizeSeq...` is `[0, NumRuntimeSizes)`, where `NumRuntimeSizes` is the
// number of arguments passed to `Layout::Partial()` or `Layout::Layout()`.
//
// `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is `NumRuntimeSizes` plus
// the number of sizes in `StaticSizeSeq`.
//
// `OffsetSeq...` is `[0, NumOffsets)` where `NumOffsets` is
// `Min(sizeof...(Elements), NumSizes + 1)` (the number of arrays for which we
// can compute offsets).
template <class... Elements, size_t... SizeSeq, size_t... OffsetSeq>
class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
absl::index_sequence<OffsetSeq...>> {
template <class... Elements, size_t... StaticSizeSeq, size_t... RuntimeSizeSeq,
size_t... SizeSeq, size_t... OffsetSeq>
class LayoutImpl<
std::tuple<Elements...>, absl::index_sequence<StaticSizeSeq...>,
absl::index_sequence<RuntimeSizeSeq...>, absl::index_sequence<SizeSeq...>,
absl::index_sequence<OffsetSeq...>> {
private:
static_assert(sizeof...(Elements) > 0, "At least one field is required");
static_assert(absl::conjunction<IsLegalElementType<Elements>...>::value,
"Invalid element type (see IsLegalElementType)");
static_assert(sizeof...(StaticSizeSeq) <= sizeof...(Elements),
"Too many static sizes specified");
enum {
NumTypes = sizeof...(Elements),
NumStaticSizes = sizeof...(StaticSizeSeq),
NumRuntimeSizes = sizeof...(RuntimeSizeSeq),
NumSizes = sizeof...(SizeSeq),
NumOffsets = sizeof...(OffsetSeq),
};
// These are guaranteed by `Layout`.
static_assert(NumStaticSizes + NumRuntimeSizes == NumSizes, "Internal error");
static_assert(NumSizes <= NumTypes, "Internal error");
static_assert(NumOffsets == adl_barrier::Min(NumTypes, NumSizes + 1),
"Internal error");
static_assert(NumTypes > 0, "Internal error");
static constexpr std::array<size_t, sizeof...(StaticSizeSeq)> kStaticSizes = {
StaticSizeSeq...};
// Returns the index of `T` in `Elements...`. Results in a compilation error
// if `Elements...` doesn't contain exactly one instance of `T`.
template <class T>
@@ -363,7 +401,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
template <size_t N>
using ElementType = typename std::tuple_element<N, ElementTypes>::type;
constexpr explicit LayoutImpl(IntToSize<SizeSeq>... sizes)
constexpr explicit LayoutImpl(IntToSize<RuntimeSizeSeq>... sizes)
: size_{sizes...} {}
// Alignment of the layout, equal to the strictest alignment of all elements.
@@ -389,7 +427,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
constexpr size_t Offset() const {
static_assert(N < NumOffsets, "Index out of bounds");
return adl_barrier::Align(
Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1],
Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * Size<N - 1>(),
ElementAlignment<N>::value);
}
@@ -411,8 +449,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
return {{Offset<OffsetSeq>()...}};
}
// The number of elements in the Nth array. This is the Nth argument of
// `Layout::Partial()` or `Layout::Layout()` (zero-based).
// The number of elements in the Nth array (zero-based).
//
// // int[3], 4 bytes of padding, double[4].
// Layout<int, double> x(3, 4);
@@ -420,10 +457,15 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
// assert(x.Size<1>() == 4);
//
// Requires: `N < NumSizes`.
template <size_t N>
template <size_t N, EnableIf<(N < NumStaticSizes)> = 0>
constexpr size_t Size() const {
return kStaticSizes[N];
}
template <size_t N, EnableIf<(N >= NumStaticSizes)> = 0>
constexpr size_t Size() const {
static_assert(N < NumSizes, "Index out of bounds");
return size_[N];
return size_[N - NumStaticSizes];
}
// The number of elements in the array with the specified element type.
@@ -500,13 +542,8 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
// std::tie(ints, doubles) = x.Pointers(p);
//
// Requires: `p` is aligned to `Alignment()`.
//
// Note: We're not using ElementType alias here because it does not compile
// under MSVC.
template <class Char>
std::tuple<CopyConst<
Char, typename std::tuple_element<OffsetSeq, ElementTypes>::type>*...>
Pointers(Char* p) const {
auto Pointers(Char* p) const {
return std::tuple<CopyConst<Char, ElementType<OffsetSeq>>*...>(
Pointer<OffsetSeq>(p)...);
}
@@ -559,15 +596,10 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
//
// Requires: `p` is aligned to `Alignment()`.
//
// Note: We're not using ElementType alias here because it does not compile
// under MSVC.
// Note: We mark the parameter as unused because GCC detects it is not used
// when `SizeSeq` is empty [-Werror=unused-but-set-parameter].
template <class Char>
std::tuple<SliceType<CopyConst<
Char, typename std::tuple_element<SizeSeq, ElementTypes>::type>>...>
Slices(Char* p) const {
// Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63875 (fixed
// in 6.1).
(void)p;
auto Slices(ABSL_ATTRIBUTE_UNUSED Char* p) const {
return std::tuple<SliceType<CopyConst<Char, ElementType<SizeSeq>>>...>(
Slice<SizeSeq>(p)...);
}
@@ -582,7 +614,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
constexpr size_t AllocSize() const {
static_assert(NumTypes == NumSizes, "You must specify sizes of all fields");
return Offset<NumTypes - 1>() +
SizeOf<ElementType<NumTypes - 1>>::value * size_[NumTypes - 1];
SizeOf<ElementType<NumTypes - 1>>::value * Size<NumTypes - 1>();
}
// If built with --config=asan, poisons padding bytes (if any) in the
@@ -606,7 +638,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
// The `if` is an optimization. It doesn't affect the observable behaviour.
if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) {
size_t start =
Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1];
Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * Size<N - 1>();
ASAN_POISON_MEMORY_REGION(p + start, Offset<N>() - start);
}
#endif
@@ -635,47 +667,66 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
adl_barrier::TypeName<ElementType<OffsetSeq>>()...};
std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")");
for (size_t i = 0; i != NumOffsets - 1; ++i) {
absl::StrAppend(&res, "[", size_[i], "]; @", offsets[i + 1], types[i + 1],
"(", sizes[i + 1], ")");
absl::StrAppend(&res, "[", DebugSize(i), "]; @", offsets[i + 1],
types[i + 1], "(", sizes[i + 1], ")");
}
// NumSizes is a constant that may be zero. Some compilers cannot see that
// inside the if statement "size_[NumSizes - 1]" must be valid.
int last = static_cast<int>(NumSizes) - 1;
if (NumTypes == NumSizes && last >= 0) {
absl::StrAppend(&res, "[", size_[last], "]");
absl::StrAppend(&res, "[", DebugSize(static_cast<size_t>(last)), "]");
}
return res;
}
private:
size_t DebugSize(size_t n) const {
if (n < NumStaticSizes) {
return kStaticSizes[n];
} else {
return size_[n - NumStaticSizes];
}
}
// Arguments of `Layout::Partial()` or `Layout::Layout()`.
size_t size_[NumSizes > 0 ? NumSizes : 1];
size_t size_[NumRuntimeSizes > 0 ? NumRuntimeSizes : 1];
};
template <size_t NumSizes, class... Ts>
// Defining a constexpr static class member variable is redundant and deprecated
// in C++17, but required in C++14.
template <class... Elements, size_t... StaticSizeSeq, size_t... RuntimeSizeSeq,
size_t... SizeSeq, size_t... OffsetSeq>
constexpr std::array<size_t, sizeof...(StaticSizeSeq)> LayoutImpl<
std::tuple<Elements...>, absl::index_sequence<StaticSizeSeq...>,
absl::index_sequence<RuntimeSizeSeq...>, absl::index_sequence<SizeSeq...>,
absl::index_sequence<OffsetSeq...>>::kStaticSizes;
template <class StaticSizeSeq, size_t NumRuntimeSizes, class... Ts>
using LayoutType = LayoutImpl<
std::tuple<Ts...>, absl::make_index_sequence<NumSizes>,
absl::make_index_sequence<adl_barrier::Min(sizeof...(Ts), NumSizes + 1)>>;
std::tuple<Ts...>, StaticSizeSeq,
absl::make_index_sequence<NumRuntimeSizes>,
absl::make_index_sequence<NumRuntimeSizes + StaticSizeSeq::size()>,
absl::make_index_sequence<adl_barrier::Min(
sizeof...(Ts), NumRuntimeSizes + StaticSizeSeq::size() + 1)>>;
} // namespace internal_layout
template <class StaticSizeSeq, class... Ts>
class LayoutWithStaticSizes
: public LayoutType<StaticSizeSeq,
sizeof...(Ts) - adl_barrier::Min(sizeof...(Ts),
StaticSizeSeq::size()),
Ts...> {
private:
using Super =
LayoutType<StaticSizeSeq,
sizeof...(Ts) -
adl_barrier::Min(sizeof...(Ts), StaticSizeSeq::size()),
Ts...>;
// Descriptor of arrays of various types and sizes laid out in memory one after
// another. See the top of the file for documentation.
//
// Check out the public API of internal_layout::LayoutImpl above. The type is
// internal to the library but its methods are public, and they are inherited
// by `Layout`.
template <class... Ts>
class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> {
public:
static_assert(sizeof...(Ts) > 0, "At least one field is required");
static_assert(
absl::conjunction<internal_layout::IsLegalElementType<Ts>...>::value,
"Invalid element type (see IsLegalElementType)");
// The result type of `Partial()` with `NumSizes` arguments.
template <size_t NumSizes>
using PartialType = internal_layout::LayoutType<NumSizes, Ts...>;
using PartialType =
internal_layout::LayoutType<StaticSizeSeq, NumSizes, Ts...>;
// `Layout` knows the element types of the arrays we want to lay out in
// memory but not the number of elements in each array.
@@ -701,14 +752,18 @@ class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> {
// Note: The sizes of the arrays must be specified in number of elements,
// not in bytes.
//
// Requires: `sizeof...(Sizes) <= sizeof...(Ts)`.
// Requires: `sizeof...(Sizes) + NumStaticSizes <= sizeof...(Ts)`.
// Requires: all arguments are convertible to `size_t`.
template <class... Sizes>
static constexpr PartialType<sizeof...(Sizes)> Partial(Sizes&&... sizes) {
static_assert(sizeof...(Sizes) <= sizeof...(Ts), "");
return PartialType<sizeof...(Sizes)>(absl::forward<Sizes>(sizes)...);
static_assert(sizeof...(Sizes) + StaticSizeSeq::size() <= sizeof...(Ts),
"");
return PartialType<sizeof...(Sizes)>(
static_cast<size_t>(std::forward<Sizes>(sizes))...);
}
// Inherit LayoutType's constructor.
//
// Creates a layout with the sizes of all arrays specified. If you know
// only the sizes of the first N arrays (where N can be zero), you can use
// `Partial()` defined above. The constructor is essentially equivalent to
@@ -717,8 +772,69 @@ class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> {
//
// Note: The sizes of the arrays must be specified in number of elements,
// not in bytes.
constexpr explicit Layout(internal_layout::TypeToSize<Ts>... sizes)
: internal_layout::LayoutType<sizeof...(Ts), Ts...>(sizes...) {}
//
// Implementation note: we do this via a `using` declaration instead of
// defining our own explicit constructor because the signature of LayoutType's
// constructor depends on RuntimeSizeSeq, which we don't have access to here.
// If we defined our own constructor here, it would have to use a parameter
// pack and then cast the arguments to size_t when calling the superclass
// constructor, similar to what Partial() does. But that would suffer from the
// same problem that Partial() has, which is that the parameter types are
// inferred from the arguments, which may be signed types, which must then be
// cast to size_t. This can lead to negative values being silently (i.e. with
// no compiler warnings) cast to an unsigned type. Having a constructor with
// size_t parameters helps the compiler generate better warnings about
// potential bad casts, while avoiding false warnings when positive literal
// arguments are used. If an argument is a positive literal integer (e.g.
// `1`), the compiler will understand that it can be safely converted to
// size_t, and hence not generate a warning. But if a negative literal (e.g.
// `-1`) or a variable with signed type is used, then it can generate a
// warning about a potentially unsafe implicit cast. It would be great if we
// could do this for Partial() too, but unfortunately as of C++23 there seems
// to be no way to define a function with a variable number of parameters of a
// certain type, a.k.a. homogeneous function parameter packs. So we're forced
// to choose between explicitly casting the arguments to size_t, which
// suppresses all warnings, even potentially valid ones, or implicitly casting
// them to size_t, which generates bogus warnings whenever literal arguments
// are used, even if they're positive.
using Super::Super;
};
} // namespace internal_layout
// Descriptor of arrays of various types and sizes laid out in memory one after
// another. See the top of the file for documentation.
//
// Check out the public API of internal_layout::LayoutWithStaticSizes and
// internal_layout::LayoutImpl above. Those types are internal to the library
// but their methods are public, and they are inherited by `Layout`.
template <class... Ts>
class Layout : public internal_layout::LayoutWithStaticSizes<
absl::make_index_sequence<0>, Ts...> {
private:
using Super =
internal_layout::LayoutWithStaticSizes<absl::make_index_sequence<0>,
Ts...>;
public:
// If you know the sizes of some or all of the arrays at compile time, you can
// use `WithStaticSizes` or `WithStaticSizeSequence` to create a `Layout` type
// with those sizes baked in. This can help the compiler generate optimal code
// for calculating array offsets and AllocSize().
//
// Like `Partial()`, the N sizes you specify are for the first N arrays, and
// they specify the number of elements in each array, not the number of bytes.
template <class StaticSizeSeq>
using WithStaticSizeSequence =
internal_layout::LayoutWithStaticSizes<StaticSizeSeq, Ts...>;
template <size_t... StaticSizes>
using WithStaticSizes =
WithStaticSizeSequence<std::index_sequence<StaticSizes...>>;
// Inherit LayoutWithStaticSizes's constructor, which requires you to specify
// all the array sizes.
using Super::Super;
};
} // namespace container_internal

View File

@@ -15,6 +15,9 @@
// Every benchmark should have the same performance as the corresponding
// headroom benchmark.
#include <cstddef>
#include <cstdint>
#include "absl/base/internal/raw_logging.h"
#include "absl/container/internal/layout.h"
#include "benchmark/benchmark.h"
@@ -28,6 +31,8 @@ using ::benchmark::DoNotOptimize;
using Int128 = int64_t[2];
constexpr size_t MyAlign(size_t n, size_t m) { return (n + m - 1) & ~(m - 1); }
// This benchmark provides the upper bound on performance for BM_OffsetConstant.
template <size_t Offset, class... Ts>
void BM_OffsetConstantHeadroom(benchmark::State& state) {
@@ -36,6 +41,15 @@ void BM_OffsetConstantHeadroom(benchmark::State& state) {
}
}
template <size_t Offset, class... Ts>
void BM_OffsetConstantStatic(benchmark::State& state) {
using L = typename Layout<Ts...>::template WithStaticSizes<3, 5, 7>;
ABSL_RAW_CHECK(L::Partial().template Offset<3>() == Offset, "Invalid offset");
for (auto _ : state) {
DoNotOptimize(L::Partial().template Offset<3>());
}
}
template <size_t Offset, class... Ts>
void BM_OffsetConstant(benchmark::State& state) {
using L = Layout<Ts...>;
@@ -46,14 +60,74 @@ void BM_OffsetConstant(benchmark::State& state) {
}
}
template <size_t Offset, class... Ts>
void BM_OffsetConstantIndirect(benchmark::State& state) {
using L = Layout<Ts...>;
auto p = L::Partial(3, 5, 7);
ABSL_RAW_CHECK(p.template Offset<3>() == Offset, "Invalid offset");
for (auto _ : state) {
DoNotOptimize(p);
DoNotOptimize(p.template Offset<3>());
}
}
template <class... Ts>
size_t PartialOffset(size_t k);
template <>
size_t PartialOffset<int8_t, int16_t, int32_t, Int128>(size_t k) {
constexpr size_t o = MyAlign(MyAlign(3 * 1, 2) + 5 * 2, 4);
return MyAlign(o + k * 4, 8);
}
template <>
size_t PartialOffset<Int128, int32_t, int16_t, int8_t>(size_t k) {
// No alignment is necessary.
return 3 * 16 + 5 * 4 + k * 2;
}
// This benchmark provides the upper bound on performance for BM_OffsetVariable.
template <size_t Offset, class... Ts>
void BM_OffsetPartialHeadroom(benchmark::State& state) {
size_t k = 7;
ABSL_RAW_CHECK(PartialOffset<Ts...>(k) == Offset, "Invalid offset");
for (auto _ : state) {
DoNotOptimize(k);
DoNotOptimize(PartialOffset<Ts...>(k));
}
}
template <size_t Offset, class... Ts>
void BM_OffsetPartialStatic(benchmark::State& state) {
using L = typename Layout<Ts...>::template WithStaticSizes<3, 5>;
size_t k = 7;
ABSL_RAW_CHECK(L::Partial(k).template Offset<3>() == Offset,
"Invalid offset");
for (auto _ : state) {
DoNotOptimize(k);
DoNotOptimize(L::Partial(k).template Offset<3>());
}
}
template <size_t Offset, class... Ts>
void BM_OffsetPartial(benchmark::State& state) {
using L = Layout<Ts...>;
size_t k = 7;
ABSL_RAW_CHECK(L::Partial(3, 5, k).template Offset<3>() == Offset,
"Invalid offset");
for (auto _ : state) {
DoNotOptimize(k);
DoNotOptimize(L::Partial(3, 5, k).template Offset<3>());
}
}
template <class... Ts>
size_t VariableOffset(size_t n, size_t m, size_t k);
template <>
size_t VariableOffset<int8_t, int16_t, int32_t, Int128>(size_t n, size_t m,
size_t k) {
auto Align = [](size_t n, size_t m) { return (n + m - 1) & ~(m - 1); };
return Align(Align(Align(n * 1, 2) + m * 2, 4) + k * 4, 8);
return MyAlign(MyAlign(MyAlign(n * 1, 2) + m * 2, 4) + k * 4, 8);
}
template <>
@@ -94,6 +168,75 @@ void BM_OffsetVariable(benchmark::State& state) {
}
}
template <class... Ts>
size_t AllocSize(size_t x);
template <>
size_t AllocSize<int8_t, int16_t, int32_t, Int128>(size_t x) {
constexpr size_t o =
Layout<int8_t, int16_t, int32_t, Int128>::Partial(3, 5, 7)
.template Offset<Int128>();
return o + sizeof(Int128) * x;
}
template <>
size_t AllocSize<Int128, int32_t, int16_t, int8_t>(size_t x) {
constexpr size_t o =
Layout<Int128, int32_t, int16_t, int8_t>::Partial(3, 5, 7)
.template Offset<int8_t>();
return o + sizeof(int8_t) * x;
}
// This benchmark provides the upper bound on performance for BM_AllocSize
template <size_t Size, class... Ts>
void BM_AllocSizeHeadroom(benchmark::State& state) {
size_t x = 9;
ABSL_RAW_CHECK(AllocSize<Ts...>(x) == Size, "Invalid size");
for (auto _ : state) {
DoNotOptimize(x);
DoNotOptimize(AllocSize<Ts...>(x));
}
}
template <size_t Size, class... Ts>
void BM_AllocSizeStatic(benchmark::State& state) {
using L = typename Layout<Ts...>::template WithStaticSizes<3, 5, 7>;
size_t x = 9;
ABSL_RAW_CHECK(L(x).AllocSize() == Size, "Invalid offset");
for (auto _ : state) {
DoNotOptimize(x);
DoNotOptimize(L(x).AllocSize());
}
}
template <size_t Size, class... Ts>
void BM_AllocSize(benchmark::State& state) {
using L = Layout<Ts...>;
size_t n = 3;
size_t m = 5;
size_t k = 7;
size_t x = 9;
ABSL_RAW_CHECK(L(n, m, k, x).AllocSize() == Size, "Invalid offset");
for (auto _ : state) {
DoNotOptimize(n);
DoNotOptimize(m);
DoNotOptimize(k);
DoNotOptimize(x);
DoNotOptimize(L(n, m, k, x).AllocSize());
}
}
template <size_t Size, class... Ts>
void BM_AllocSizeIndirect(benchmark::State& state) {
using L = Layout<Ts...>;
auto l = L(3, 5, 7, 9);
ABSL_RAW_CHECK(l.AllocSize() == Size, "Invalid offset");
for (auto _ : state) {
DoNotOptimize(l);
DoNotOptimize(l.AllocSize());
}
}
// Run all benchmarks in two modes:
//
// Layout with padding: int8_t[3], int16_t[5], int32_t[7], Int128[?].
@@ -106,16 +249,46 @@ void BM_OffsetVariable(benchmark::State& state) {
OFFSET_BENCHMARK(BM_OffsetConstantHeadroom, 48, int8_t, int16_t, int32_t,
Int128);
OFFSET_BENCHMARK(BM_OffsetConstantStatic, 48, int8_t, int16_t, int32_t, Int128);
OFFSET_BENCHMARK(BM_OffsetConstant, 48, int8_t, int16_t, int32_t, Int128);
OFFSET_BENCHMARK(BM_OffsetConstantIndirect, 48, int8_t, int16_t, int32_t,
Int128);
OFFSET_BENCHMARK(BM_OffsetConstantHeadroom, 82, Int128, int32_t, int16_t,
int8_t);
OFFSET_BENCHMARK(BM_OffsetConstantStatic, 82, Int128, int32_t, int16_t, int8_t);
OFFSET_BENCHMARK(BM_OffsetConstant, 82, Int128, int32_t, int16_t, int8_t);
OFFSET_BENCHMARK(BM_OffsetConstantIndirect, 82, Int128, int32_t, int16_t,
int8_t);
OFFSET_BENCHMARK(BM_OffsetPartialHeadroom, 48, int8_t, int16_t, int32_t,
Int128);
OFFSET_BENCHMARK(BM_OffsetPartialStatic, 48, int8_t, int16_t, int32_t, Int128);
OFFSET_BENCHMARK(BM_OffsetPartial, 48, int8_t, int16_t, int32_t, Int128);
OFFSET_BENCHMARK(BM_OffsetPartialHeadroom, 82, Int128, int32_t, int16_t,
int8_t);
OFFSET_BENCHMARK(BM_OffsetPartialStatic, 82, Int128, int32_t, int16_t, int8_t);
OFFSET_BENCHMARK(BM_OffsetPartial, 82, Int128, int32_t, int16_t, int8_t);
OFFSET_BENCHMARK(BM_OffsetVariableHeadroom, 48, int8_t, int16_t, int32_t,
Int128);
OFFSET_BENCHMARK(BM_OffsetVariable, 48, int8_t, int16_t, int32_t, Int128);
OFFSET_BENCHMARK(BM_OffsetVariableHeadroom, 82, Int128, int32_t, int16_t,
int8_t);
OFFSET_BENCHMARK(BM_OffsetVariable, 82, Int128, int32_t, int16_t, int8_t);
OFFSET_BENCHMARK(BM_AllocSizeHeadroom, 192, int8_t, int16_t, int32_t, Int128);
OFFSET_BENCHMARK(BM_AllocSizeStatic, 192, int8_t, int16_t, int32_t, Int128);
OFFSET_BENCHMARK(BM_AllocSize, 192, int8_t, int16_t, int32_t, Int128);
OFFSET_BENCHMARK(BM_AllocSizeIndirect, 192, int8_t, int16_t, int32_t, Int128);
OFFSET_BENCHMARK(BM_AllocSizeHeadroom, 91, Int128, int32_t, int16_t, int8_t);
OFFSET_BENCHMARK(BM_AllocSizeStatic, 91, Int128, int32_t, int16_t, int8_t);
OFFSET_BENCHMARK(BM_AllocSize, 91, Int128, int32_t, int16_t, int8_t);
OFFSET_BENCHMARK(BM_AllocSizeIndirect, 91, Int128, int32_t, int16_t, int8_t);
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END

View File

@@ -68,9 +68,7 @@ struct alignas(8) Int128 {
// int64_t is *not* 8-byte aligned on all platforms!
struct alignas(8) Int64 {
int64_t a;
friend bool operator==(Int64 lhs, Int64 rhs) {
return lhs.a == rhs.a;
}
friend bool operator==(Int64 lhs, Int64 rhs) { return lhs.a == rhs.a; }
};
// Properties of types that this test relies on.
@@ -271,6 +269,35 @@ TEST(Layout, Offsets) {
}
}
TEST(Layout, StaticOffsets) {
using L = Layout<int8_t, int32_t, Int128>;
{
using SL = L::WithStaticSizes<>;
EXPECT_THAT(SL::Partial().Offsets(), ElementsAre(0));
EXPECT_THAT(SL::Partial(5).Offsets(), ElementsAre(0, 8));
EXPECT_THAT(SL::Partial(5, 3, 1).Offsets(), ElementsAre(0, 8, 24));
EXPECT_THAT(SL(5, 3, 1).Offsets(), ElementsAre(0, 8, 24));
}
{
using SL = L::WithStaticSizes<5>;
EXPECT_THAT(SL::Partial().Offsets(), ElementsAre(0, 8));
EXPECT_THAT(SL::Partial(3).Offsets(), ElementsAre(0, 8, 24));
EXPECT_THAT(SL::Partial(3, 1).Offsets(), ElementsAre(0, 8, 24));
EXPECT_THAT(SL(3, 1).Offsets(), ElementsAre(0, 8, 24));
}
{
using SL = L::WithStaticSizes<5, 3>;
EXPECT_THAT(SL::Partial().Offsets(), ElementsAre(0, 8, 24));
EXPECT_THAT(SL::Partial(1).Offsets(), ElementsAre(0, 8, 24));
EXPECT_THAT(SL(1).Offsets(), ElementsAre(0, 8, 24));
}
{
using SL = L::WithStaticSizes<5, 3, 1>;
EXPECT_THAT(SL::Partial().Offsets(), ElementsAre(0, 8, 24));
EXPECT_THAT(SL().Offsets(), ElementsAre(0, 8, 24));
}
}
TEST(Layout, AllocSize) {
{
using L = Layout<int32_t>;
@@ -295,6 +322,30 @@ TEST(Layout, AllocSize) {
}
}
TEST(Layout, StaticAllocSize) {
using L = Layout<int8_t, int32_t, Int128>;
{
using SL = L::WithStaticSizes<>;
EXPECT_EQ(136, SL::Partial(3, 5, 7).AllocSize());
EXPECT_EQ(136, SL(3, 5, 7).AllocSize());
}
{
using SL = L::WithStaticSizes<3>;
EXPECT_EQ(136, SL::Partial(5, 7).AllocSize());
EXPECT_EQ(136, SL(5, 7).AllocSize());
}
{
using SL = L::WithStaticSizes<3, 5>;
EXPECT_EQ(136, SL::Partial(7).AllocSize());
EXPECT_EQ(136, SL(7).AllocSize());
}
{
using SL = L::WithStaticSizes<3, 5, 7>;
EXPECT_EQ(136, SL::Partial().AllocSize());
EXPECT_EQ(136, SL().AllocSize());
}
}
TEST(Layout, SizeByIndex) {
{
using L = Layout<int32_t>;
@@ -370,6 +421,27 @@ TEST(Layout, Sizes) {
}
}
TEST(Layout, StaticSize) {
using L = Layout<int8_t, int32_t, Int128>;
{
using SL = L::WithStaticSizes<>;
EXPECT_THAT(SL::Partial().Sizes(), ElementsAre());
EXPECT_THAT(SL::Partial(3).Size<0>(), 3);
EXPECT_THAT(SL::Partial(3).Size<int8_t>(), 3);
EXPECT_THAT(SL::Partial(3).Sizes(), ElementsAre(3));
EXPECT_THAT(SL::Partial(3, 5, 7).Size<0>(), 3);
EXPECT_THAT(SL::Partial(3, 5, 7).Size<int8_t>(), 3);
EXPECT_THAT(SL::Partial(3, 5, 7).Size<2>(), 7);
EXPECT_THAT(SL::Partial(3, 5, 7).Size<Int128>(), 7);
EXPECT_THAT(SL::Partial(3, 5, 7).Sizes(), ElementsAre(3, 5, 7));
EXPECT_THAT(SL(3, 5, 7).Size<0>(), 3);
EXPECT_THAT(SL(3, 5, 7).Size<int8_t>(), 3);
EXPECT_THAT(SL(3, 5, 7).Size<2>(), 7);
EXPECT_THAT(SL(3, 5, 7).Size<Int128>(), 7);
EXPECT_THAT(SL(3, 5, 7).Sizes(), ElementsAre(3, 5, 7));
}
}
TEST(Layout, PointerByIndex) {
alignas(max_align_t) const unsigned char p[100] = {0};
{
@@ -720,6 +792,61 @@ TEST(Layout, MutablePointers) {
}
}
TEST(Layout, StaticPointers) {
alignas(max_align_t) const unsigned char p[100] = {0};
using L = Layout<int8_t, int8_t, Int128>;
{
const auto x = L::WithStaticSizes<>::Partial();
EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)),
Type<std::tuple<const int8_t*>>(x.Pointers(p)));
}
{
const auto x = L::WithStaticSizes<>::Partial(1);
EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)),
(Type<std::tuple<const int8_t*, const int8_t*>>(x.Pointers(p))));
}
{
const auto x = L::WithStaticSizes<1>::Partial();
EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)),
(Type<std::tuple<const int8_t*, const int8_t*>>(x.Pointers(p))));
}
{
const auto x = L::WithStaticSizes<>::Partial(1, 2, 3);
EXPECT_EQ(
std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
(Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
x.Pointers(p))));
}
{
const auto x = L::WithStaticSizes<1>::Partial(2, 3);
EXPECT_EQ(
std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
(Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
x.Pointers(p))));
}
{
const auto x = L::WithStaticSizes<1, 2>::Partial(3);
EXPECT_EQ(
std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
(Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
x.Pointers(p))));
}
{
const auto x = L::WithStaticSizes<1, 2, 3>::Partial();
EXPECT_EQ(
std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
(Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
x.Pointers(p))));
}
{
const L::WithStaticSizes<1, 2, 3> x;
EXPECT_EQ(
std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
(Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
x.Pointers(p))));
}
}
TEST(Layout, SliceByIndexSize) {
alignas(max_align_t) const unsigned char p[100] = {0};
{
@@ -769,7 +896,6 @@ TEST(Layout, SliceByTypeSize) {
EXPECT_EQ(7, L(3, 5, 7).Slice<Int128>(p).size());
}
}
TEST(Layout, MutableSliceByIndexSize) {
alignas(max_align_t) unsigned char p[100] = {0};
{
@@ -820,6 +946,39 @@ TEST(Layout, MutableSliceByTypeSize) {
}
}
TEST(Layout, StaticSliceSize) {
alignas(max_align_t) const unsigned char cp[100] = {0};
alignas(max_align_t) unsigned char p[100] = {0};
using L = Layout<int8_t, int32_t, Int128>;
using SL = L::WithStaticSizes<3, 5>;
EXPECT_EQ(3, SL::Partial().Slice<0>(cp).size());
EXPECT_EQ(3, SL::Partial().Slice<int8_t>(cp).size());
EXPECT_EQ(3, SL::Partial(7).Slice<0>(cp).size());
EXPECT_EQ(3, SL::Partial(7).Slice<int8_t>(cp).size());
EXPECT_EQ(5, SL::Partial().Slice<1>(cp).size());
EXPECT_EQ(5, SL::Partial().Slice<int32_t>(cp).size());
EXPECT_EQ(5, SL::Partial(7).Slice<1>(cp).size());
EXPECT_EQ(5, SL::Partial(7).Slice<int32_t>(cp).size());
EXPECT_EQ(7, SL::Partial(7).Slice<2>(cp).size());
EXPECT_EQ(7, SL::Partial(7).Slice<Int128>(cp).size());
EXPECT_EQ(3, SL::Partial().Slice<0>(p).size());
EXPECT_EQ(3, SL::Partial().Slice<int8_t>(p).size());
EXPECT_EQ(3, SL::Partial(7).Slice<0>(p).size());
EXPECT_EQ(3, SL::Partial(7).Slice<int8_t>(p).size());
EXPECT_EQ(5, SL::Partial().Slice<1>(p).size());
EXPECT_EQ(5, SL::Partial().Slice<int32_t>(p).size());
EXPECT_EQ(5, SL::Partial(7).Slice<1>(p).size());
EXPECT_EQ(5, SL::Partial(7).Slice<int32_t>(p).size());
EXPECT_EQ(7, SL::Partial(7).Slice<2>(p).size());
EXPECT_EQ(7, SL::Partial(7).Slice<Int128>(p).size());
}
TEST(Layout, SliceByIndexData) {
alignas(max_align_t) const unsigned char p[100] = {0};
{
@@ -1230,6 +1389,39 @@ TEST(Layout, MutableSliceByTypeData) {
}
}
TEST(Layout, StaticSliceData) {
alignas(max_align_t) const unsigned char cp[100] = {0};
alignas(max_align_t) unsigned char p[100] = {0};
using L = Layout<int8_t, int32_t, Int128>;
using SL = L::WithStaticSizes<3, 5>;
EXPECT_EQ(0, Distance(cp, SL::Partial().Slice<0>(cp).data()));
EXPECT_EQ(0, Distance(cp, SL::Partial().Slice<int8_t>(cp).data()));
EXPECT_EQ(0, Distance(cp, SL::Partial(7).Slice<0>(cp).data()));
EXPECT_EQ(0, Distance(cp, SL::Partial(7).Slice<int8_t>(cp).data()));
EXPECT_EQ(4, Distance(cp, SL::Partial().Slice<1>(cp).data()));
EXPECT_EQ(4, Distance(cp, SL::Partial().Slice<int32_t>(cp).data()));
EXPECT_EQ(4, Distance(cp, SL::Partial(7).Slice<1>(cp).data()));
EXPECT_EQ(4, Distance(cp, SL::Partial(7).Slice<int32_t>(cp).data()));
EXPECT_EQ(24, Distance(cp, SL::Partial(7).Slice<2>(cp).data()));
EXPECT_EQ(24, Distance(cp, SL::Partial(7).Slice<Int128>(cp).data()));
EXPECT_EQ(0, Distance(p, SL::Partial().Slice<0>(p).data()));
EXPECT_EQ(0, Distance(p, SL::Partial().Slice<int8_t>(p).data()));
EXPECT_EQ(0, Distance(p, SL::Partial(7).Slice<0>(p).data()));
EXPECT_EQ(0, Distance(p, SL::Partial(7).Slice<int8_t>(p).data()));
EXPECT_EQ(4, Distance(p, SL::Partial().Slice<1>(p).data()));
EXPECT_EQ(4, Distance(p, SL::Partial().Slice<int32_t>(p).data()));
EXPECT_EQ(4, Distance(p, SL::Partial(7).Slice<1>(p).data()));
EXPECT_EQ(4, Distance(p, SL::Partial(7).Slice<int32_t>(p).data()));
EXPECT_EQ(24, Distance(p, SL::Partial(7).Slice<2>(p).data()));
EXPECT_EQ(24, Distance(p, SL::Partial(7).Slice<Int128>(p).data()));
}
MATCHER_P(IsSameSlice, slice, "") {
return arg.size() == slice.size() && arg.data() == slice.data();
}
@@ -1339,6 +1531,43 @@ TEST(Layout, MutableSlices) {
}
}
TEST(Layout, StaticSlices) {
alignas(max_align_t) const unsigned char cp[100] = {0};
alignas(max_align_t) unsigned char p[100] = {0};
using SL = Layout<int8_t, int8_t, Int128>::WithStaticSizes<1, 2>;
{
const auto x = SL::Partial();
EXPECT_THAT(
(Type<std::tuple<Span<const int8_t>, Span<const int8_t>>>(
x.Slices(cp))),
Tuple(IsSameSlice(x.Slice<0>(cp)), IsSameSlice(x.Slice<1>(cp))));
EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>>>(x.Slices(p))),
Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p))));
}
{
const auto x = SL::Partial(3);
EXPECT_THAT((Type<std::tuple<Span<const int8_t>, Span<const int8_t>,
Span<const Int128>>>(x.Slices(cp))),
Tuple(IsSameSlice(x.Slice<0>(cp)), IsSameSlice(x.Slice<1>(cp)),
IsSameSlice(x.Slice<2>(cp))));
EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
x.Slices(p))),
Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
IsSameSlice(x.Slice<2>(p))));
}
{
const SL x(3);
EXPECT_THAT((Type<std::tuple<Span<const int8_t>, Span<const int8_t>,
Span<const Int128>>>(x.Slices(cp))),
Tuple(IsSameSlice(x.Slice<0>(cp)), IsSameSlice(x.Slice<1>(cp)),
IsSameSlice(x.Slice<2>(cp))));
EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
x.Slices(p))),
Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
IsSameSlice(x.Slice<2>(p))));
}
}
TEST(Layout, UnalignedTypes) {
constexpr Layout<unsigned char, unsigned char, unsigned char> x(1, 2, 3);
alignas(max_align_t) unsigned char p[x.AllocSize() + 1];
@@ -1377,6 +1606,36 @@ TEST(Layout, Alignment) {
static_assert(Layout<int32_t, Int64, int8_t>::Alignment() == 8, "");
static_assert(Layout<Int64, int8_t, int32_t>::Alignment() == 8, "");
static_assert(Layout<Int64, int32_t, int8_t>::Alignment() == 8, "");
static_assert(Layout<Int64, int32_t, int8_t>::Alignment() == 8, "");
static_assert(
Layout<Aligned<int8_t, 64>>::WithStaticSizes<>::Alignment() == 64, "");
static_assert(
Layout<Aligned<int8_t, 64>>::WithStaticSizes<2>::Alignment() == 64, "");
}
TEST(Layout, StaticAlignment) {
static_assert(Layout<int8_t>::WithStaticSizes<>::Alignment() == 1, "");
static_assert(Layout<int8_t>::WithStaticSizes<0>::Alignment() == 1, "");
static_assert(Layout<int8_t>::WithStaticSizes<7>::Alignment() == 1, "");
static_assert(Layout<int32_t>::WithStaticSizes<>::Alignment() == 4, "");
static_assert(Layout<int32_t>::WithStaticSizes<0>::Alignment() == 4, "");
static_assert(Layout<int32_t>::WithStaticSizes<3>::Alignment() == 4, "");
static_assert(
Layout<Aligned<int8_t, 64>>::WithStaticSizes<>::Alignment() == 64, "");
static_assert(
Layout<Aligned<int8_t, 64>>::WithStaticSizes<0>::Alignment() == 64, "");
static_assert(
Layout<Aligned<int8_t, 64>>::WithStaticSizes<2>::Alignment() == 64, "");
static_assert(
Layout<int32_t, Int64, int8_t>::WithStaticSizes<>::Alignment() == 8, "");
static_assert(
Layout<int32_t, Int64, int8_t>::WithStaticSizes<0, 0, 0>::Alignment() ==
8,
"");
static_assert(
Layout<int32_t, Int64, int8_t>::WithStaticSizes<1, 1, 1>::Alignment() ==
8,
"");
}
TEST(Layout, ConstexprPartial) {
@@ -1384,6 +1643,15 @@ TEST(Layout, ConstexprPartial) {
constexpr Layout<unsigned char, Aligned<unsigned char, 2 * M>> x(1, 3);
static_assert(x.Partial(1).template Offset<1>() == 2 * M, "");
}
TEST(Layout, StaticConstexpr) {
constexpr size_t M = alignof(max_align_t);
using L = Layout<unsigned char, Aligned<unsigned char, 2 * M>>;
using SL = L::WithStaticSizes<1, 3>;
constexpr SL x;
static_assert(x.Offset<1>() == 2 * M, "");
}
// [from, to)
struct Region {
size_t from;
@@ -1458,6 +1726,41 @@ TEST(Layout, PoisonPadding) {
}
}
TEST(Layout, StaticPoisonPadding) {
using L = Layout<int8_t, Int64, int32_t, Int128>;
using SL = L::WithStaticSizes<1, 2>;
constexpr size_t n = L::Partial(1, 2, 3, 4).AllocSize();
{
constexpr auto x = SL::Partial();
alignas(max_align_t) const unsigned char c[n] = {};
x.PoisonPadding(c);
EXPECT_EQ(x.Slices(c), x.Slices(c));
ExpectPoisoned(c, {{1, 8}});
}
{
constexpr auto x = SL::Partial(3);
alignas(max_align_t) const unsigned char c[n] = {};
x.PoisonPadding(c);
EXPECT_EQ(x.Slices(c), x.Slices(c));
ExpectPoisoned(c, {{1, 8}, {36, 40}});
}
{
constexpr auto x = SL::Partial(3, 4);
alignas(max_align_t) const unsigned char c[n] = {};
x.PoisonPadding(c);
EXPECT_EQ(x.Slices(c), x.Slices(c));
ExpectPoisoned(c, {{1, 8}, {36, 40}});
}
{
constexpr SL x(3, 4);
alignas(max_align_t) const unsigned char c[n] = {};
x.PoisonPadding(c);
EXPECT_EQ(x.Slices(c), x.Slices(c));
ExpectPoisoned(c, {{1, 8}, {36, 40}});
}
}
TEST(Layout, DebugString) {
{
constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial();
@@ -1500,6 +1803,62 @@ TEST(Layout, DebugString) {
}
}
TEST(Layout, StaticDebugString) {
{
constexpr auto x =
Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<>::Partial();
EXPECT_EQ("@0<signed char>(1)", x.DebugString());
}
{
constexpr auto x =
Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<>::Partial(1);
EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)", x.DebugString());
}
{
constexpr auto x =
Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<1>::Partial();
EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)", x.DebugString());
}
{
constexpr auto x =
Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<>::Partial(1,
2);
EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)",
x.DebugString());
}
{
constexpr auto x =
Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<1>::Partial(2);
EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)",
x.DebugString());
}
{
constexpr auto x = Layout<int8_t, int32_t, int8_t,
Int128>::WithStaticSizes<1, 2>::Partial();
EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)",
x.DebugString());
}
{
constexpr auto x = Layout<int8_t, int32_t, int8_t,
Int128>::WithStaticSizes<1, 2, 3, 4>::Partial();
EXPECT_EQ(
"@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
"@16" +
Int128::Name() + "(16)[4]",
x.DebugString());
}
{
constexpr Layout<int8_t, int32_t, int8_t, Int128>::WithStaticSizes<1, 2, 3,
4>
x;
EXPECT_EQ(
"@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
"@16" +
Int128::Name() + "(16)[4]",
x.DebugString());
}
}
TEST(Layout, CharTypes) {
constexpr Layout<int32_t> x(1);
alignas(max_align_t) char c[x.AllocSize()] = {};
@@ -1638,6 +1997,35 @@ TEST(CompactString, Works) {
EXPECT_STREQ("hello", s.c_str());
}
// Same as the previous CompactString example, except we set the first array
// size to 1 statically, since we know it is always 1. This allows us to compute
// the offset of the character array at compile time.
class StaticCompactString {
public:
StaticCompactString(const char* s = "") { // NOLINT
const size_t size = strlen(s);
const SL layout(size + 1);
p_.reset(new unsigned char[layout.AllocSize()]);
layout.PoisonPadding(p_.get());
*layout.Pointer<size_t>(p_.get()) = size;
memcpy(layout.Pointer<char>(p_.get()), s, size + 1);
}
size_t size() const { return *SL::Partial().Pointer<size_t>(p_.get()); }
const char* c_str() const { return SL::Partial().Pointer<char>(p_.get()); }
private:
using SL = Layout<size_t, char>::WithStaticSizes<1>;
std::unique_ptr<unsigned char[]> p_;
};
TEST(StaticCompactString, Works) {
StaticCompactString s = "hello";
EXPECT_EQ(5, s.size());
EXPECT_STREQ("hello", s.c_str());
}
} // namespace example
} // namespace

View File

@@ -23,19 +23,24 @@
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/endian.h"
#include "absl/base/optimization.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hashtablez_sampler.h"
#include "absl/hash/hash.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
// We have space for `growth_left` before a single block of control bytes. A
// Represents a control byte corresponding to a full slot with arbitrary hash.
constexpr ctrl_t ZeroCtrlT() { return static_cast<ctrl_t>(0); }
// We have space for `growth_info` before a single block of control bytes. A
// single block of empty control bytes for tables without any slots allocated.
// This enables removing a branch in the hot path of find(). In order to ensure
// that the control bytes are aligned to 16, we have 16 bytes before the control
// bytes even though growth_left only needs 8.
constexpr ctrl_t ZeroCtrlT() { return static_cast<ctrl_t>(0); }
// bytes even though growth_info only needs 8.
alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[32] = {
ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(),
ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(), ZeroCtrlT(),
@@ -46,6 +51,18 @@ alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[32] = {
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty};
// We need one full byte followed by a sentinel byte for iterator::operator++ to
// work. We have a full group after kSentinel to be safe (in case operator++ is
// changed to read a full group).
ABSL_CONST_INIT ABSL_DLL const ctrl_t kSooControl[17] = {
ZeroCtrlT(), ctrl_t::kSentinel, ZeroCtrlT(), ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
ctrl_t::kEmpty};
static_assert(NumControlBytes(SooCapacity()) <= 17,
"kSooControl capacity too small");
#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
constexpr size_t Group::kWidth;
#endif
@@ -104,10 +121,25 @@ bool CommonFieldsGenerationInfoEnabled::should_rehash_for_bug_detection_on_move(
return ShouldRehashForBugDetection(ctrl, capacity);
}
bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl) {
bool ShouldInsertBackwardsForDebug(size_t capacity, size_t hash,
const ctrl_t* ctrl) {
// To avoid problems with weak hashes and single bit tests, we use % 13.
// TODO(kfm,sbenza): revisit after we do unconditional mixing
return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
return !is_small(capacity) && (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
}
size_t PrepareInsertAfterSoo(size_t hash, size_t slot_size,
CommonFields& common) {
assert(common.capacity() == NextCapacity(SooCapacity()));
// After resize from capacity 1 to 3, we always have exactly the slot with
// index 1 occupied, so we need to insert either at index 0 or index 2.
assert(HashSetResizeHelper::SooSlotIndex() == 1);
PrepareInsertCommon(common);
const size_t offset = H1(hash, common.control()) & 2;
common.growth_info().OverwriteEmptyAsFull();
SetCtrlInSingleGroupTable(common, offset, H2(hash), slot_size);
common.infoz().RecordInsert(hash, /*distance_from_desired=*/0);
return offset;
}
void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
@@ -128,6 +160,8 @@ FindInfo find_first_non_full_outofline(const CommonFields& common,
return find_first_non_full(common, hash);
}
namespace {
// Returns the address of the slot just after slot assuming each slot has the
// specified size.
static inline void* NextSlot(void* slot, size_t slot_size) {
@@ -140,8 +174,22 @@ static inline void* PrevSlot(void* slot, size_t slot_size) {
return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot) - slot_size);
}
void DropDeletesWithoutResize(CommonFields& common, const void* hash_fn,
const PolicyFunctions& policy, void* tmp_space) {
// Finds guaranteed to exists empty slot from the given position.
// NOTE: this function is almost never triggered inside of the
// DropDeletesWithoutResize, so we keep it simple.
// The table is rather sparse, so empty slot will be found very quickly.
size_t FindEmptySlot(size_t start, size_t end, const ctrl_t* ctrl) {
for (size_t i = start; i < end; ++i) {
if (IsEmpty(ctrl[i])) {
return i;
}
}
assert(false && "no empty slot");
return ~size_t{};
}
void DropDeletesWithoutResize(CommonFields& common,
const PolicyFunctions& policy) {
void* set = &common;
void* slot_array = common.slot_array();
const size_t capacity = common.capacity();
@@ -165,15 +213,26 @@ void DropDeletesWithoutResize(CommonFields& common, const void* hash_fn,
// repeat procedure for current slot with moved from element (target)
ctrl_t* ctrl = common.control();
ConvertDeletedToEmptyAndFullToDeleted(ctrl, capacity);
const void* hash_fn = policy.hash_fn(common);
auto hasher = policy.hash_slot;
auto transfer = policy.transfer;
const size_t slot_size = policy.slot_size;
size_t total_probe_length = 0;
void* slot_ptr = SlotAddress(slot_array, 0, slot_size);
// The index of an empty slot that can be used as temporary memory for
// the swap operation.
constexpr size_t kUnknownId = ~size_t{};
size_t tmp_space_id = kUnknownId;
for (size_t i = 0; i != capacity;
++i, slot_ptr = NextSlot(slot_ptr, slot_size)) {
assert(slot_ptr == SlotAddress(slot_array, i, slot_size));
if (IsEmpty(ctrl[i])) {
tmp_space_id = i;
continue;
}
if (!IsDeleted(ctrl[i])) continue;
const size_t hash = (*hasher)(hash_fn, slot_ptr);
const FindInfo target = find_first_non_full(common, hash);
@@ -202,16 +261,26 @@ void DropDeletesWithoutResize(CommonFields& common, const void* hash_fn,
SetCtrl(common, new_i, H2(hash), slot_size);
(*transfer)(set, new_slot_ptr, slot_ptr);
SetCtrl(common, i, ctrl_t::kEmpty, slot_size);
// Initialize or change empty space id.
tmp_space_id = i;
} else {
assert(IsDeleted(ctrl[new_i]));
SetCtrl(common, new_i, H2(hash), slot_size);
// Until we are done rehashing, DELETED marks previously FULL slots.
if (tmp_space_id == kUnknownId) {
tmp_space_id = FindEmptySlot(i + 1, capacity, ctrl);
}
void* tmp_space = SlotAddress(slot_array, tmp_space_id, slot_size);
SanitizerUnpoisonMemoryRegion(tmp_space, slot_size);
// Swap i and new_i elements.
(*transfer)(set, tmp_space, new_slot_ptr);
(*transfer)(set, new_slot_ptr, slot_ptr);
(*transfer)(set, slot_ptr, tmp_space);
SanitizerPoisonMemoryRegion(tmp_space, slot_size);
// repeat the processing of the ith slot
--i;
slot_ptr = PrevSlot(slot_ptr, slot_size);
@@ -238,6 +307,8 @@ static bool WasNeverFull(CommonFields& c, size_t index) {
Group::kWidth;
}
} // namespace
void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size) {
assert(IsFull(c.control()[index]) && "erasing a dangling iterator");
c.decrement_size();
@@ -245,17 +316,19 @@ void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size) {
if (WasNeverFull(c, index)) {
SetCtrl(c, index, ctrl_t::kEmpty, slot_size);
c.set_growth_left(c.growth_left() + 1);
c.growth_info().OverwriteFullAsEmpty();
return;
}
c.growth_info().OverwriteFullAsDeleted();
SetCtrl(c, index, ctrl_t::kDeleted, slot_size);
}
void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
bool reuse) {
bool reuse, bool soo_enabled) {
c.set_size(0);
if (reuse) {
assert(!soo_enabled || c.capacity() > SooCapacity());
ResetCtrl(c, policy.slot_size);
ResetGrowthLeft(c);
c.infoz().RecordStorageChanged(0, c.capacity());
@@ -263,118 +336,308 @@ void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
// We need to record infoz before calling dealloc, which will unregister
// infoz.
c.infoz().RecordClearedReservation();
c.infoz().RecordStorageChanged(0, 0);
c.infoz().RecordStorageChanged(0, soo_enabled ? SooCapacity() : 0);
(*policy.dealloc)(c, policy);
c.set_control(EmptyGroup());
c.set_generation_ptr(EmptyGeneration());
c.set_slots(nullptr);
c.set_capacity(0);
c = soo_enabled ? CommonFields{soo_tag_t{}} : CommonFields{};
}
}
void HashSetResizeHelper::GrowIntoSingleGroupShuffleControlBytes(
ctrl_t* new_ctrl, size_t new_capacity) const {
ctrl_t* __restrict new_ctrl, size_t new_capacity) const {
assert(is_single_group(new_capacity));
constexpr size_t kHalfWidth = Group::kWidth / 2;
constexpr size_t kQuarterWidth = Group::kWidth / 4;
assert(old_capacity_ < kHalfWidth);
static_assert(sizeof(uint64_t) >= kHalfWidth,
"Group size is too large. The ctrl bytes for half a group must "
"fit into a uint64_t for this implementation.");
static_assert(sizeof(uint64_t) <= Group::kWidth,
"Group size is too small. The ctrl bytes for a group must "
"cover a uint64_t for this implementation.");
const size_t half_old_capacity = old_capacity_ / 2;
// NOTE: operations are done with compile time known size = kHalfWidth.
// Compiler optimizes that into single ASM operation.
// Copy second half of bytes to the beginning.
// We potentially copy more bytes in order to have compile time known size.
// Mirrored bytes from the old_ctrl_ will also be copied.
// In case of old_capacity_ == 3, we will copy 1st element twice.
// Load the bytes from half_old_capacity + 1. This contains the last half of
// old_ctrl bytes, followed by the sentinel byte, and then the first half of
// the cloned bytes. This effectively shuffles the control bytes.
uint64_t copied_bytes = 0;
copied_bytes =
absl::little_endian::Load64(old_ctrl() + half_old_capacity + 1);
// We change the sentinel byte to kEmpty before storing to both the start of
// the new_ctrl, and past the end of the new_ctrl later for the new cloned
// bytes. Note that this is faster than setting the sentinel byte to kEmpty
// after the copy directly in new_ctrl because we are limited on store
// bandwidth.
constexpr uint64_t kEmptyXorSentinel =
static_cast<uint8_t>(ctrl_t::kEmpty) ^
static_cast<uint8_t>(ctrl_t::kSentinel);
const uint64_t mask_convert_old_sentinel_to_empty =
kEmptyXorSentinel << (half_old_capacity * 8);
copied_bytes ^= mask_convert_old_sentinel_to_empty;
// Copy second half of bytes to the beginning. This correctly sets the bytes
// [0, old_capacity]. We potentially copy more bytes in order to have compile
// time known size. Mirrored bytes from the old_ctrl() will also be copied. In
// case of old_capacity_ == 3, we will copy 1st element twice.
// Examples:
// (old capacity = 1)
// old_ctrl = 0S0EEEEEEE...
// new_ctrl = S0EEEEEEEE...
// new_ctrl = E0EEEEEE??...
//
// old_ctrl = 01S01EEEEE...
// new_ctrl = 1S01EEEEEE...
// (old capacity = 3)
// old_ctrl = 012S012EEEEE...
// new_ctrl = 12E012EE????...
//
// (old capacity = 7)
// old_ctrl = 0123456S0123456EE...
// new_ctrl = 456S0123?????????...
std::memcpy(new_ctrl, old_ctrl_ + half_old_capacity + 1, kHalfWidth);
// Clean up copied kSentinel from old_ctrl.
new_ctrl[half_old_capacity] = ctrl_t::kEmpty;
// new_ctrl = 456E0123?????????...
absl::little_endian::Store64(new_ctrl, copied_bytes);
// Clean up damaged or uninitialized bytes.
// Clean bytes after the intended size of the copy.
// Example:
// new_ctrl = 1E01EEEEEEE????
// *new_ctrl= 1E0EEEEEEEE????
// position /
// Set the space [old_capacity + 1, new_capacity] to empty as these bytes will
// not be written again. This is safe because
// NumControlBytes = new_capacity + kWidth and new_capacity >=
// old_capacity+1.
// Examples:
// (old_capacity = 3, new_capacity = 15)
// new_ctrl = 12E012EE?????????????...??
// *new_ctrl = 12E0EEEEEEEEEEEEEEEE?...??
// position / S
//
// (old_capacity = 7, new_capacity = 15)
// new_ctrl = 456E0123?????????????????...??
// *new_ctrl = 456E0123EEEEEEEEEEEEEEEE?...??
// position / S
std::memset(new_ctrl + old_capacity_ + 1, static_cast<int8_t>(ctrl_t::kEmpty),
kHalfWidth);
// Clean non-mirrored bytes that are not initialized.
// For small old_capacity that may be inside of mirrored bytes zone.
Group::kWidth);
// Set the last kHalfWidth bytes to empty, to ensure the bytes all the way to
// the end are initialized.
// Examples:
// new_ctrl = 1E0EEEEEEEE??????????....
// *new_ctrl= 1E0EEEEEEEEEEEEE?????....
// position /
// new_ctrl = 12E0EEEEEEEEEEEEEEEE?...???????
// *new_ctrl = 12E0EEEEEEEEEEEEEEEE???EEEEEEEE
// position S /
//
// new_ctrl = 456E0123???????????...
// *new_ctrl= 456E0123EEEEEEEE???...
// position /
std::memset(new_ctrl + kHalfWidth, static_cast<int8_t>(ctrl_t::kEmpty),
kHalfWidth);
// Clean last mirrored bytes that are not initialized
// and will not be overwritten by mirroring.
// Examples:
// new_ctrl = 1E0EEEEEEEEEEEEE????????
// *new_ctrl= 1E0EEEEEEEEEEEEEEEEEEEEE
// position S /
//
// new_ctrl = 456E0123EEEEEEEE???????????????
// *new_ctrl= 456E0123EEEEEEEE???????EEEEEEEE
// position S /
std::memset(new_ctrl + new_capacity + kHalfWidth,
// new_ctrl = 456E0123EEEEEEEEEEEEEEEE???????
// *new_ctrl = 456E0123EEEEEEEEEEEEEEEEEEEEEEE
// position S /
std::memset(new_ctrl + NumControlBytes(new_capacity) - kHalfWidth,
static_cast<int8_t>(ctrl_t::kEmpty), kHalfWidth);
// Create mirrored bytes. old_capacity_ < kHalfWidth
// Example:
// new_ctrl = 456E0123EEEEEEEE???????EEEEEEEE
// *new_ctrl= 456E0123EEEEEEEE456E0123EEEEEEE
// position S/
ctrl_t g[kHalfWidth];
std::memcpy(g, new_ctrl, kHalfWidth);
std::memcpy(new_ctrl + new_capacity + 1, g, kHalfWidth);
// Copy the first bytes to the end (starting at new_capacity +1) to set the
// cloned bytes. Note that we use the already copied bytes from old_ctrl here
// rather than copying from new_ctrl to avoid a Read-after-Write hazard, since
// new_ctrl was just written to. The first old_capacity-1 bytes are set
// correctly. Then there may be up to old_capacity bytes that need to be
// overwritten, and any remaining bytes will be correctly set to empty. This
// sets [new_capacity + 1, new_capacity +1 + old_capacity] correctly.
// Examples:
// new_ctrl = 12E0EEEEEEEEEEEEEEEE?...???????
// *new_ctrl = 12E0EEEEEEEEEEEE12E012EEEEEEEEE
// position S/
//
// new_ctrl = 456E0123EEEEEEEE?...???EEEEEEEE
// *new_ctrl = 456E0123EEEEEEEE456E0123EEEEEEE
// position S/
absl::little_endian::Store64(new_ctrl + new_capacity + 1, copied_bytes);
// Finally set sentinel to its place.
// Set The remaining bytes at the end past the cloned bytes to empty. The
// incorrectly set bytes are [new_capacity + old_capacity + 2,
// min(new_capacity + 1 + kHalfWidth, new_capacity + old_capacity + 2 +
// half_old_capacity)]. Taking the difference, we need to set min(kHalfWidth -
// (old_capacity + 1), half_old_capacity)]. Since old_capacity < kHalfWidth,
// half_old_capacity < kQuarterWidth, so we set kQuarterWidth beginning at
// new_capacity + old_capacity + 2 to kEmpty.
// Examples:
// new_ctrl = 12E0EEEEEEEEEEEE12E012EEEEEEEEE
// *new_ctrl = 12E0EEEEEEEEEEEE12E0EEEEEEEEEEE
// position S /
//
// new_ctrl = 456E0123EEEEEEEE456E0123EEEEEEE
// *new_ctrl = 456E0123EEEEEEEE456E0123EEEEEEE (no change)
// position S /
std::memset(new_ctrl + new_capacity + old_capacity_ + 2,
static_cast<int8_t>(ctrl_t::kEmpty), kQuarterWidth);
// Finally, we set the new sentinel byte.
new_ctrl[new_capacity] = ctrl_t::kSentinel;
}
// Initializes the control bytes of a freshly allocated single-group table
// after leaving SOO mode: every byte is first set to kEmpty, then the control
// byte `h2` for the transferred SOO element is written at SooSlotIndex() == 1
// and at its cloned mirror position (new_capacity + 2), and the sentinel byte
// terminates the group at index new_capacity.
void HashSetResizeHelper::InitControlBytesAfterSoo(ctrl_t* new_ctrl, ctrl_t h2,
                                                   size_t new_capacity) {
  assert(is_single_group(new_capacity));
  std::memset(new_ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
              NumControlBytes(new_capacity));
  // SooSlotIndex() is hard-coded as 1 below; fail loudly if it ever changes.
  assert(HashSetResizeHelper::SooSlotIndex() == 1);
  // When there was no SOO slot, `h2` is kEmpty, so writing it unconditionally
  // is a no-op. This allows us to avoid branching on had_soo_slot_.
  assert(had_soo_slot_ || h2 == ctrl_t::kEmpty);
  new_ctrl[1] = new_ctrl[new_capacity + 2] = h2;
  new_ctrl[new_capacity] = ctrl_t::kSentinel;
}
void HashSetResizeHelper::GrowIntoSingleGroupShuffleTransferableSlots(
void* old_slots, void* new_slots, size_t slot_size) const {
void* new_slots, size_t slot_size) const {
assert(old_capacity_ > 0);
const size_t half_old_capacity = old_capacity_ / 2;
SanitizerUnpoisonMemoryRegion(old_slots, slot_size * old_capacity_);
SanitizerUnpoisonMemoryRegion(old_slots(), slot_size * old_capacity_);
std::memcpy(new_slots,
SlotAddress(old_slots, half_old_capacity + 1, slot_size),
SlotAddress(old_slots(), half_old_capacity + 1, slot_size),
slot_size * half_old_capacity);
std::memcpy(SlotAddress(new_slots, half_old_capacity + 1, slot_size),
old_slots, slot_size * (half_old_capacity + 1));
old_slots(), slot_size * (half_old_capacity + 1));
}
// Grows a table with trivially transferable slots into a single-group table:
// shuffles the control bytes, shuffles the slots to match, then re-poisons
// the slots that remain empty.
//
// NOTE(review): the diff rendering had interleaved the pre- and post-change
// lines of this function (duplicate signature and call lines); this is the
// coherent post-change form, in which the old slot array is obtained from the
// resize helper rather than passed in.
void HashSetResizeHelper::GrowSizeIntoSingleGroupTransferable(
    CommonFields& c, size_t slot_size) {
  assert(old_capacity_ < Group::kWidth / 2);
  assert(is_single_group(c.capacity()));
  assert(IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity()));

  GrowIntoSingleGroupShuffleControlBytes(c.control(), c.capacity());
  GrowIntoSingleGroupShuffleTransferableSlots(c.slot_array(), slot_size);

  // We poison since GrowIntoSingleGroupShuffleTransferableSlots
  // may leave empty slots unpoisoned.
  PoisonSingleGroupEmptySlots(c, slot_size);
}
// Moves the single SOO element's bytes into its designated slot of the
// freshly allocated single-group backing array, then poisons the slots that
// remain empty.
void HashSetResizeHelper::TransferSlotAfterSoo(CommonFields& c,
                                               size_t slot_size) {
  assert(was_soo_);
  assert(had_soo_slot_);
  assert(is_single_group(c.capacity()));
  // Destination is the fixed SOO slot index within the new slot array.
  void* const dst = SlotAddress(c.slot_array(), SooSlotIndex(), slot_size);
  std::memcpy(dst, old_soo_data(), slot_size);
  // Slots we did not write may be unpoisoned; poison them back.
  PoisonSingleGroupEmptySlots(c, slot_size);
}
namespace {

// Called whenever the table needs to vacate empty slots, either by squashing
// tombstones (rehash in place) or by growing. Returns the position at which
// the pending insertion can be performed.
ABSL_ATTRIBUTE_NOINLINE
FindInfo FindInsertPositionWithGrowthOrRehash(CommonFields& common, size_t hash,
                                              const PolicyFunctions& policy) {
  const size_t cap = common.capacity();
  // Rehash in place (squash DELETED without growing) when the table spans
  // more than one group and size() is at most 25/32 of capacity.
  //
  // Rationale for such a high threshold: 1) DropDeletesWithoutResize() is
  // faster than resizing, and 2) tombstones are expensive to produce — in the
  // worst case roughly four insert/erase pairs per tombstone — so if we are
  // rehashing because of tombstones we can afford to rehash in place as long
  // as we reclaim at least ~1/8 of the usable capacity (specifically 3/28 of
  // the capacity) without doing more than 2X the work. (Here "work" is size()
  // for a rehash/resize and 1 for an insert or erase.) Rehashing in place is
  // faster per operation than inserting or doubling the table, so the total
  // rehashing cost stays a small fraction of the overall work.
  //
  // The BM_CacheInSteadyState benchmark (raw_hash_set_benchmark) shows nearly
  // identical operations per second for this 25/32 threshold versus the old
  // 7/16 one, even though the worst-case number of rehashes grows from 15 to
  // 190 (e.g. at size 762: 149836 N/s with 14 rehashes before vs. 148559 N/s
  // with 190 rehashes after).
  const bool rehash_in_place =
      cap > Group::kWidth &&
      // 64-bit arithmetic avoids overflow of size() * 32.
      common.size() * uint64_t{32} <= cap * uint64_t{25};
  if (rehash_in_place) {
    DropDeletesWithoutResize(common, policy);
  } else {
    // Otherwise grow the container.
    policy.resize(common, NextCapacity(cap), HashtablezInfoHandle{});
  }
  // Tables reaching this point typically contain deleted slots and are big
  // enough that `FindFirstNonFullAfterResize` would always fall back to
  // `find_first_non_full`, so call the latter directly.
  return find_first_non_full(common, hash);
}

}  // namespace
const void* GetHashRefForEmptyHasher(const CommonFields& common) {
  // An empty hasher carries no state, so any stable, valid address serves as
  // its "reference". Thanks to the empty base optimization the hasher's
  // address typically coincides with the derived object's first address
  // anyway, so the address of `common` is a convenient choice.
  return static_cast<const void*>(&common);
}
// Prepares the table for inserting an element with the given `hash` and
// returns the slot offset at which the caller should construct it. `target`
// is a candidate position found by the caller; it is honored only on the
// fast path where the table has no deleted slots and spare growth capacity.
size_t PrepareInsertNonSoo(CommonFields& common, size_t hash, FindInfo target,
                           const PolicyFunctions& policy) {
  // The caller's hint is usable only when there are no deleted slots and
  // growth_left is positive. The optimization is also disabled when
  // generations are enabled, because in that mode even sparse tables must be
  // rehashed randomly.
  const bool use_target_hint =
      !SwisstableGenerationsEnabled() &&
      common.growth_info().HasNoDeletedAndGrowthLeft();

  if (ABSL_PREDICT_FALSE(!use_target_hint)) {
    // With generations disabled, this branch handles (in decreasing order of
    // frequency):
    //   1. A table without deleted slots (>95% of cases) that must grow.
    //   2. A table with deleted slots that still has room for the element.
    //   3. A table with deleted slots that must be rehashed or resized.
    if (ABSL_PREDICT_TRUE(common.growth_info().HasNoGrowthLeftAndNoDeleted())) {
      const size_t old_capacity = common.capacity();
      policy.resize(common, NextCapacity(old_capacity), HashtablezInfoHandle{});
      target = HashSetResizeHelper::FindFirstNonFullAfterResize(
          common, old_capacity, hash);
    } else {
      // Note: with generations enabled the table may have no deleted slots
      // here.
      if (common.should_rehash_for_bug_detection_on_insert()) {
        // Move to a different heap allocation in order to detect bugs.
        const size_t cap = common.capacity();
        policy.resize(common,
                      common.growth_left() > 0 ? cap : NextCapacity(cap),
                      HashtablezInfoHandle{});
      }
      if (ABSL_PREDICT_TRUE(common.growth_left() > 0)) {
        target = find_first_non_full(common, hash);
      } else {
        target = FindInsertPositionWithGrowthOrRehash(common, hash, policy);
      }
    }
  }

  PrepareInsertCommon(common);
  common.growth_info().OverwriteControlAsFull(common.control()[target.offset]);
  SetCtrl(common, target.offset, H2(hash), policy.slot_size);
  common.infoz().RecordInsert(hash, target.probe_length);
  return target.offset;
}
} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl

File diff suppressed because it is too large Load Diff

View File

@@ -12,10 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <array>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <numeric>
#include <random>
#include <string>
@@ -27,6 +29,7 @@
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h"
#include "absl/container/internal/raw_hash_set.h"
#include "absl/random/random.h"
#include "absl/strings/str_format.h"
#include "benchmark/benchmark.h"
@@ -457,6 +460,19 @@ void BM_Group_Match(benchmark::State& state) {
}
BENCHMARK(BM_Group_Match);
// Benchmarks GroupPortableImpl::Match — the generic (non-SIMD) control-byte
// matcher — on one group's worth of control bytes filled via Iota.
void BM_GroupPortable_Match(benchmark::State& state) {
  std::array<ctrl_t, GroupPortableImpl::kWidth> group;
  Iota(group.begin(), group.end(), -4);
  GroupPortableImpl g{group.data()};
  h2_t h = 1;
  for (auto _ : state) {
    // DoNotOptimize keeps the compiler from hoisting or folding the Match
    // call out of the timed loop.
    ::benchmark::DoNotOptimize(h);
    ::benchmark::DoNotOptimize(g);
    ::benchmark::DoNotOptimize(g.Match(h));
  }
}
BENCHMARK(BM_GroupPortable_Match);
void BM_Group_MaskEmpty(benchmark::State& state) {
std::array<ctrl_t, Group::kWidth> group;
Iota(group.begin(), group.end(), -4);
@@ -562,6 +578,67 @@ void BM_Resize(benchmark::State& state) {
}
BENCHMARK(BM_Resize);
// Benchmarks absl::container_internal::EraseIf on IntTable. Each iteration
// erases all keys below a per-table threshold chosen so that exactly
// `num_erased` elements are removed, then (outside the timed region)
// re-inserts them so every timed pass sees an identical table.
void BM_EraseIf(benchmark::State& state) {
  int64_t num_elements = state.range(0);
  size_t num_erased = static_cast<size_t>(state.range(1));
  // Several independent tables are processed per batch to reduce noise.
  constexpr size_t kRepetitions = 64;
  absl::BitGen rng;
  std::vector<std::vector<int64_t>> keys(kRepetitions);
  std::vector<IntTable> tables;
  std::vector<int64_t> threshold;
  for (auto& k : keys) {
    tables.push_back(IntTable());
    auto& table = tables.back();
    for (int64_t i = 0; i < num_elements; i++) {
      // We use random keys to reduce noise.
      k.push_back(
          absl::Uniform<int64_t>(rng, 0, std::numeric_limits<int64_t>::max()));
      if (!table.insert(k.back()).second) {
        k.pop_back();
        --i;  // duplicated value, retrying
      }
    }
    // Sorting lets us pick the (num_erased)-th smallest key as the erase
    // threshold; if everything is to be erased, use the maximum int64.
    std::sort(k.begin(), k.end());
    threshold.push_back(static_cast<int64_t>(num_erased) < num_elements
                            ? k[num_erased]
                            : std::numeric_limits<int64_t>::max());
  }
  while (state.KeepRunningBatch(static_cast<int64_t>(kRepetitions) *
                                std::max(num_elements, int64_t{1}))) {
    benchmark::DoNotOptimize(tables);
    for (size_t t_id = 0; t_id < kRepetitions; t_id++) {
      auto& table = tables[t_id];
      benchmark::DoNotOptimize(num_erased);
      auto pred = [t = threshold[t_id]](int64_t key) { return key < t; };
      benchmark::DoNotOptimize(pred);
      benchmark::DoNotOptimize(table);
      absl::container_internal::EraseIf(pred, &table);
    }
    // Restore the erased elements so the next timed pass starts from the
    // same table state; PauseTiming keeps this out of the measurement.
    state.PauseTiming();
    for (size_t t_id = 0; t_id < kRepetitions; t_id++) {
      auto& k = keys[t_id];
      auto& table = tables[t_id];
      for (size_t i = 0; i < num_erased; i++) {
        table.insert(k[i]);
      }
    }
    state.ResumeTiming();
  }
}
BENCHMARK(BM_EraseIf)
    ->ArgNames({"num_elements", "num_erased"})
    ->ArgPair(10, 0)
    ->ArgPair(1000, 0)
    ->ArgPair(10, 5)
    ->ArgPair(1000, 500)
    ->ArgPair(10, 10)
    ->ArgPair(1000, 1000);
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END

File diff suppressed because it is too large Load Diff

View File

@@ -32,22 +32,25 @@
// migration, because it guarantees pointer stability. Consider migrating to
// `node_hash_map` and perhaps converting to a more efficient `flat_hash_map`
// upon further review.
//
// `node_hash_map` is not exception-safe.
#ifndef ABSL_CONTAINER_NODE_HASH_MAP_H_
#define ABSL_CONTAINER_NODE_HASH_MAP_H_
#include <cstddef>
#include <tuple>
#include <memory>
#include <type_traits>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/base/macros.h"
#include "absl/base/attributes.h"
#include "absl/container/hash_container_defaults.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
#include "absl/container/internal/node_slot_policy.h"
#include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -114,11 +117,10 @@ class NodeHashMapPolicy;
// if (result != ducks.end()) {
// std::cout << "Result: " << result->second << std::endl;
// }
template <class Key, class Value,
class Hash = absl::container_internal::hash_default_hash<Key>,
class Eq = absl::container_internal::hash_default_eq<Key>,
template <class Key, class Value, class Hash = DefaultHashContainerHash<Key>,
class Eq = DefaultHashContainerEq<Key>,
class Alloc = std::allocator<std::pair<const Key, Value>>>
class node_hash_map
class ABSL_INTERNAL_ATTRIBUTE_OWNER node_hash_map
: public absl::container_internal::raw_hash_map<
absl::container_internal::NodeHashMapPolicy<Key, Value>, Hash, Eq,
Alloc> {
@@ -415,8 +417,7 @@ class node_hash_map
// node_hash_map::swap(node_hash_map& other)
//
// Exchanges the contents of this `node_hash_map` with those of the `other`
// node hash map, avoiding invocation of any move, copy, or swap operations on
// individual elements.
// node hash map.
//
// All iterators and references on the `node_hash_map` remain valid, excepting
// for the past-the-end iterator, which is invalidated.
@@ -556,6 +557,53 @@ typename node_hash_map<K, V, H, E, A>::size_type erase_if(
return container_internal::EraseIf(pred, &c);
}
// swap(node_hash_map<>, node_hash_map<>)
//
// Swaps the contents of two `node_hash_map` containers.
//
// NOTE: we need to define this function template in order for
// `node_hash_map::swap` to be called instead of `std::swap`. Even though we
// have `swap(raw_hash_set&, raw_hash_set&)` defined, that function requires a
// derived-to-base conversion, whereas `std::swap` is a function template so
// `std::swap` will be preferred by the compiler.
template <typename K, typename V, typename H, typename E, typename A>
void swap(node_hash_map<K, V, H, E, A>& x,
          node_hash_map<K, V, H, E, A>& y) noexcept(noexcept(x.swap(y))) {
  return x.swap(y);
}
namespace container_internal {

// c_for_each_fast(node_hash_map<>, Function)
//
// Container-based version of the <algorithm> `std::for_each()` function to
// apply a function to a container's elements.
// There are no guarantees on the order of the function calls.
// Erasure and/or insertion of elements in the function is not allowed.
// Overload for const containers: the function sees const references.
template <typename K, typename V, typename H, typename E, typename A,
          typename Function>
decay_t<Function> c_for_each_fast(const node_hash_map<K, V, H, E, A>& c,
                                  Function&& f) {
  container_internal::ForEach(f, &c);
  return f;
}
// Overload for mutable lvalue containers: the function may mutate values.
template <typename K, typename V, typename H, typename E, typename A,
          typename Function>
decay_t<Function> c_for_each_fast(node_hash_map<K, V, H, E, A>& c,
                                  Function&& f) {
  container_internal::ForEach(f, &c);
  return f;
}
// Overload for rvalue (temporary) containers.
template <typename K, typename V, typename H, typename E, typename A,
          typename Function>
decay_t<Function> c_for_each_fast(node_hash_map<K, V, H, E, A>&& c,
                                  Function&& f) {
  container_internal::ForEach(f, &c);
  return f;
}

}  // namespace container_internal
namespace container_internal {
template <class Key, class Value>

View File

@@ -14,6 +14,18 @@
#include "absl/container/node_hash_map.h"
#include <cstddef>
#include <new>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/container/internal/hash_policy_testing.h"
#include "absl/container/internal/tracked.h"
#include "absl/container/internal/unordered_map_constructor_test.h"
#include "absl/container/internal/unordered_map_lookup_test.h"
@@ -29,6 +41,7 @@ using ::testing::Field;
using ::testing::IsEmpty;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
using ::testing::UnorderedElementsAreArray;
using MapTypes = ::testing::Types<
absl::node_hash_map<int, int, StatefulTestingHash, StatefulTestingEqual,
@@ -257,6 +270,58 @@ TEST(NodeHashMap, EraseIf) {
}
}
// Verifies c_for_each_fast visits every element exactly once for mutable,
// const, and temporary node_hash_map objects, across sizes 0..99.
TEST(NodeHashMap, CForEach) {
  node_hash_map<int, int> m;
  std::vector<std::pair<int, int>> expected;
  for (int i = 0; i < 100; ++i) {
    {
      SCOPED_TRACE("mutable object iteration");
      std::vector<std::pair<int, int>> v;
      absl::container_internal::c_for_each_fast(
          m, [&v](std::pair<const int, int>& p) { v.push_back(p); });
      EXPECT_THAT(v, UnorderedElementsAreArray(expected));
    }
    {
      SCOPED_TRACE("const object iteration");
      std::vector<std::pair<int, int>> v;
      const node_hash_map<int, int>& cm = m;
      absl::container_internal::c_for_each_fast(
          cm, [&v](const std::pair<const int, int>& p) { v.push_back(p); });
      EXPECT_THAT(v, UnorderedElementsAreArray(expected));
    }
    {
      // Fixed trace label: this case iterates a temporary copy of `m`, not a
      // const reference (matches the analogous NodeHashSet test).
      SCOPED_TRACE("temporary object iteration");
      std::vector<std::pair<int, int>> v;
      absl::container_internal::c_for_each_fast(
          node_hash_map<int, int>(m),
          [&v](std::pair<const int, int>& p) { v.push_back(p); });
      EXPECT_THAT(v, UnorderedElementsAreArray(expected));
    }
    m[i] = i;
    expected.emplace_back(i, i);
  }
}
// Verifies that c_for_each_fast on a mutable node_hash_map lets the callback
// mutate mapped values, and that the mutations are visible in the container
// afterwards. `expected` mirrors the container and receives the same
// increments so the two can be compared each round.
TEST(NodeHashMap, CForEachMutate) {
  node_hash_map<int, int> s;
  std::vector<std::pair<int, int>> expected;
  for (int i = 0; i < 100; ++i) {
    std::vector<std::pair<int, int>> v;
    absl::container_internal::c_for_each_fast(
        s, [&v](std::pair<const int, int>& p) {
          // Record the pre-increment value, then mutate in place.
          v.push_back(p);
          p.second++;
        });
    EXPECT_THAT(v, UnorderedElementsAreArray(expected));
    // Apply the same increment to the mirror before comparing.
    for (auto& p : expected) {
      p.second++;
    }
    EXPECT_THAT(s, UnorderedElementsAreArray(expected));
    s[i] = i;
    expected.emplace_back(i, i);
  }
}
// This test requires std::launder for mutable key access in node handles.
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
TEST(NodeHashMap, NodeHandleMutableKeyAccess) {

View File

@@ -31,20 +31,24 @@
// `node_hash_set` should be an easy migration. Consider migrating to
// `node_hash_set` and perhaps converting to a more efficient `flat_hash_set`
// upon further review.
//
// `node_hash_set` is not exception-safe.
#ifndef ABSL_CONTAINER_NODE_HASH_SET_H_
#define ABSL_CONTAINER_NODE_HASH_SET_H_
#include <cstddef>
#include <memory>
#include <type_traits>
#include "absl/algorithm/container.h"
#include "absl/base/macros.h"
#include "absl/base/attributes.h"
#include "absl/container/hash_container_defaults.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
#include "absl/container/internal/node_slot_policy.h"
#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -109,10 +113,9 @@ struct NodeHashSetPolicy;
// if (ducks.contains("dewey")) {
// std::cout << "We found dewey!" << std::endl;
// }
template <class T, class Hash = absl::container_internal::hash_default_hash<T>,
class Eq = absl::container_internal::hash_default_eq<T>,
class Alloc = std::allocator<T>>
class node_hash_set
template <class T, class Hash = DefaultHashContainerHash<T>,
class Eq = DefaultHashContainerEq<T>, class Alloc = std::allocator<T>>
class ABSL_INTERNAL_ATTRIBUTE_OWNER node_hash_set
: public absl::container_internal::raw_hash_set<
absl::container_internal::NodeHashSetPolicy<T>, Hash, Eq, Alloc> {
using Base = typename node_hash_set::raw_hash_set;
@@ -346,8 +349,7 @@ class node_hash_set
// node_hash_set::swap(node_hash_set& other)
//
// Exchanges the contents of this `node_hash_set` with those of the `other`
// node hash set, avoiding invocation of any move, copy, or swap operations on
// individual elements.
// node hash set.
//
// All iterators and references on the `node_hash_set` remain valid, excepting
// for the past-the-end iterator, which is invalidated.
@@ -464,6 +466,48 @@ typename node_hash_set<T, H, E, A>::size_type erase_if(
return container_internal::EraseIf(pred, &c);
}
// swap(node_hash_set<>, node_hash_set<>)
//
// Swaps the contents of two `node_hash_set` containers.
//
// NOTE: we need to define this function template in order for
// `node_hash_set::swap` to be called instead of `std::swap`. Even though we
// have `swap(raw_hash_set&, raw_hash_set&)` defined, that function requires a
// derived-to-base conversion, whereas `std::swap` is a function template so
// `std::swap` will be preferred by the compiler.
template <typename T, typename H, typename E, typename A>
void swap(node_hash_set<T, H, E, A>& x,
          node_hash_set<T, H, E, A>& y) noexcept(noexcept(x.swap(y))) {
  return x.swap(y);
}
namespace container_internal {

// c_for_each_fast(node_hash_set<>, Function)
//
// Container-based version of the <algorithm> `std::for_each()` function to
// apply a function to a container's elements.
// There are no guarantees on the order of the function calls.
// Erasure and/or insertion of elements in the function is not allowed.
// Overload for const containers.
template <typename T, typename H, typename E, typename A, typename Function>
decay_t<Function> c_for_each_fast(const node_hash_set<T, H, E, A>& c,
                                  Function&& f) {
  container_internal::ForEach(f, &c);
  return f;
}
// Overload for mutable lvalue containers.
template <typename T, typename H, typename E, typename A, typename Function>
decay_t<Function> c_for_each_fast(node_hash_set<T, H, E, A>& c, Function&& f) {
  container_internal::ForEach(f, &c);
  return f;
}
// Overload for rvalue (temporary) containers.
template <typename T, typename H, typename E, typename A, typename Function>
decay_t<Function> c_for_each_fast(node_hash_set<T, H, E, A>&& c, Function&& f) {
  container_internal::ForEach(f, &c);
  return f;
}

}  // namespace container_internal
namespace container_internal {
template <class T>

View File

@@ -14,10 +14,22 @@
#include "absl/container/node_hash_set.h"
#include <cstddef>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/hash_policy_testing.h"
#include "absl/container/internal/unordered_set_constructor_test.h"
#include "absl/container/internal/unordered_set_lookup_test.h"
#include "absl/container/internal/unordered_set_members_test.h"
#include "absl/container/internal/unordered_set_modifiers_test.h"
#include "absl/memory/memory.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -28,6 +40,7 @@ using ::absl::container_internal::hash_internal::EnumClass;
using ::testing::IsEmpty;
using ::testing::Pointee;
using ::testing::UnorderedElementsAre;
using ::testing::UnorderedElementsAreArray;
using SetTypes = ::testing::Types<
node_hash_set<int, StatefulTestingHash, StatefulTestingEqual, Alloc<int>>,
@@ -137,6 +150,39 @@ TEST(NodeHashSet, EraseIf) {
}
}
// Verifies c_for_each_fast visits every element exactly once for mutable,
// const, and temporary node_hash_set objects, across sizes 0..99.
TEST(NodeHashSet, CForEach) {
  using ValueType = std::pair<int, int>;
  node_hash_set<ValueType> s;
  std::vector<ValueType> expected;
  for (int i = 0; i < 100; ++i) {
    {
      SCOPED_TRACE("mutable object iteration");
      std::vector<ValueType> v;
      absl::container_internal::c_for_each_fast(
          s, [&v](const ValueType& p) { v.push_back(p); });
      ASSERT_THAT(v, UnorderedElementsAreArray(expected));
    }
    {
      SCOPED_TRACE("const object iteration");
      std::vector<ValueType> v;
      const node_hash_set<ValueType>& cs = s;
      absl::container_internal::c_for_each_fast(
          cs, [&v](const ValueType& p) { v.push_back(p); });
      ASSERT_THAT(v, UnorderedElementsAreArray(expected));
    }
    {
      SCOPED_TRACE("temporary object iteration");
      std::vector<ValueType> v;
      absl::container_internal::c_for_each_fast(
          node_hash_set<ValueType>(s),
          [&v](const ValueType& p) { v.push_back(p); });
      ASSERT_THAT(v, UnorderedElementsAreArray(expected));
    }
    s.emplace(i, i);
    expected.emplace_back(i, i);
  }
}
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END

View File

@@ -12,6 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cstddef>
#include <unordered_set>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
@@ -38,15 +43,16 @@ void TestInlineElementSize(
// set cannot be flat_hash_set, however, since that would introduce a mutex
// deadlock.
std::unordered_set<const HashtablezInfo*>& preexisting_info, // NOLINT
std::vector<Table>& tables, const typename Table::value_type& elt,
std::vector<Table>& tables,
const std::vector<typename Table::value_type>& values,
size_t expected_element_size) {
for (int i = 0; i < 10; ++i) {
// We create a new table and must store it somewhere so that when we store
// a pointer to the resulting `HashtablezInfo` into `preexisting_info`
// that we aren't storing a dangling pointer.
tables.emplace_back();
// We must insert an element to get a hashtablez to instantiate.
tables.back().insert(elt);
// We must insert elements to get a hashtablez to instantiate.
tables.back().insert(values.begin(), values.end());
}
size_t new_count = 0;
sampler.Iterate([&](const HashtablezInfo& info) {
@@ -82,6 +88,9 @@ TEST(FlatHashMap, SampleElementSize) {
std::vector<flat_hash_set<bigstruct>> flat_set_tables;
std::vector<node_hash_map<int, bigstruct>> node_map_tables;
std::vector<node_hash_set<bigstruct>> node_set_tables;
std::vector<bigstruct> set_values = {bigstruct{{0}}, bigstruct{{1}}};
std::vector<std::pair<const int, bigstruct>> map_values = {{0, bigstruct{}},
{1, bigstruct{}}};
// It takes thousands of new tables after changing the sampling parameters
// before you actually get some instrumentation. And if you must actually
@@ -97,14 +106,14 @@ TEST(FlatHashMap, SampleElementSize) {
std::unordered_set<const HashtablezInfo*> preexisting_info; // NOLINT
sampler.Iterate(
[&](const HashtablezInfo& info) { preexisting_info.insert(&info); });
TestInlineElementSize(sampler, preexisting_info, flat_map_tables,
{0, bigstruct{}}, sizeof(int) + sizeof(bigstruct));
TestInlineElementSize(sampler, preexisting_info, node_map_tables,
{0, bigstruct{}}, sizeof(void*));
TestInlineElementSize(sampler, preexisting_info, flat_set_tables, //
bigstruct{}, sizeof(bigstruct));
TestInlineElementSize(sampler, preexisting_info, node_set_tables, //
bigstruct{}, sizeof(void*));
TestInlineElementSize(sampler, preexisting_info, flat_map_tables, map_values,
sizeof(int) + sizeof(bigstruct));
TestInlineElementSize(sampler, preexisting_info, node_map_tables, map_values,
sizeof(void*));
TestInlineElementSize(sampler, preexisting_info, flat_set_tables, set_values,
sizeof(bigstruct));
TestInlineElementSize(sampler, preexisting_info, node_set_tables, set_values,
sizeof(void*));
#endif
}

View File

@@ -3,7 +3,7 @@ include(GENERATED_AbseilCopts)
set(ABSL_DEFAULT_LINKOPTS "")
if (BUILD_SHARED_LIBS AND MSVC)
if (BUILD_SHARED_LIBS AND (MSVC OR ABSL_BUILD_MONOLITHIC_SHARED_LIBS))
set(ABSL_BUILD_DLL TRUE)
set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
else()
@@ -42,7 +42,7 @@ if(APPLE AND CMAKE_CXX_COMPILER_ID MATCHES [[Clang]])
string(TOUPPER "${_arch}" _arch_uppercase)
string(REPLACE "X86_64" "X64" _arch_uppercase ${_arch_uppercase})
foreach(_flag IN LISTS ABSL_RANDOM_HWAES_${_arch_uppercase}_FLAGS)
list(APPEND ABSL_RANDOM_RANDEN_COPTS "-Xarch_${_arch}" "${_flag}")
list(APPEND ABSL_RANDOM_RANDEN_COPTS "SHELL:-Xarch_${_arch} ${_flag}")
endforeach()
endforeach()
# If a compiler happens to deal with an argument for a currently unused

View File

@@ -44,6 +44,7 @@ list(APPEND ABSL_GCC_FLAGS
"-Wconversion-null"
"-Wformat-security"
"-Wmissing-declarations"
"-Wnon-virtual-dtor"
"-Woverlength-strings"
"-Wpointer-arith"
"-Wundef"
@@ -61,6 +62,7 @@ list(APPEND ABSL_GCC_TEST_FLAGS
"-Wcast-qual"
"-Wconversion-null"
"-Wformat-security"
"-Wnon-virtual-dtor"
"-Woverlength-strings"
"-Wpointer-arith"
"-Wundef"
@@ -82,9 +84,10 @@ list(APPEND ABSL_GCC_TEST_FLAGS
list(APPEND ABSL_LLVM_FLAGS
"-Wall"
"-Wextra"
"-Wc++98-compat-extra-semi"
"-Wcast-qual"
"-Wconversion"
"-Wdead-code-aggressive"
"-Wdeprecated-pragma"
"-Wfloat-overflow-conversion"
"-Wfloat-zero-conversion"
"-Wfor-loop-analysis"
@@ -121,9 +124,10 @@ list(APPEND ABSL_LLVM_FLAGS
list(APPEND ABSL_LLVM_TEST_FLAGS
"-Wall"
"-Wextra"
"-Wc++98-compat-extra-semi"
"-Wcast-qual"
"-Wconversion"
"-Wdead-code-aggressive"
"-Wdeprecated-pragma"
"-Wfloat-overflow-conversion"
"-Wfloat-zero-conversion"
"-Wfor-loop-analysis"

View File

@@ -45,6 +45,7 @@ ABSL_GCC_FLAGS = [
"-Wconversion-null",
"-Wformat-security",
"-Wmissing-declarations",
"-Wnon-virtual-dtor",
"-Woverlength-strings",
"-Wpointer-arith",
"-Wundef",
@@ -62,6 +63,7 @@ ABSL_GCC_TEST_FLAGS = [
"-Wcast-qual",
"-Wconversion-null",
"-Wformat-security",
"-Wnon-virtual-dtor",
"-Woverlength-strings",
"-Wpointer-arith",
"-Wundef",
@@ -83,9 +85,10 @@ ABSL_GCC_TEST_FLAGS = [
ABSL_LLVM_FLAGS = [
"-Wall",
"-Wextra",
"-Wc++98-compat-extra-semi",
"-Wcast-qual",
"-Wconversion",
"-Wdead-code-aggressive",
"-Wdeprecated-pragma",
"-Wfloat-overflow-conversion",
"-Wfloat-zero-conversion",
"-Wfor-loop-analysis",
@@ -122,9 +125,10 @@ ABSL_LLVM_FLAGS = [
ABSL_LLVM_TEST_FLAGS = [
"-Wall",
"-Wextra",
"-Wc++98-compat-extra-semi",
"-Wcast-qual",
"-Wconversion",
"-Wdead-code-aggressive",
"-Wdeprecated-pragma",
"-Wfloat-overflow-conversion",
"-Wfloat-zero-conversion",
"-Wfor-loop-analysis",

View File

@@ -18,6 +18,7 @@ ABSL_GCC_FLAGS = [
"-Wconversion-null",
"-Wformat-security",
"-Wmissing-declarations",
"-Wnon-virtual-dtor",
"-Woverlength-strings",
"-Wpointer-arith",
"-Wundef",
@@ -43,9 +44,10 @@ ABSL_GCC_TEST_ADDITIONAL_FLAGS = [
ABSL_LLVM_FLAGS = [
"-Wall",
"-Wextra",
"-Wc++98-compat-extra-semi",
"-Wcast-qual",
"-Wconversion",
"-Wdead-code-aggressive",
"-Wdeprecated-pragma",
"-Wfloat-overflow-conversion",
"-Wfloat-zero-conversion",
"-Wfor-loop-analysis",

View File

@@ -121,7 +121,9 @@ cc_library(
hdrs = ["internal/non_temporal_arm_intrinsics.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = ["//visibility:private"],
visibility = [
":__pkg__",
],
deps = [
"//absl/base:config",
],
@@ -132,7 +134,9 @@ cc_library(
hdrs = ["internal/non_temporal_memcpy.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
visibility = ["//visibility:private"],
visibility = [
":__pkg__",
],
deps = [
":non_temporal_arm_intrinsics",
"//absl/base:config",

View File

@@ -102,10 +102,11 @@ V128 V128_Xor(const V128 l, const V128 r);
// Produces an AND operation of |l| and |r|.
V128 V128_And(const V128 l, const V128 r);
// Sets two 64 bit integers to one 128 bit vector. The order is reverse.
// Sets the lower half of a 128 bit register to the given 64-bit value and
// zeroes the upper half.
// dst[63:0] := |r|
// dst[127:64] := |l|
V128 V128_From2x64(const uint64_t l, const uint64_t r);
// dst[127:64] := |0|
V128 V128_From64WithZeroFill(const uint64_t r);
// Shift |l| right by |imm| bytes while shifting in zeros.
template <int imm>
@@ -122,8 +123,8 @@ uint64_t V128_Extract64(const V128 l);
// Extracts the low 64 bits from V128.
int64_t V128_Low64(const V128 l);
// Left-shifts packed 64-bit integers in l by r.
V128 V128_ShiftLeft64(const V128 l, const V128 r);
// Add packed 64-bit integers in |l| and |r|.
V128 V128_Add64(const V128 l, const V128 r);
#endif
@@ -171,8 +172,8 @@ inline V128 V128_Xor(const V128 l, const V128 r) { return _mm_xor_si128(l, r); }
inline V128 V128_And(const V128 l, const V128 r) { return _mm_and_si128(l, r); }
inline V128 V128_From2x64(const uint64_t l, const uint64_t r) {
return _mm_set_epi64x(static_cast<int64_t>(l), static_cast<int64_t>(r));
inline V128 V128_From64WithZeroFill(const uint64_t r) {
return _mm_set_epi64x(static_cast<int64_t>(0), static_cast<int64_t>(r));
}
template <int imm>
@@ -192,8 +193,8 @@ inline uint64_t V128_Extract64(const V128 l) {
inline int64_t V128_Low64(const V128 l) { return _mm_cvtsi128_si64(l); }
inline V128 V128_ShiftLeft64(const V128 l, const V128 r) {
return _mm_sll_epi64(l, r);
inline V128 V128_Add64(const V128 l, const V128 r) {
return _mm_add_epi64(l, r);
}
#elif defined(ABSL_CRC_INTERNAL_HAVE_ARM_SIMD)
@@ -262,10 +263,12 @@ inline V128 V128_Xor(const V128 l, const V128 r) { return veorq_u64(l, r); }
inline V128 V128_And(const V128 l, const V128 r) { return vandq_u64(l, r); }
inline V128 V128_From2x64(const uint64_t l, const uint64_t r) {
return vcombine_u64(vcreate_u64(r), vcreate_u64(l));
inline V128 V128_From64WithZeroFill(const uint64_t r){
constexpr uint64x2_t kZero = {0, 0};
return vsetq_lane_u64(r, kZero, 0);
}
template <int imm>
inline V128 V128_ShiftRight(const V128 l) {
return vreinterpretq_u64_s8(
@@ -286,9 +289,7 @@ inline int64_t V128_Low64(const V128 l) {
return vgetq_lane_s64(vreinterpretq_s64_u64(l), 0);
}
inline V128 V128_ShiftLeft64(const V128 l, const V128 r) {
return vshlq_u64(l, vreinterpretq_s64_u64(r));
}
inline V128 V128_Add64(const V128 l, const V128 r) { return vaddq_u64(l, r); }
#endif

View File

@@ -12,12 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cstdint>
#include <cstring>
#include <memory>
#include "absl/base/config.h"
#include "absl/crc/crc32c.h"
#include "absl/crc/internal/crc_memcpy.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN

View File

@@ -52,6 +52,7 @@
#include <cstring>
#include <memory>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/optimization.h"
#include "absl/base/prefetch.h"
@@ -88,9 +89,11 @@ inline crc32c_t ShortCrcCopy(char* dst, const char* src, std::size_t length,
constexpr size_t kIntLoadsPerVec = sizeof(V128) / sizeof(uint64_t);
// Common function for copying the tails of multiple large regions.
// Disable ubsan for benign unaligned access. See b/254108538.
template <size_t vec_regions, size_t int_regions>
inline void LargeTailCopy(crc32c_t* crcs, char** dst, const char** src,
size_t region_size, size_t copy_rounds) {
ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED inline void LargeTailCopy(
crc32c_t* crcs, char** dst, const char** src, size_t region_size,
size_t copy_rounds) {
std::array<V128, vec_regions> data;
std::array<uint64_t, kIntLoadsPerVec * int_regions> int_data;
@@ -127,8 +130,8 @@ inline void LargeTailCopy(crc32c_t* crcs, char** dst, const char** src,
size_t data_index = i * kIntLoadsPerVec + j;
int_data[data_index] = *(usrc + j);
crcs[region] = crc32c_t{static_cast<uint32_t>(CRC32_u64(
static_cast<uint32_t>(crcs[region]), int_data[data_index]))};
crcs[region] = crc32c_t{CRC32_u64(static_cast<uint32_t>(crcs[region]),
int_data[data_index])};
*(udst + j) = int_data[data_index];
}
@@ -155,8 +158,10 @@ class AcceleratedCrcMemcpyEngine : public CrcMemcpyEngine {
std::size_t length, crc32c_t initial_crc) const override;
};
// Disable ubsan for benign unaligned access. See b/254108538.
template <size_t vec_regions, size_t int_regions>
crc32c_t AcceleratedCrcMemcpyEngine<vec_regions, int_regions>::Compute(
ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED crc32c_t
AcceleratedCrcMemcpyEngine<vec_regions, int_regions>::Compute(
void* __restrict dst, const void* __restrict src, std::size_t length,
crc32c_t initial_crc) const {
constexpr std::size_t kRegions = vec_regions + int_regions;
@@ -196,7 +201,6 @@ crc32c_t AcceleratedCrcMemcpyEngine<vec_regions, int_regions>::Compute(
// Start work on the CRC: undo the XOR from the previous calculation or set up
// the initial value of the CRC.
// initial_crc ^= kCrcDataXor;
initial_crc = crc32c_t{static_cast<uint32_t>(initial_crc) ^ kCrcDataXor};
// Do an initial alignment copy, so we can use aligned store instructions to
@@ -295,8 +299,8 @@ crc32c_t AcceleratedCrcMemcpyEngine<vec_regions, int_regions>::Compute(
// Load and CRC the data.
int_data[data_index] = *(usrc + i * kIntLoadsPerVec + k);
crcs[region] = crc32c_t{static_cast<uint32_t>(CRC32_u64(
static_cast<uint32_t>(crcs[region]), int_data[data_index]))};
crcs[region] = crc32c_t{CRC32_u64(static_cast<uint32_t>(crcs[region]),
int_data[data_index])};
// Store the data.
*(udst + i * kIntLoadsPerVec + k) = int_data[data_index];

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cstdint>
#include <cstddef>
#include "absl/base/config.h"
#include "absl/crc/crc32c.h"

View File

@@ -101,13 +101,17 @@ constexpr size_t kMediumCutoff = 2048;
namespace {
uint32_t multiply(uint32_t a, uint32_t b) {
V128 shifts = V128_From2x64(0, 1);
V128 power = V128_From2x64(0, a);
V128 crc = V128_From2x64(0, b);
V128 power = V128_From64WithZeroFill(a);
V128 crc = V128_From64WithZeroFill(b);
V128 res = V128_PMulLow(power, crc);
// Combine crc values
res = V128_ShiftLeft64(res, shifts);
// Combine crc values.
//
// Adding res to itself is equivalent to multiplying by 2,
// or shifting left by 1. Addition is used as not all compilers
// are able to generate optimal code without this hint.
// https://godbolt.org/z/rr3fMnf39
res = V128_Add64(res, res);
return static_cast<uint32_t>(V128_Extract32<1>(res)) ^
CRC32_u32(0, static_cast<uint32_t>(V128_Low64(res)));
}
@@ -444,11 +448,11 @@ class CRC32AcceleratedX86ARMCombinedMultipleStreams
V128 magic = *(reinterpret_cast<const V128*>(kClmulConstants) + bs - 1);
V128 tmp = V128_From2x64(0, l64);
V128 tmp = V128_From64WithZeroFill(l64);
V128 res1 = V128_PMulLow(tmp, magic);
tmp = V128_From2x64(0, l641);
tmp = V128_From64WithZeroFill(l641);
V128 res2 = V128_PMul10(tmp, magic);
V128 x = V128_Xor(res1, res2);

View File

@@ -19,19 +19,8 @@
#include <intrin.h>
#endif
#ifdef __SSE__
#include <xmmintrin.h>
#endif
#ifdef __SSE2__
#include <emmintrin.h>
#endif
#ifdef __SSE3__
#include <pmmintrin.h>
#endif
#ifdef __AVX__
#if defined(__SSE__) || defined(__AVX__)
// Pulls in both SSE and AVX intrinsics.
#include <immintrin.h>
#endif
@@ -44,6 +33,7 @@
#include <cstdint>
#include <cstring>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/optimization.h"
@@ -57,7 +47,9 @@ namespace crc_internal {
// memcpy can save 1 DRAM load of the destination cacheline.
constexpr size_t kCacheLineSize = ABSL_CACHELINE_SIZE;
// If the objects overlap, the behavior is undefined.
// If the objects overlap, the behavior is undefined. Uses regular memcpy
// instead of non-temporal memcpy if the required CPU intrinsics are unavailable
// at compile time.
inline void *non_temporal_store_memcpy(void *__restrict dst,
const void *__restrict src, size_t len) {
#if defined(__SSE3__) || defined(__aarch64__) || \
@@ -119,10 +111,20 @@ inline void *non_temporal_store_memcpy(void *__restrict dst,
#endif // __SSE3__ || __aarch64__ || (_MSC_VER && __AVX__)
}
// If the objects overlap, the behavior is undefined. Uses regular memcpy
// instead of non-temporal memcpy if the required CPU intrinsics are unavailable
// at compile time.
#if ABSL_HAVE_CPP_ATTRIBUTE(gnu::target) && \
(defined(__x86_64__) || defined(__i386__))
[[gnu::target("avx")]]
#endif
inline void *non_temporal_store_memcpy_avx(void *__restrict dst,
const void *__restrict src,
size_t len) {
#ifdef __AVX__
// This function requires AVX. For clang and gcc we compile it with AVX even
// if the translation unit isn't built with AVX support. This works because we
// only select this implementation at runtime if the CPU supports AVX.
#if defined(__SSE3__) || (defined(_MSC_VER) && defined(__AVX__))
uint8_t *d = reinterpret_cast<uint8_t *>(dst);
const uint8_t *s = reinterpret_cast<const uint8_t *>(src);
@@ -168,9 +170,8 @@ inline void *non_temporal_store_memcpy_avx(void *__restrict dst,
}
return dst;
#else
// Fallback to regular memcpy when AVX is not available.
return memcpy(dst, src, len);
#endif // __AVX__
#endif // __SSE3__ || (_MSC_VER && __AVX__)
}
} // namespace crc_internal

View File

@@ -228,9 +228,12 @@ cc_library(
"//absl/debugging:__pkg__",
],
deps = [
":demangle_rust",
"//absl/base",
"//absl/base:config",
"//absl/base:core_headers",
"//absl/base:nullability",
"//absl/numeric:bits",
],
)
@@ -251,6 +254,106 @@ cc_test(
],
)
cc_library(
name = "bounded_utf8_length_sequence",
hdrs = ["internal/bounded_utf8_length_sequence.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
"//absl/base:config",
"//absl/numeric:bits",
],
)
cc_test(
name = "bounded_utf8_length_sequence_test",
srcs = ["internal/bounded_utf8_length_sequence_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":bounded_utf8_length_sequence",
"//absl/base:config",
"@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "decode_rust_punycode",
srcs = ["internal/decode_rust_punycode.cc"],
hdrs = ["internal/decode_rust_punycode.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":bounded_utf8_length_sequence",
":utf8_for_code_point",
"//absl/base:config",
"//absl/base:nullability",
],
)
cc_test(
name = "decode_rust_punycode_test",
srcs = ["internal/decode_rust_punycode_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":decode_rust_punycode",
"//absl/base:config",
"@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "demangle_rust",
srcs = ["internal/demangle_rust.cc"],
hdrs = ["internal/demangle_rust.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":decode_rust_punycode",
"//absl/base:config",
"//absl/base:core_headers",
],
)
cc_test(
name = "demangle_rust_test",
srcs = ["internal/demangle_rust_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":demangle_rust",
"//absl/base:config",
"//absl/base:core_headers",
"@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "utf8_for_code_point",
srcs = ["internal/utf8_for_code_point.cc"],
hdrs = ["internal/utf8_for_code_point.h"],
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = ["//absl/base:config"],
)
cc_test(
name = "utf8_for_code_point_test",
srcs = ["internal/utf8_for_code_point_test.cc"],
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":utf8_for_code_point",
"//absl/base:config",
"@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "leak_check",
srcs = ["leak_check.cc"],

View File

@@ -30,7 +30,7 @@ absl_source_set("stacktrace") {
}
absl_test("stacktrace_test") {
sources = ["stacktrace_test.cc"]
sources = [ "stacktrace_test.cc" ]
deps = [
":stacktrace",
"//third_party/abseil-cpp/absl/base:core_headers",
@@ -126,9 +126,93 @@ absl_source_set("demangle_internal") {
"//third_party/abseil-cpp/absl/debugging:*",
]
deps = [
":demangle_rust",
"//third_party/abseil-cpp/absl/base",
"//third_party/abseil-cpp/absl/base:config",
"//third_party/abseil-cpp/absl/base:core_headers",
"//third_party/abseil-cpp/absl/base:nullability",
"//third_party/abseil-cpp/absl/numeric:bits",
]
}
# Disabled because this test relies on RTTI
# absl_test("demangle_test") {
# sources = [ "internal/demangle_test.cc" ]
# deps = [
# ":demangle_internal",
# ":stack_consumption",
# "//third_party/abseil-cpp/absl/base:config",
# "//third_party/abseil-cpp/absl/base:core_headers",
# "//third_party/abseil-cpp/absl/log",
# "//third_party/abseil-cpp/absl/memory",
# ]
# }
absl_source_set("bounded_utf8_length_sequence") {
public = [ "internal/bounded_utf8_length_sequence.h" ]
deps = [
"//third_party/abseil-cpp/absl/base:config",
"//third_party/abseil-cpp/absl/numeric:bits",
]
}
absl_test("bounded_utf8_length_sequence_test") {
sources = [ "internal/bounded_utf8_length_sequence_test.cc" ]
deps = [
":bounded_utf8_length_sequence",
"//third_party/abseil-cpp/absl/base:config",
]
}
absl_source_set("decode_rust_punycode") {
sources = [ "internal/decode_rust_punycode.cc" ]
public = [ "internal/decode_rust_punycode.h" ]
deps = [
":bounded_utf8_length_sequence",
":utf8_for_code_point",
"//third_party/abseil-cpp/absl/base:config",
"//third_party/abseil-cpp/absl/base:nullability",
]
}
absl_test("decode_rust_punycode_test") {
sources = [ "internal/decode_rust_punycode_test.cc" ]
deps = [
":decode_rust_punycode",
"//third_party/abseil-cpp/absl/base:config",
]
}
absl_source_set("demangle_rust") {
sources = [ "internal/demangle_rust.cc" ]
public = [ "internal/demangle_rust.h" ]
deps = [
":decode_rust_punycode",
"//third_party/abseil-cpp/absl/base:config",
"//third_party/abseil-cpp/absl/base:core_headers",
]
}
absl_test("demangle_rust_test") {
sources = [ "internal/demangle_rust_test.cc" ]
deps = [
":demangle_rust",
"//third_party/abseil-cpp/absl/base:config",
"//third_party/abseil-cpp/absl/base:core_headers",
]
}
absl_source_set("utf8_for_code_point") {
sources = [ "internal/utf8_for_code_point.cc" ]
public = [ "internal/utf8_for_code_point.h" ]
deps = [ "//third_party/abseil-cpp/absl/base:config" ]
}
absl_test("utf8_for_code_point_test") {
sources = [ "internal/utf8_for_code_point_test.cc" ]
deps = [
":utf8_for_code_point",
"//third_party/abseil-cpp/absl/base:config",
]
}

View File

@@ -201,8 +201,8 @@ absl_cc_library(
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::base
absl::core_headers
absl::config
absl::demangle_rust
PUBLIC
)
@@ -223,6 +223,118 @@ absl_cc_test(
GTest::gmock_main
)
# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
bounded_utf8_length_sequence
HDRS
"internal/bounded_utf8_length_sequence.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::bits
absl::config
)
absl_cc_test(
NAME
bounded_utf8_length_sequence_test
SRCS
"internal/bounded_utf8_length_sequence_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::bounded_utf8_length_sequence
absl::config
GTest::gmock_main
)
# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
decode_rust_punycode
HDRS
"internal/decode_rust_punycode.h"
SRCS
"internal/decode_rust_punycode.cc"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::bounded_utf8_length_sequence
absl::config
absl::nullability
absl::utf8_for_code_point
)
absl_cc_test(
NAME
decode_rust_punycode_test
SRCS
"internal/decode_rust_punycode_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::decode_rust_punycode
absl::config
GTest::gmock_main
)
# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
demangle_rust
HDRS
"internal/demangle_rust.h"
SRCS
"internal/demangle_rust.cc"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::config
absl::core_headers
absl::decode_rust_punycode
)
absl_cc_test(
NAME
demangle_rust_test
SRCS
"internal/demangle_rust_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::demangle_rust
absl::config
GTest::gmock_main
)
# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
utf8_for_code_point
HDRS
"internal/utf8_for_code_point.h"
SRCS
"internal/utf8_for_code_point.cc"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::config
)
absl_cc_test(
NAME
utf8_for_code_point_test
SRCS
"internal/utf8_for_code_point_test.cc"
COPTS
${ABSL_TEST_COPTS}
DEPS
absl::utf8_for_code_point
absl::config
GTest::gmock_main
)
absl_cc_library(
NAME
leak_check

View File

@@ -0,0 +1,126 @@
// Copyright 2024 The Abseil Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_DEBUGGING_INTERNAL_BOUNDED_UTF8_LENGTH_SEQUENCE_H_
#define ABSL_DEBUGGING_INTERNAL_BOUNDED_UTF8_LENGTH_SEQUENCE_H_
#include <cstdint>
#include "absl/base/config.h"
#include "absl/numeric/bits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
// A sequence of up to max_elements integers between 1 and 4 inclusive, whose
// insertion operation computes the sum of all the elements before the insertion
// point. This is useful in decoding Punycode, where one needs to know where in
// a UTF-8 byte stream the n-th code point begins.
//
// BoundedUtf8LengthSequence is async-signal-safe and suitable for use in
// symbolizing stack traces in a signal handler, provided max_elements is not
// improvidently large. For inputs of lengths accepted by the Rust demangler,
// up to a couple hundred code points, InsertAndReturnSumOfPredecessors should
// run in a few dozen clock cycles, on par with the other arithmetic required
// for Punycode decoding.
template <uint32_t max_elements>
class BoundedUtf8LengthSequence {
 public:
  // Constructs an empty sequence.
  BoundedUtf8LengthSequence() = default;
  // Inserts `utf8_length` at position `index`, shifting any existing elements
  // at or beyond `index` one position to the right. If the sequence is already
  // full, the rightmost element is discarded.
  //
  // Returns the sum of the elements at positions 0 to `index - 1` inclusive.
  // If `index` is greater than the number of elements already inserted, the
  // excess positions in the range count 1 apiece.
  //
  // REQUIRES: index < max_elements and 1 <= utf8_length <= 4.
  uint32_t InsertAndReturnSumOfPredecessors(
      uint32_t index, uint32_t utf8_length) {
    // The caller shouldn't pass out-of-bounds inputs, but if it does happen,
    // clamp the values and try to continue. If we're being called from a
    // signal handler, the last thing we want to do is crash. Emitting
    // malformed UTF-8 is a lesser evil.
    if (index >= max_elements) index = max_elements - 1;
    if (utf8_length == 0 || utf8_length > 4) utf8_length = 1;
    // Locate the two-bit field for `index`: rep_ packs 32 two-bit fields
    // ("seminibbles", see the rep_ comment below) into each uint64_t word.
    const uint32_t word_index = index/32;
    const uint32_t bit_index = 2 * (index % 32);
    const uint64_t ones_bit = uint64_t{1} << bit_index;
    // Compute the sum of predecessors.
    //   - Each value from 1 to 4 is represented by a bit field with value from
    //     0 to 3, so the desired sum is index plus the sum of the
    //     representations actually stored.
    //   - For each bit field, a set low bit should contribute 1 to the sum,
    //     and a set high bit should contribute 2.
    //   - Another way to say the same thing is that each set bit contributes
    //     1, and each set high bit contributes an additional 1.
    //   - So the sum we want is index + popcount(everything) + popcount(bits
    //     in odd positions).
    const uint64_t odd_bits_mask = 0xaaaaaaaaaaaaaaaa;
    const uint64_t lower_seminibbles_mask = ones_bit - 1;
    const uint64_t higher_seminibbles_mask = ~lower_seminibbles_mask;
    // Bits of the insertion word strictly below the insertion point ...
    const uint64_t same_word_bits_below_insertion =
        rep_[word_index] & lower_seminibbles_mask;
    int full_popcount = absl::popcount(same_word_bits_below_insertion);
    int odd_popcount =
        absl::popcount(same_word_bits_below_insertion & odd_bits_mask);
    // ... plus every word wholly below the insertion word, counted in full.
    for (uint32_t j = word_index; j > 0; --j) {
      const uint64_t word_below_insertion = rep_[j - 1];
      full_popcount += absl::popcount(word_below_insertion);
      odd_popcount += absl::popcount(word_below_insertion & odd_bits_mask);
    }
    const uint32_t sum_of_predecessors =
        index + static_cast<uint32_t>(full_popcount + odd_popcount);
    // Now insert utf8_length's representation, shifting successors up one
    // place. Each word's top two bits carry into the word above it; the
    // topmost word's top field is discarded, dropping the rightmost element.
    for (uint32_t j = max_elements/32 - 1; j > word_index; --j) {
      rep_[j] = (rep_[j] << 2) | (rep_[j - 1] >> 62);
    }
    rep_[word_index] =
        (rep_[word_index] & lower_seminibbles_mask) |
        (uint64_t{utf8_length - 1} << bit_index) |
        ((rep_[word_index] & higher_seminibbles_mask) << 2);
    return sum_of_predecessors;
  }
 private:
  // If the (32 * i + j)-th element of the represented sequence has the value k
  // (0 <= j < 32, 1 <= k <= 4), then bits 2 * j and 2 * j + 1 of rep_[i]
  // contain the seminibble (k - 1).
  //
  // In particular, the zero-initialization of rep_ makes positions not holding
  // any inserted element count as 1 in InsertAndReturnSumOfPredecessors.
  //
  // Example: rep_ = {0xb1, ... the rest zeroes ...} represents the sequence
  // (2, 1, 4, 3, ... the rest 1's ...). Constructing the sequence of Unicode
  // code points "Àa🂻中" = {U+00C0, U+0061, U+1F0BB, U+4E2D} (among many
  // other examples) would yield this value of rep_.
  static_assert(max_elements > 0 && max_elements % 32 == 0,
                "max_elements must be a positive multiple of 32");
  uint64_t rep_[max_elements/32] = {};
};
} // namespace debugging_internal
ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_DEBUGGING_INTERNAL_BOUNDED_UTF8_LENGTH_SEQUENCE_H_

View File

@@ -0,0 +1,126 @@
// Copyright 2024 The Abseil Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/debugging/internal/bounded_utf8_length_sequence.h"
#include <cstdint>
#include "gtest/gtest.h"
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
namespace {
TEST(BoundedUtf8LengthSequenceTest, RemembersAValueOfOneCorrectly) {
  // A stored 1 must contribute exactly 1 to the prefix sum of the next insert.
  BoundedUtf8LengthSequence<32> lengths;
  ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(0, 1), 0);
  EXPECT_EQ(lengths.InsertAndReturnSumOfPredecessors(1, 1), 1);
}
TEST(BoundedUtf8LengthSequenceTest, RemembersAValueOfTwoCorrectly) {
  // A stored 2 must contribute exactly 2 to the prefix sum of the next insert.
  BoundedUtf8LengthSequence<32> lengths;
  ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(0, 2), 0);
  EXPECT_EQ(lengths.InsertAndReturnSumOfPredecessors(1, 1), 2);
}
TEST(BoundedUtf8LengthSequenceTest, RemembersAValueOfThreeCorrectly) {
  // A stored 3 must contribute exactly 3 to the prefix sum of the next insert.
  BoundedUtf8LengthSequence<32> lengths;
  ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(0, 3), 0);
  EXPECT_EQ(lengths.InsertAndReturnSumOfPredecessors(1, 1), 3);
}
TEST(BoundedUtf8LengthSequenceTest, RemembersAValueOfFourCorrectly) {
  // A stored 4 (the largest legal length) must contribute exactly 4.
  BoundedUtf8LengthSequence<32> lengths;
  ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(0, 4), 0);
  EXPECT_EQ(lengths.InsertAndReturnSumOfPredecessors(1, 1), 4);
}
TEST(BoundedUtf8LengthSequenceTest, RemembersSeveralAppendedValues) {
  // Append the lengths 1, 4, 2, 3, 1; each insertion must report the running
  // total of everything inserted before it.
  BoundedUtf8LengthSequence<32> lengths;
  static constexpr uint32_t kValues[] = {1, 4, 2, 3, 1};
  uint32_t prefix_sum = 0;
  for (uint32_t i = 0; i < 5; ++i) {
    ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(i, kValues[i]),
              prefix_sum);
    prefix_sum += kValues[i];
  }
}
TEST(BoundedUtf8LengthSequenceTest, RemembersSeveralPrependedValues) {
  // Prepend 4, 3, 2, 1 in that order, so the sequence reads (1, 2, 3, 4, ...).
  BoundedUtf8LengthSequence<32> lengths;
  for (uint32_t value = 4; value >= 1; --value) {
    ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(0, value), 0);
  }
  // Probe the prefix sums from right to left by inserting 1's.
  ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(4, 1), 10);
  ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(3, 1), 6);
  ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(2, 1), 3);
  ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(1, 1), 1);
}
TEST(BoundedUtf8LengthSequenceTest, RepeatedInsertsShiftValuesOutTheRightEnd) {
  // Seed a 2 at the front, then repeatedly prepend a 1, walking the 2 toward
  // the right end of the 32-element window. While the 2 sits at index i < 31,
  // the prefix sum at index 31 is 30 ones plus the 2, i.e. 32.
  BoundedUtf8LengthSequence<32> lengths;
  ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(0, 2), 0);
  for (uint32_t i = 1; i < 31; ++i) {
    ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(0, 1), 0)
        << "while moving the 2 into position " << i;
    ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(31, 1), 32)
        << "after moving the 2 into position " << i;
  }
  // One more prepend parks the 2 at index 31 itself, so it no longer counts
  // among the predecessors of index 31: the sum drops to 31 ones.
  ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(0, 1), 0)
      << "while moving the 2 into position 31";
  EXPECT_EQ(lengths.InsertAndReturnSumOfPredecessors(31, 1), 31)
      << "after moving the 2 into position 31";
}
TEST(BoundedUtf8LengthSequenceTest, InsertsIntoWord1LeaveWord0Untouched) {
  // Fill word 0 (indices 0..31) with 2's.
  BoundedUtf8LengthSequence<64> lengths;
  for (uint32_t i = 0; i < 32; ++i) {
    ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(i, 2), 2 * i)
        << "at index " << i;
  }
  // Both inserts at index 32 land in word 1; the 32 twos keep summing to 64,
  // showing word 0 was not disturbed by either insertion.
  EXPECT_EQ(lengths.InsertAndReturnSumOfPredecessors(32, 1), 64);
  EXPECT_EQ(lengths.InsertAndReturnSumOfPredecessors(32, 1), 64);
}
TEST(BoundedUtf8LengthSequenceTest, InsertsIntoWord0ShiftValuesIntoWord1) {
  // Park a 2, a 3, and a 4 at the top of word 0 (indices 29..31).
  BoundedUtf8LengthSequence<64> lengths;
  ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(29, 2), 29);
  ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(30, 3), 31);
  ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(31, 4), 34);
  // Two prepended 1's push the 3 and the 4 across the boundary into word 1.
  ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(0, 1), 0);
  ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(0, 1), 0);
  ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(34, 1), 31 + 2 + 3 + 4);
  ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(32, 1), 31 + 2);
}
TEST(BoundedUtf8LengthSequenceTest, ValuesAreShiftedCorrectlyAmongThreeWords) {
  // Place a 3 at the end of word 0 and a 4 at the end of word 1.
  BoundedUtf8LengthSequence<96> lengths;
  ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(31, 3), 31);
  ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(63, 4), 62 + 3);
  // A single prepend carries the 3 into word 1 and the 4 into word 2.
  ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(0, 1), 0);
  ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(65, 1), 63 + 3 + 4);
  ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(64, 1), 63 + 3);
  ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(33, 1), 32 + 3);
  ASSERT_EQ(lengths.InsertAndReturnSumOfPredecessors(32, 1), 32);
}
} // namespace
} // namespace debugging_internal
ABSL_NAMESPACE_END
} // namespace absl

View File

@@ -0,0 +1,258 @@
// Copyright 2024 The Abseil Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/debugging/internal/decode_rust_punycode.h"
#include <cstddef>
#include <cstdint>
#include <cstring>
#include "absl/base/config.h"
#include "absl/base/nullability.h"
#include "absl/debugging/internal/bounded_utf8_length_sequence.h"
#include "absl/debugging/internal/utf8_for_code_point.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
namespace {
// Decoding Punycode requires repeated random-access insertion into a stream of
// variable-length UTF-8 code-point encodings. We need this to be tolerably
// fast (no N^2 slowdown for unfortunate inputs), and we can't allocate any data
// structures on the heap (async-signal-safety).
//
// It is pragmatic to impose a moderately low limit on the identifier length and
// bail out if we ever hit it. Then BoundedUtf8LengthSequence efficiently
// determines where to insert the next code point, and memmove efficiently makes
// room for it.
//
// The chosen limit is a round number several times larger than identifiers
// expected in practice, yet still small enough that a memmove of this many
// UTF-8 characters is not much more expensive than the division and modulus
// operations that Punycode decoding requires.
// Bound on the number of code points we are willing to decode; see the
// rationale above for the choice of 256.
constexpr uint32_t kMaxChars = 256;
// Constants from RFC 3492 section 5.
constexpr uint32_t kBase = 36, kTMin = 1, kTMax = 26, kSkew = 38, kDamp = 700;
// Highest valid Unicode scalar value (U+10FFFF).
constexpr uint32_t kMaxCodePoint = 0x10ffff;
// Overflow threshold in DecodeRustPunycode's inner loop; see comments there.
constexpr uint32_t kMaxI = 1 << 30;
// If punycode_begin .. punycode_end starts with a prefix matching the regular
// expression [0-9a-zA-Z_]+_, strips that prefix from the input, copies all of
// it except the final underscore into out_begin .. out_end (NUL-terminated),
// sets num_ascii_chars to the number of bytes copied, and returns true. (Such
// a prefix carries the nonempty subsequence of ASCII characters in the
// corresponding plaintext.)
//
// If punycode_begin .. punycode_end contains no underscore, sets
// num_ascii_chars to zero and returns true. (The encoding of a plaintext
// without any ASCII characters carries no such prefix.)
//
// Returns false and zeroes num_ascii_chars on failure (either parse error or
// not enough space in the output buffer).
bool ConsumeOptionalAsciiPrefix(const char*& punycode_begin,
                                const char* const punycode_end,
                                char* const out_begin,
                                char* const out_end,
                                uint32_t& num_ascii_chars) {
  num_ascii_chars = 0;

  // Single scan: remember the rightmost underscore and reject any byte that is
  // neither an underscore nor an ASCII alphanumeric (non-ASCII bytes should
  // have been delta-encoded). The alphanumeric test is spelled out by hand
  // rather than calling absl::ascii_isalnum because that function's
  // documentation does not promise async-signal-safety.
  ptrdiff_t underscore_pos = -1;
  for (const char* p = punycode_begin; p != punycode_end; ++p) {
    const char c = *p;
    if (c == '_') {
      underscore_pos = p - punycode_begin;
    } else if (!(('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') ||
                 ('0' <= c && c <= '9'))) {
      return false;
    }
  }

  // No underscore anywhere: the plaintext had no ASCII characters, so there is
  // no prefix to consume.
  if (underscore_pos < 0) return true;
  // A leading underscore would delimit an empty ASCII run, which the encoder
  // never produces.
  if (underscore_pos == 0) return false;
  // Need room in the output buffer for the copied bytes plus the NUL.
  if (underscore_pos + 1 > out_end - out_begin) return false;

  // Consume and write out the ASCII characters.
  num_ascii_chars = static_cast<uint32_t>(underscore_pos);
  std::memcpy(out_begin, punycode_begin, num_ascii_chars);
  out_begin[num_ascii_chars] = '\0';
  punycode_begin += num_ascii_chars + 1;
  return true;
}
// Returns the value of `c` as a base-36 digit according to RFC 3492 section 5,
// or -1 if `c` is not such a digit. Letters (case-insensitive) map to 0..25
// and decimal digits map to 26..35.
int DigitValue(char c) {
  if (c >= 'a' && c <= 'z') return c - 'a';
  if (c >= 'A' && c <= 'Z') return c - 'A';
  if (c >= '0' && c <= '9') return c - '0' + 26;
  return -1;
}
// Consumes the next delta encoding from punycode_begin .. punycode_end,
// updating i accordingly and leaving punycode_begin just past the digits
// consumed. Returns true on success. Returns false on parse failure or
// arithmetic overflow.
bool ScanNextDelta(const char*& punycode_begin, const char* const punycode_end,
                   uint32_t bias, uint32_t& i) {
  uint64_t w = 1;  // 64 bits to prevent overflow in w *= kBase - t

  // "for k = base to infinity in steps of base do begin ... end" in RFC 3492
  // section 6.2.  Each loop iteration scans one digit of the delta; `w` is the
  // place value of the digit about to be scanned.
  for (uint32_t k = kBase; punycode_begin != punycode_end; k += kBase) {
    const int digit_value = DigitValue(*punycode_begin++);
    if (digit_value < 0) return false;

    // Compute this in 64-bit arithmetic so we can check for overflow afterward.
    const uint64_t new_i = i + static_cast<uint64_t>(digit_value) * w;

    // Valid deltas are bounded by (#chars already emitted) * kMaxCodePoint, but
    // invalid input could encode an arbitrarily large delta.  Nip that in the
    // bud here.
    static_assert(
        kMaxI >= kMaxChars * kMaxCodePoint,
        "kMaxI is too small to prevent spurious failures on good input");
    if (new_i > kMaxI) return false;

    static_assert(
        kMaxI < (uint64_t{1} << 32),
        "Make kMaxI smaller or i 64 bits wide to prevent silent wraparound");
    i = static_cast<uint32_t>(new_i);

    // Compute the threshold that determines whether this is the last digit and
    // (if not) what the next digit's place value will be.  This logic from RFC
    // 3492 section 6.2 is explained in section 3.3.  t is clamped into
    // [kTMin, kTMax].
    uint32_t t;
    if (k <= bias + kTMin) {
      t = kTMin;
    } else if (k >= bias + kTMax) {
      t = kTMax;
    } else {
      t = k - bias;
    }

    // A digit below the threshold is, by construction, the final digit.
    if (static_cast<uint32_t>(digit_value) < t) return true;

    // If this gets too large, the range check on new_i in the next iteration
    // will catch it.  We know this multiplication will not overwrap because w
    // is 64 bits wide.
    w *= kBase - t;
  }

  // Input exhausted without reaching a final digit: truncated delta.
  return false;
}
} // namespace
absl::Nullable<char*> DecodeRustPunycode(DecodeRustPunycodeOptions options) {
  const char* punycode_begin = options.punycode_begin;
  const char* const punycode_end = options.punycode_end;
  char* const out_begin = options.out_begin;
  char* const out_end = options.out_end;

  // Write a NUL terminator first.  Later memcpy calls will keep bumping it
  // along to its new right place.
  const size_t out_size = static_cast<size_t>(out_end - out_begin);
  if (out_size == 0) return nullptr;
  *out_begin = '\0';

  // RFC 3492 section 6.2 begins here.  We retain the names of integer
  // variables appearing in that text: n is the current code point, i is the
  // current insertion index, and bias feeds the variable-length-digit
  // thresholds in ScanNextDelta.
  uint32_t n = 128, i = 0, bias = 72, num_chars = 0;

  // If there are any ASCII characters, consume them and their trailing
  // underscore delimiter.
  if (!ConsumeOptionalAsciiPrefix(punycode_begin, punycode_end,
                                  out_begin, out_end, num_chars)) {
    return nullptr;
  }
  uint32_t total_utf8_bytes = num_chars;

  // Records the UTF-8 byte length of each code point emitted so far, so that
  // the character index i can be converted into a byte offset below.
  BoundedUtf8LengthSequence<kMaxChars> utf8_lengths;

  // "while the input is not exhausted do begin ... end"
  while (punycode_begin != punycode_end) {
    if (num_chars >= kMaxChars) return nullptr;
    const uint32_t old_i = i;
    if (!ScanNextDelta(punycode_begin, punycode_end, bias, i)) return nullptr;

    // Update bias as in RFC 3492 section 6.1.  (We have inlined adapt.)
    uint32_t delta = i - old_i;
    delta /= (old_i == 0 ? kDamp : 2);
    delta += delta/(num_chars + 1);
    bias = 0;
    while (delta > ((kBase - kTMin) * kTMax)/2) {
      delta /= kBase - kTMin;
      bias += kBase;
    }
    bias += ((kBase - kTMin + 1) * delta)/(delta + kSkew);

    // Back in section 6.2, compute the new code point and insertion index.
    static_assert(
        kMaxI + kMaxCodePoint < (uint64_t{1} << 32),
        "Make kMaxI smaller or n 64 bits wide to prevent silent wraparound");
    n += i/(num_chars + 1);
    i %= num_chars + 1;

    // To actually insert, we need to convert the code point n to UTF-8 and the
    // character index i to an index into the byte stream emitted so far.  First
    // prepare the UTF-8 encoding for n, rejecting surrogates, overlarge values,
    // and anything that won't fit into the remaining output storage.
    Utf8ForCodePoint utf8_for_code_point(n);
    if (!utf8_for_code_point.ok()) return nullptr;
    if (total_utf8_bytes + utf8_for_code_point.length + 1 > out_size) {
      return nullptr;
    }

    // Now insert the new character into both our length map and the output.
    uint32_t n_index =
        utf8_lengths.InsertAndReturnSumOfPredecessors(
            i, utf8_for_code_point.length);
    // Shift the tail of the output (including the trailing NUL) right to open
    // a gap, then drop the new character's bytes into it.
    std::memmove(
        out_begin + n_index + utf8_for_code_point.length, out_begin + n_index,
        total_utf8_bytes + 1 - n_index);
    std::memcpy(out_begin + n_index, utf8_for_code_point.bytes,
                utf8_for_code_point.length);
    total_utf8_bytes += utf8_for_code_point.length;
    ++num_chars;

    // Finally, advance to the next state before continuing.
    ++i;
  }

  // Success: the output is NUL-terminated at out_begin[total_utf8_bytes], and
  // we return a pointer to that NUL.
  return out_begin + total_utf8_bytes;
}
} // namespace debugging_internal
ABSL_NAMESPACE_END
} // namespace absl

View File

@@ -0,0 +1,55 @@
// Copyright 2024 The Abseil Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef ABSL_DEBUGGING_INTERNAL_DECODE_RUST_PUNYCODE_H_
#define ABSL_DEBUGGING_INTERNAL_DECODE_RUST_PUNYCODE_H_
#include "absl/base/config.h"
#include "absl/base/nullability.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
// Input and output ranges for DecodeRustPunycode. Both ranges are half-open:
// begin points at the first byte and end one past the last.
struct DecodeRustPunycodeOptions {
  const char* punycode_begin;  // first byte of the Punycode input
  const char* punycode_end;    // one past the last byte of the input
  char* out_begin;             // first byte of the output buffer
  char* out_end;               // one past the last byte of the output buffer
};
// Given Rust Punycode in `punycode_begin .. punycode_end`, writes the
// corresponding UTF-8 plaintext into `out_begin .. out_end`, followed by a NUL
// character, and returns a pointer to that final NUL on success. On failure
// returns a null pointer, and the contents of `out_begin .. out_end` are
// unspecified.
//
// Failure occurs in precisely these cases:
// - Any input byte does not match [0-9a-zA-Z_].
// - The first input byte is an underscore, but no other underscore appears in
// the input.
// - The delta sequence does not represent a valid sequence of code-point
// insertions.
// - The plaintext would contain more than 256 code points.
//
// DecodeRustPunycode is async-signal-safe with bounded runtime and a small
// stack footprint, making it suitable for use in demangling Rust symbol names
// from a signal handler.
absl::Nullable<char*> DecodeRustPunycode(DecodeRustPunycodeOptions options);
} // namespace debugging_internal
ABSL_NAMESPACE_END
} // namespace absl
#endif // ABSL_DEBUGGING_INTERNAL_DECODE_RUST_PUNYCODE_H_

View File

@@ -0,0 +1,606 @@
// Copyright 2024 The Abseil Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "absl/debugging/internal/decode_rust_punycode.h"
#include <cstddef>
#include <cstring>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {
namespace {
using ::testing::AllOf;
using ::testing::Eq;
using ::testing::IsNull;
using ::testing::Pointee;
using ::testing::ResultOf;
using ::testing::StrEq;
class DecodeRustPunycodeTest : public ::testing::Test {
protected:
void FillBufferWithNonzeroBytes() {
// The choice of nonzero value to fill with is arbitrary. The point is just
// to fail tests if DecodeRustPunycode forgets to write the final NUL
// character.
std::memset(buffer_storage_, 0xab, sizeof(buffer_storage_));
}
DecodeRustPunycodeOptions WithAmpleSpace() {
FillBufferWithNonzeroBytes();
DecodeRustPunycodeOptions options;
options.punycode_begin = punycode_.data();
options.punycode_end = punycode_.data() + punycode_.size();
options.out_begin = buffer_storage_;
options.out_end = buffer_storage_ + sizeof(buffer_storage_);
return options;
}
DecodeRustPunycodeOptions WithJustEnoughSpace() {
FillBufferWithNonzeroBytes();
const size_t begin_offset = sizeof(buffer_storage_) - plaintext_.size() - 1;
DecodeRustPunycodeOptions options;
options.punycode_begin = punycode_.data();
options.punycode_end = punycode_.data() + punycode_.size();
options.out_begin = buffer_storage_ + begin_offset;
options.out_end = buffer_storage_ + sizeof(buffer_storage_);
return options;
}
DecodeRustPunycodeOptions WithOneByteTooFew() {
FillBufferWithNonzeroBytes();
const size_t begin_offset = sizeof(buffer_storage_) - plaintext_.size();
DecodeRustPunycodeOptions options;
options.punycode_begin = punycode_.data();
options.punycode_end = punycode_.data() + punycode_.size();
options.out_begin = buffer_storage_ + begin_offset;
options.out_end = buffer_storage_ + sizeof(buffer_storage_);
return options;
}
// Matches a correct return value of DecodeRustPunycode when `golden` is the
// expected plaintext output.
auto PointsToTheNulAfter(const std::string& golden) {
const size_t golden_size = golden.size();
return AllOf(
Pointee(Eq('\0')),
ResultOf("preceding string body",
[golden_size](const char* p) { return p - golden_size; },
StrEq(golden)));
}
std::string punycode_;
std::string plaintext_;
char buffer_storage_[1024];
};
// Each accepting test below runs against three output buffers: ample space,
// exactly enough space (plaintext plus final NUL), and one byte too few,
// which must make decoding fail cleanly.
TEST_F(DecodeRustPunycodeTest, MapsEmptyToEmpty) {
  punycode_ = "";
  plaintext_ = "";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

TEST_F(DecodeRustPunycodeTest,
       StripsTheTrailingDelimiterFromAPureRunOfBasicChars) {
  punycode_ = "foo_";
  plaintext_ = "foo";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, TreatsTheLastUnderscoreAsTheDelimiter) {
  punycode_ = "foo_bar_";
  plaintext_ = "foo_bar";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, AcceptsALeadingUnderscoreIfNotTheDelimiter) {
  punycode_ = "_foo_";
  plaintext_ = "_foo";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, RejectsALeadingUnderscoreDelimiter) {
  punycode_ = "_foo";
  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, RejectsEmbeddedNul) {
  punycode_ = std::string("foo\0bar_", 8);
  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, RejectsAsciiCharsOtherThanIdentifierChars) {
  punycode_ = "foo\007_";
  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());

  punycode_ = "foo-_";
  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());

  punycode_ = "foo;_";
  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());

  punycode_ = "foo\177_";
  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
}
// Raw bytes >= 0x80 must be delta-encoded, never present literally.
TEST_F(DecodeRustPunycodeTest, RejectsRawNonAsciiChars) {
  punycode_ = "\x80";
  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());

  punycode_ = "\x80_";
  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());

  punycode_ = "\xff";
  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());

  punycode_ = "\xff_";
  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, RecognizesU0080) {
  // a encodes 0, so the output is the smallest non-ASCII code point standing
  // alone.  (U+0080 PAD is not an identifier character, but DecodeRustPunycode
  // does not check whether non-ASCII characters could belong to an identifier.)
  punycode_ = "a";
  plaintext_ = "\xc2\x80";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, OneByteDeltaSequencesMustBeA) {
  // Because bias = 72 for the first code point, any digit but a/A is nonfinal
  // in one of the first two bytes of a delta sequence.
  punycode_ = "b";
  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());

  punycode_ = "z";
  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());

  punycode_ = "0";
  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());

  punycode_ = "9";
  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, AcceptsDeltaSequenceBA) {
  punycode_ = "ba";
  plaintext_ = "\xc2\x81";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, AcceptsOtherDeltaSequencesWithSecondByteA) {
  punycode_ = "ca";
  plaintext_ = "\xc2\x82";
  EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));

  punycode_ = "za";
  plaintext_ = "\xc2\x99";
  EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));

  punycode_ = "0a";
  plaintext_ = "\xc2\x9a";
  EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));

  punycode_ = "1a";
  plaintext_ = "\xc2\x9b";
  EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));

  punycode_ = "9a";
  plaintext_ = "£";  // Pound sign, U+00A3
  EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
}

TEST_F(DecodeRustPunycodeTest, RejectsDeltaWhereTheSecondAndLastDigitIsNotA) {
  punycode_ = "bb";
  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());

  punycode_ = "zz";
  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());

  punycode_ = "00";
  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());

  punycode_ = "99";
  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, AcceptsDeltasWithSecondByteBFollowedByA) {
  punycode_ = "bba";
  plaintext_ = "¤";  // U+00A4
  EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));

  punycode_ = "cba";
  plaintext_ = "¥";  // U+00A5
  EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));

  punycode_ = "zba";
  plaintext_ = "¼";  // U+00BC
  EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));

  punycode_ = "0ba";
  plaintext_ = "½";  // U+00BD
  EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));

  punycode_ = "1ba";
  plaintext_ = "¾";  // U+00BE
  EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));

  punycode_ = "9ba";
  plaintext_ = "Æ";  // U+00C6
  EXPECT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
}
// Tests beyond this point use characters allowed in identifiers, so you can
// prepend _RNvC1cu<decimal length><underscore if [0-9_] follows> to a test
// input and run it through another Rust demangler to verify that the
// corresponding golden output is correct.

// The tests below exercise two-byte UTF-8 characters in every position
// relative to the ASCII prefix: alone, before, amid, and after basic chars.
TEST_F(DecodeRustPunycodeTest, AcceptsTwoByteCharAlone) {
  punycode_ = "0ca";
  plaintext_ = "à";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, AcceptsTwoByteCharBeforeBasicChars) {
  punycode_ = "_la_mode_yya";
  plaintext_ = "à_la_mode";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, AcceptsTwoByteCharAmidBasicChars) {
  punycode_ = "verre__vin_m4a";
  plaintext_ = "verre_à_vin";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, AcceptsTwoByteCharAfterBasicChars) {
  punycode_ = "belt_3na";
  plaintext_ = "beltà";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, AcceptsRepeatedTwoByteChar) {
  punycode_ = "0caaaa";
  plaintext_ = "àààà";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, AcceptsNearbyTwoByteCharsInOrder) {
  punycode_ = "3camsuz";
  plaintext_ = "ãéïôù";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, AcceptsNearbyTwoByteCharsOutOfOrder) {
  punycode_ = "3caltsx";
  plaintext_ = "ùéôãï";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
TEST_F(DecodeRustPunycodeTest, AcceptsThreeByteCharAlone) {
  punycode_ = "fiq";
  // "fiq" encodes delta 19885 (digits f=5, i=8, q=16 at bias 72), so the sole
  // code point is 128 + 19885 = 20013 = U+4E2D, whose UTF-8 encoding is three
  // bytes. The golden string had been dropped (an empty string), which
  // contradicts this test's name and would make the matchers fail; restore it.
  plaintext_ = "中";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
// Three-byte (BMP CJK) and four-byte (supplementary-plane) characters, in the
// same positional variations as the two-byte cases above.
TEST_F(DecodeRustPunycodeTest, AcceptsRepeatedThreeByteChar) {
  punycode_ = "fiqaaaa";
  plaintext_ = "中中中中中";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, AcceptsThreeByteCharsInOrder) {
  punycode_ = "fiq228c";
  plaintext_ = "中文";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, AcceptsNearbyThreeByteCharsOutOfOrder) {
  punycode_ = "fiq128c";
  plaintext_ = "文中";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, AcceptsFourByteCharAlone) {
  punycode_ = "uy7h";
  plaintext_ = "🂻";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, AcceptsFourByteCharBeforeBasicChars) {
  punycode_ = "jack__uh63d";
  plaintext_ = "jack_🂻";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, AcceptsFourByteCharAmidBasicChars) {
  punycode_ = "jack__of_hearts_ki37n";
  plaintext_ = "jack_🂻_of_hearts";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, AcceptsFourByteCharAfterBasicChars) {
  punycode_ = "_of_hearts_kz45i";
  plaintext_ = "🂻_of_hearts";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, AcceptsRepeatedFourByteChar) {
  punycode_ = "uy7haaaa";
  plaintext_ = "🂻🂻🂻🂻🂻";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, AcceptsNearbyFourByteCharsInOrder) {
  punycode_ = "8x7hcjmf";
  plaintext_ = "🂦🂧🂪🂭🂮";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, AcceptsNearbyFourByteCharsOutOfOrder) {
  punycode_ = "8x7hcild";
  plaintext_ = "🂮🂦🂭🂪🂧";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, AcceptsAMixtureOfByteLengths) {
  punycode_ = "3caltsx2079ivf8aiuy7cja3a6ak";
  plaintext_ = "ùéôãï中文🂮🂦🂭🂪🂧";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

TEST_F(DecodeRustPunycodeTest, RejectsOverlargeDeltas) {
  punycode_ = "123456789a";
  EXPECT_THAT(DecodeRustPunycode(WithAmpleSpace()), IsNull());
}
// Finally, we test on a few prose and poetry snippets as a defense in depth.
// If our artificial short test inputs did not exercise a bug that is tickled by
// patterns typical of real human writing, maybe real human writing will catch
// that.
//
// These test inputs are extracted from texts old enough to be out of copyright
// that probe a variety of ranges of code-point space.  All are longer than 32
// code points, so they exercise the carrying of seminibbles from one uint64_t
// to the next higher one in BoundedUtf8LengthSequence.

// The first three lines of the Old English epic _Beowulf_, mostly ASCII with a
// few archaic two-byte letters interspersed.
TEST_F(DecodeRustPunycodeTest, Beowulf) {
  punycode_ = "hwt_we_gardena_in_geardagum_"
              "eodcyninga_rym_gefrunon_"
              "hu_a_elingas_ellen_fremedon_hxg9c70do9alau";
  plaintext_ = "hwæt_we_gardena_in_geardagum_"
               "þeodcyninga_þrym_gefrunon_"
               "hu_ða_æþelingas_ellen_fremedon";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

// The whole of 過故人莊 by the 8th-century Chinese poet 孟浩然
// (Meng Haoran), exercising three-byte-character processing.
TEST_F(DecodeRustPunycodeTest, MengHaoran) {
  punycode_ = "gmq4ss0cfvao1e2wg8mcw8b0wkl9a7tt90a8riuvbk7t8kbv9a66ogofvzlf6"
              "3d01ybn1u28dyqi5q2cxyyxnk5d2gx1ks9ddvfm17bk6gbsd6wftrav60u4ta";
  plaintext_ = "故人具雞黍" "邀我至田家"
               "綠樹村邊合" "青山郭外斜"
               "開軒面場圃" "把酒話桑麻"
               "待到重陽日" "還來就菊花";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

// A poem of the 8th-century Japanese poet 山上憶良 (Yamanoue no Okura).
// Japanese mixes two-byte and three-byte characters: a good workout for codecs.
TEST_F(DecodeRustPunycodeTest, YamanoueNoOkura) {
  punycode_ = "48jdaa3a6ccpepjrsmlb0q4bwcdtid8fg6c0cai9822utqeruk3om0u4f2wbp0"
              "em23do0op23cc2ff70mb6tae8aq759gja";
  plaintext_ = "瓜食めば"
               "子ども思ほゆ"
               "栗食めば"
               "まして偲はゆ"
               "何処より"
               "来りしものそ"
               "眼交に"
               "もとな懸りて"
               "安眠し寝さぬ";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}

// The first two lines of the Phoenician-language inscription on the sarcophagus
// of Eshmunazar II of Sidon, 6th century BCE.  Phoenician and many other
// archaic scripts are allocated in the Supplemental Multilingual Plane (U+10000
// through U+1FFFF) and thus exercise four-byte-character processing.
TEST_F(DecodeRustPunycodeTest, EshmunazarSarcophagus) {
  punycode_ = "wj9caaabaabbaaohcacxvhdc7bgxbccbdcjeacddcedcdlddbdbddcdbdcknfcee"
              "ifel8del2a7inq9fhcpxikms7a4a9ac9ataaa0g";
  plaintext_ = "𐤁𐤉𐤓𐤇𐤁𐤋𐤁𐤔𐤍𐤕𐤏𐤎𐤓"
               "𐤅𐤀𐤓𐤁𐤏𐤗𐤖𐤖𐤖𐤖𐤋𐤌𐤋𐤊𐤉𐤌𐤋𐤊"
               "𐤀𐤔𐤌𐤍𐤏𐤆𐤓𐤌𐤋𐤊𐤑𐤃𐤍𐤌"
               "𐤁𐤍𐤌𐤋𐤊𐤕𐤁𐤍𐤕𐤌𐤋𐤊𐤑𐤃𐤍𐤌"
               "𐤃𐤁𐤓𐤌𐤋𐤊𐤀𐤔𐤌𐤍𐤏𐤆𐤓𐤌𐤋𐤊"
               "𐤑𐤃𐤍𐤌𐤋𐤀𐤌𐤓𐤍𐤂𐤆𐤋𐤕";
  ASSERT_THAT(DecodeRustPunycode(WithAmpleSpace()),
              PointsToTheNulAfter(plaintext_));
  ASSERT_THAT(DecodeRustPunycode(WithJustEnoughSpace()),
              PointsToTheNulAfter(plaintext_));
  EXPECT_THAT(DecodeRustPunycode(WithOneByteTooFew()), IsNull());
}
} // namespace
} // namespace debugging_internal
ABSL_NAMESPACE_END
} // namespace absl

Some files were not shown because too many files have changed in this diff Show More