
refactor: add protobuf_src & abseil

peilin.wu 1 year ago
parent
commit
114664e1a9
100 changed files with 20556 additions and 0 deletions
  1. +65 -0  Sample/lib/node_add_on/protobuf_src/BUILD.bazel
  2. +135 -0  Sample/lib/node_add_on/protobuf_src/README.md
  3. +151 -0  Sample/lib/node_add_on/protobuf_src/absl/BUILD.bazel
  4. +44 -0  Sample/lib/node_add_on/protobuf_src/absl/CMakeLists.txt
  5. +243 -0  Sample/lib/node_add_on/protobuf_src/absl/abseil.podspec.gen.py
  6. +88 -0  Sample/lib/node_add_on/protobuf_src/absl/algorithm/BUILD.bazel
  7. +71 -0  Sample/lib/node_add_on/protobuf_src/absl/algorithm/CMakeLists.txt
  8. +64 -0  Sample/lib/node_add_on/protobuf_src/absl/algorithm/algorithm.h
  9. +50 -0  Sample/lib/node_add_on/protobuf_src/absl/algorithm/algorithm_test.cc
  10. +1761 -0  Sample/lib/node_add_on/protobuf_src/absl/algorithm/container.h
  11. +1147 -0  Sample/lib/node_add_on/protobuf_src/absl/algorithm/container_test.cc
  12. +883 -0  Sample/lib/node_add_on/protobuf_src/absl/base/BUILD.bazel
  13. +740 -0  Sample/lib/node_add_on/protobuf_src/absl/base/CMakeLists.txt
  14. +952 -0  Sample/lib/node_add_on/protobuf_src/absl/base/attributes.h
  15. +109 -0  Sample/lib/node_add_on/protobuf_src/absl/base/bit_cast_test.cc
  16. +225 -0  Sample/lib/node_add_on/protobuf_src/absl/base/call_once.h
  17. +107 -0  Sample/lib/node_add_on/protobuf_src/absl/base/call_once_test.cc
  18. +180 -0  Sample/lib/node_add_on/protobuf_src/absl/base/casts.h
  19. +964 -0  Sample/lib/node_add_on/protobuf_src/absl/base/config.h
  20. +60 -0  Sample/lib/node_add_on/protobuf_src/absl/base/config_test.cc
  21. +76 -0  Sample/lib/node_add_on/protobuf_src/absl/base/const_init.h
  22. +480 -0  Sample/lib/node_add_on/protobuf_src/absl/base/dynamic_annotations.h
  23. +962 -0  Sample/lib/node_add_on/protobuf_src/absl/base/exception_safety_testing_test.cc
  24. +64 -0  Sample/lib/node_add_on/protobuf_src/absl/base/inline_variable_test.cc
  25. +27 -0  Sample/lib/node_add_on/protobuf_src/absl/base/inline_variable_test_a.cc
  26. +27 -0  Sample/lib/node_add_on/protobuf_src/absl/base/inline_variable_test_b.cc
  27. +200 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/atomic_hook.h
  28. +97 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/atomic_hook_test.cc
  29. +32 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/atomic_hook_test_helper.cc
  30. +34 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/atomic_hook_test_helper.h
  31. +22 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/cmake_thread_test.cc
  32. +77 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/cycleclock.cc
  33. +144 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/cycleclock.h
  34. +55 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/cycleclock_config.h
  35. +170 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/direct_mmap.h
  36. +398 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/dynamic_annotations.h
  37. +283 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/endian.h
  38. +263 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/endian_test.cc
  39. +43 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/errno_saver.h
  40. +45 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/errno_saver_test.cc
  41. +79 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/exception_safety_testing.cc
  42. +1109 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/exception_safety_testing.h
  43. +42 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/exception_testing.h
  44. +50 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/fast_type_id.h
  45. +123 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/fast_type_id_test.cc
  46. +51 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/hide_ptr.h
  47. +39 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/identity.h
  48. +108 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/inline_variable.h
  49. +46 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/inline_variable_testing.h
  50. +241 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/invoke.h
  51. +631 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/low_level_alloc.cc
  52. +127 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/low_level_alloc.h
  53. +180 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/low_level_alloc_test.cc
  54. +134 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/low_level_scheduling.h
  55. +106 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/nullability_impl.h
  56. +52 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/per_thread_tls.h
  57. +33 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/pretty_function.h
  58. +280 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/raw_logging.cc
  59. +217 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/raw_logging.h
  60. +58 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/scheduling_mode.h
  61. +81 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/scoped_set_env.cc
  62. +45 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/scoped_set_env.h
  63. +99 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/scoped_set_env_test.cc
  64. +232 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/spinlock.cc
  65. +275 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/spinlock.h
  66. +35 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/spinlock_akaros.inc
  67. +80 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/spinlock_benchmark.cc
  68. +71 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/spinlock_linux.inc
  69. +46 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/spinlock_posix.inc
  70. +81 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/spinlock_wait.cc
  71. +95 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/spinlock_wait.h
  72. +40 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/spinlock_win32.inc
  73. +88 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/strerror.cc
  74. +39 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/strerror.h
  75. +29 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/strerror_benchmark.cc
  76. +88 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/strerror_test.cc
  77. +489 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/sysinfo.cc
  78. +74 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/sysinfo.h
  79. +88 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/sysinfo_test.cc
  80. +163 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/thread_identity.cc
  81. +269 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/thread_identity.h
  82. +38 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/thread_identity_benchmark.cc
  83. +129 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/thread_identity_test.cc
  84. +203 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/throw_delegate.cc
  85. +75 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/throw_delegate.h
  86. +68 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/tsan_mutex_interface.h
  87. +89 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/unaligned_access.h
  88. +77 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/unique_small_name_test.cc
  89. +140 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/unscaledcycleclock.cc
  90. +96 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/unscaledcycleclock.h
  91. +62 -0  Sample/lib/node_add_on/protobuf_src/absl/base/internal/unscaledcycleclock_config.h
  92. +331 -0  Sample/lib/node_add_on/protobuf_src/absl/base/invoke_test.cc
  93. +56 -0  Sample/lib/node_add_on/protobuf_src/absl/base/log_severity.cc
  94. +185 -0  Sample/lib/node_add_on/protobuf_src/absl/base/log_severity.h
  95. +251 -0  Sample/lib/node_add_on/protobuf_src/absl/base/log_severity_test.cc
  96. +189 -0  Sample/lib/node_add_on/protobuf_src/absl/base/macros.h
  97. +218 -0  Sample/lib/node_add_on/protobuf_src/absl/base/no_destructor.h
  98. +165 -0  Sample/lib/node_add_on/protobuf_src/absl/base/no_destructor_benchmark.cc
  99. +209 -0  Sample/lib/node_add_on/protobuf_src/absl/base/no_destructor_test.cc
  100. +224 -0  Sample/lib/node_add_on/protobuf_src/absl/base/nullability.h

+ 65 - 0
Sample/lib/node_add_on/protobuf_src/BUILD.bazel

@@ -0,0 +1,65 @@
+################################################################################
+# Protocol Buffers: C++ Runtime
+################################################################################
+
+# Most rules are under google/protobuf. This package exists for convenience.
+load("@rules_pkg//:mappings.bzl", "pkg_filegroup", "pkg_files", "strip_prefix")
+load("@upb//cmake:build_defs.bzl", "staleness_test")
+load("//conformance:defs.bzl", "conformance_test")
+
+pkg_files(
+    name = "dist_files",
+    srcs = glob(["**"]),
+    strip_prefix = strip_prefix.from_root(""),
+    visibility = ["//src:__pkg__"],
+)
+
+pkg_filegroup(
+    name = "all_dist_files",
+    srcs = [
+        ":dist_files",
+        "//src/google/protobuf:dist_files",
+        "//src/google/protobuf/compiler:dist_files",
+        "//src/google/protobuf/compiler/cpp:dist_files",
+        "//src/google/protobuf/compiler/csharp:dist_files",
+        "//src/google/protobuf/compiler/java:dist_files",
+        "//src/google/protobuf/compiler/objectivec:dist_files",
+        "//src/google/protobuf/compiler/php:dist_files",
+        "//src/google/protobuf/compiler/python:dist_files",
+        "//src/google/protobuf/compiler/ruby:dist_files",
+        "//src/google/protobuf/io:dist_files",
+        "//src/google/protobuf/stubs:dist_files",
+        "//src/google/protobuf/testing:dist_files",
+        "//src/google/protobuf/util:dist_files",
+    ],
+    visibility = ["//pkg:__pkg__"],
+)
+
+conformance_test(
+    name = "conformance_test",
+    failure_list = "//conformance:failure_list_cpp.txt",
+    testee = "//conformance:conformance_cpp",
+    text_format_failure_list = "//conformance:text_format_failure_list_cpp.txt",
+)
+
+# Copy the generated file_lists.cmake into a place where the staleness test
+# below can use it.
+genrule(
+    name = "copy_cmake_lists",
+    srcs = ["//pkg:gen_src_file_lists"],
+    outs = ["cmake_copy/file_lists.cmake"],
+    cmd = "cp $< $@",
+    visibility = ["//visibility:private"],
+    tags = ["manual"],
+)
+
+staleness_test(
+    name = "cmake_lists_staleness_test",
+    outs = ["file_lists.cmake"],
+    generated_pattern = "cmake_copy/%s",
+    # Only run this test if it is explicitly specified on the command line (not
+    # via //src:all or ...). This file will be automatically updated in a
+    # GitHub action, so developers should not worry about failures from this
+    # test.
+    tags = ["manual"],
+)
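
Because the staleness test above is tagged `manual`, wildcard patterns such as `//src:all` or `...` skip it; it only runs when named explicitly on the command line. A hypothetical invocation (the exact label depends on where this vendored workspace is rooted) would be:

    bazel test //:cmake_lists_staleness_test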

+ 135 - 0
Sample/lib/node_add_on/protobuf_src/README.md

@@ -0,0 +1,135 @@
+Protocol Buffers - Google's data interchange format
+===================================================
+
+Copyright 2008 Google Inc.
+
+https://developers.google.com/protocol-buffers/
+
+CMake Installation
+-----------------------
+
+To compile or install protobuf from source using CMake, see
+[cmake/README.md](../cmake/README.md).
+
+C++ Protobuf - Unix
+-----------------------
+
+To build protobuf from source, the following tools are needed:
+
+  * bazel
+  * git
+  * g++
+
+On Ubuntu/Debian, for example, you can install them with:
+
+    sudo apt-get install g++ git bazel
+
+On other platforms, please use the corresponding package manager to
+install them before proceeding.  See https://bazel.build/install for further
+instructions on installing Bazel, or to build from source using CMake, see
+[cmake/README.md](../cmake/README.md).
+
+To get the source, download the release .tar.gz or .zip package in the
+release page:
+
+    https://github.com/protocolbuffers/protobuf/releases/latest
+
+For example: if you only need C++, download `protobuf-cpp-[VERSION].tar.gz`; if
+you need C++ and Java, download `protobuf-java-[VERSION].tar.gz` (every package
+contains C++ source already); if you need C++ and multiple other languages,
+download `protobuf-all-[VERSION].tar.gz`.
+
+You can also get the source by cloning our git repository with `git clone`.
+Make sure you have also cloned the submodules (skip this if you are using a
+release .tar.gz or .zip package):
+
+    git clone https://github.com/protocolbuffers/protobuf.git
+    cd protobuf
+    git submodule update --init --recursive
+
+To build the C++ Protocol Buffer runtime and the Protocol Buffer compiler
+(protoc) execute the following:
+
+    bazel build :protoc :protobuf
+
+The compiler can then be installed, for example on Linux:
+
+    cp bazel-bin/protoc /usr/local/bin
+
+For more usage information on Bazel, please refer to http://bazel.build.
+
+**Compiling dependent packages**
+
+To compile a package that uses Protocol Buffers, you need to set up a Bazel
+WORKSPACE that's hooked up to the protobuf repository and loads its
+dependencies.  For an example, see [WORKSPACE](../examples/WORKSPACE); a
+minimal sketch also follows this file.
+
+**Note for Mac users**
+
+For a Mac system, Unix tools are not available by default. You will first need
+to install Xcode from the Mac AppStore and then run the following command from
+a terminal:
+
+    sudo xcode-select --install
+
+To install Unix tools, you can install "port" following the instructions at
+https://www.macports.org. The port tool will reside in /opt/local/bin/port on
+most Mac installations.
+
+    sudo /opt/local/bin/port install bazel
+
+Alternative for Homebrew users:
+
+    brew install bazel
+
+Then follow the Unix instructions above.
+
+
+C++ Protobuf - Windows
+--------------------------
+
+If you only need the protoc binary, you can download it from the release
+page:
+
+    https://github.com/protocolbuffers/protobuf/releases/latest
+
+In the downloads section, download the zip file protoc-$VERSION-win32.zip.
+It contains the protoc binary as well as the public proto files of the
+protobuf library.
+
+Protobuf and its dependencies can be installed directly by using `vcpkg`:
+
+    >vcpkg install protobuf protobuf:x64-windows
+
+If zlib support is desired, you'll also need to install the zlib feature:
+
+    >vcpkg install protobuf[zlib] protobuf[zlib]:x64-windows
+
+See https://github.com/Microsoft/vcpkg for more information.
+
+To build from source using Microsoft Visual C++, see [cmake/README.md](../cmake/README.md).
+
+To build from source using Cygwin or MinGW, follow the Unix installation
+instructions, above.
+
+Binary Compatibility Warning
+----------------------------
+
+Due to the nature of C++, it is unlikely that any two versions of the
+Protocol Buffers C++ runtime libraries will have compatible ABIs.
+That is, if you linked an executable against an older version of
+libprotobuf, it is unlikely to work with a newer version without
+re-compiling.  This problem, when it occurs, will normally be detected
+immediately on startup of your app.  Still, you may want to consider
+using static linkage.  You can configure this in your `cc_binary` Bazel rules
+by specifying:
+
+    linkstatic=True
+
+Usage
+-----
+
+The complete documentation for Protocol Buffers is available via the
+web at:
+
+https://protobuf.dev/
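
The "Compiling dependent packages" and "Binary Compatibility Warning" sections above describe the Bazel wiring for a consumer without showing it. A minimal sketch, assuming a release archive is used; the VERSION, sha256 value, and the `my_tool` target are placeholders:

    # WORKSPACE of the dependent package (VERSION and sha256 are placeholders).
    load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

    http_archive(
        name = "com_google_protobuf",
        sha256 = "<sha256 of the release archive>",
        strip_prefix = "protobuf-VERSION",
        urls = ["https://github.com/protocolbuffers/protobuf/releases/download/vVERSION/protobuf-VERSION.tar.gz"],
    )

    # Pull in protobuf's own dependencies (abseil, zlib, ...).
    load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")

    protobuf_deps()

    # BUILD.bazel of the dependent package: statically linked, per the
    # "Binary Compatibility Warning" above.
    cc_binary(
        name = "my_tool",  # hypothetical target
        srcs = ["my_tool.cc"],
        linkstatic = True,
        deps = ["@com_google_protobuf//:protobuf"],
    )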

+ 151 - 0
Sample/lib/node_add_on/protobuf_src/absl/BUILD.bazel

@@ -0,0 +1,151 @@
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+load("@bazel_skylib//lib:selects.bzl", "selects")
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+config_setting(
+    name = "clang_compiler",
+    flag_values = {
+        "@bazel_tools//tools/cpp:compiler": "clang",
+    },
+    visibility = [":__subpackages__"],
+)
+
+config_setting(
+    name = "gcc_compiler",
+    flag_values = {
+        "@bazel_tools//tools/cpp:compiler": "gcc",
+    },
+    visibility = [":__subpackages__"],
+)
+
+config_setting(
+    name = "mingw_unspecified_compiler",
+    flag_values = {
+        "@bazel_tools//tools/cpp:compiler": "mingw",
+    },
+    visibility = [":__subpackages__"],
+)
+
+config_setting(
+    name = "mingw-gcc_compiler",
+    flag_values = {
+        "@bazel_tools//tools/cpp:compiler": "mingw-gcc",
+    },
+    visibility = [":__subpackages__"],
+)
+
+config_setting(
+    name = "msvc_compiler",
+    flag_values = {
+        "@bazel_tools//tools/cpp:compiler": "msvc-cl",
+    },
+    visibility = [":__subpackages__"],
+)
+
+config_setting(
+    name = "clang-cl_compiler",
+    flag_values = {
+        "@bazel_tools//tools/cpp:compiler": "clang-cl",
+    },
+    visibility = [":__subpackages__"],
+)
+
+config_setting(
+    name = "osx",
+    constraint_values = [
+        "@platforms//os:osx",
+    ],
+)
+
+config_setting(
+    name = "ios",
+    constraint_values = [
+        "@platforms//os:ios",
+    ],
+)
+
+config_setting(
+    name = "ppc",
+    values = {
+        "cpu": "ppc",
+    },
+    visibility = [":__subpackages__"],
+)
+
+config_setting(
+    name = "cpu_wasm",
+    values = {
+        "cpu": "wasm",
+    },
+    visibility = [":__subpackages__"],
+)
+
+config_setting(
+    name = "cpu_wasm32",
+    values = {
+        "cpu": "wasm32",
+    },
+    visibility = [":__subpackages__"],
+)
+
+config_setting(
+    name = "platforms_wasm32",
+    constraint_values = [
+        "@platforms//cpu:wasm32",
+    ],
+    visibility = [":__subpackages__"],
+)
+
+config_setting(
+    name = "platforms_wasm64",
+    constraint_values = [
+        "@platforms//cpu:wasm64",
+    ],
+    visibility = [":__subpackages__"],
+)
+
+selects.config_setting_group(
+    name = "wasm",
+    match_any = [
+        ":cpu_wasm",
+        ":cpu_wasm32",
+        ":platforms_wasm32",
+        ":platforms_wasm64",
+    ],
+    visibility = [":__subpackages__"],
+)
+
+config_setting(
+    name = "fuchsia",
+    values = {
+        "cpu": "fuchsia",
+    },
+    visibility = [":__subpackages__"],
+)
+
+selects.config_setting_group(
+    name = "mingw_compiler",
+    match_any = [
+        ":mingw_unspecified_compiler",
+        ":mingw-gcc_compiler",
+    ],
+    visibility = [":__subpackages__"],
+)
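
These `config_setting` and `selects.config_setting_group` targets carry no build logic of their own; they exist to be matched by `select()` expressions in the rest of the tree. A purely illustrative consumer (the `:example` target and the chosen flags are hypothetical) could look like:

    cc_library(
        name = "example",
        srcs = ["example.cc"],
        copts = select({
            "//absl:msvc_compiler": ["/W3"],     # building with MSVC
            "//absl:clang_compiler": ["-Wall"],  # building with Clang
            "//conditions:default": [],
        }),
    )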

+ 44 - 0
Sample/lib/node_add_on/protobuf_src/absl/CMakeLists.txt

@@ -0,0 +1,44 @@
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+add_subdirectory(base)
+add_subdirectory(algorithm)
+add_subdirectory(cleanup)
+add_subdirectory(container)
+add_subdirectory(crc)
+add_subdirectory(debugging)
+add_subdirectory(flags)
+add_subdirectory(functional)
+add_subdirectory(hash)
+add_subdirectory(log)
+add_subdirectory(memory)
+add_subdirectory(meta)
+add_subdirectory(numeric)
+add_subdirectory(profiling)
+add_subdirectory(random)
+add_subdirectory(status)
+add_subdirectory(strings)
+add_subdirectory(synchronization)
+add_subdirectory(time)
+add_subdirectory(types)
+add_subdirectory(utility)
+
+if (ABSL_BUILD_DLL)
+  absl_make_dll()
+  if ((BUILD_TESTING AND ABSL_BUILD_TESTING) OR ABSL_BUILD_TEST_HELPERS)
+    absl_make_dll(TEST ON)
+  endif()
+endif()

+ 243 - 0
Sample/lib/node_add_on/protobuf_src/absl/abseil.podspec.gen.py

@@ -0,0 +1,243 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""This script generates abseil.podspec from all BUILD.bazel files.
+
+This is expected to run on abseil git repository with Bazel 1.0 on Linux.
+It recursively analyzes BUILD.bazel files using query command of Bazel to
+dump its build rules in XML format. From these rules, it constructs podspec
+structure.
+"""
+
+import argparse
+import collections
+import os
+import re
+import subprocess
+import xml.etree.ElementTree
+
+# Template of root podspec.
+SPEC_TEMPLATE = """
+# This file has been automatically generated from a script.
+# Please make modifications to `abseil.podspec.gen.py` instead.
+Pod::Spec.new do |s|
+  s.name     = 'abseil'
+  s.version  = '${version}'
+  s.summary  = 'Abseil Common Libraries (C++) from Google'
+  s.homepage = 'https://abseil.io'
+  s.license  = 'Apache License, Version 2.0'
+  s.authors  = { 'Abseil Team' => 'abseil-io@googlegroups.com' }
+  s.source = {
+    :git => 'https://github.com/abseil/abseil-cpp.git',
+    :tag => '${tag}',
+  }
+  s.resource_bundles = {
+    s.module_name => 'PrivacyInfo.xcprivacy',
+  }
+  s.module_name = 'absl'
+  s.header_mappings_dir = 'absl'
+  s.header_dir = 'absl'
+  s.libraries = 'c++'
+  s.compiler_flags = '-Wno-everything'
+  s.pod_target_xcconfig = {
+    'USER_HEADER_SEARCH_PATHS' => '$(inherited) "$(PODS_TARGET_SRCROOT)"',
+    'USE_HEADERMAP' => 'NO',
+    'ALWAYS_SEARCH_USER_PATHS' => 'NO',
+  }
+  s.ios.deployment_target = '9.0'
+  s.osx.deployment_target = '10.11'
+  s.tvos.deployment_target = '9.0'
+  s.watchos.deployment_target = '2.0'
+  s.subspec 'xcprivacy' do |ss|
+    ss.resource_bundles = {
+      ss.module_name => 'PrivacyInfo.xcprivacy',
+    }
+  end
+"""
+
+# Rule object representing the rule of Bazel BUILD.
+Rule = collections.namedtuple(
+    "Rule", "type name package srcs hdrs textual_hdrs deps visibility testonly")
+
+
+def get_elem_value(elem, name):
+  """Returns the value of XML element with the given name."""
+  for child in elem:
+    if child.attrib.get("name") != name:
+      continue
+    if child.tag == "string":
+      return child.attrib.get("value")
+    if child.tag == "boolean":
+      return child.attrib.get("value") == "true"
+    if child.tag == "list":
+      return [nested_child.attrib.get("value") for nested_child in child]
+    raise ValueError("Cannot recognize tag: " + child.tag)
+  return None
+
+
+def normalize_paths(paths):
+  """Returns the list of normalized path."""
+  # e.g. ["//absl/strings:dir/header.h"] -> ["absl/strings/dir/header.h"]
+  return [path.lstrip("/").replace(":", "/") for path in paths]
+
+
+def parse_rule(elem, package):
+  """Returns a rule from bazel XML rule."""
+  return Rule(
+      type=elem.attrib["class"],
+      name=get_elem_value(elem, "name"),
+      package=package,
+      srcs=normalize_paths(get_elem_value(elem, "srcs") or []),
+      hdrs=normalize_paths(get_elem_value(elem, "hdrs") or []),
+      textual_hdrs=normalize_paths(get_elem_value(elem, "textual_hdrs") or []),
+      deps=get_elem_value(elem, "deps") or [],
+      visibility=get_elem_value(elem, "visibility") or [],
+      testonly=get_elem_value(elem, "testonly") or False)
+
+
+def read_build(package):
+  """Runs bazel query on given package file and returns all cc rules."""
+  result = subprocess.check_output(
+      ["bazel", "query", package + ":all", "--output", "xml"])
+  root = xml.etree.ElementTree.fromstring(result)
+  return [
+      parse_rule(elem, package)
+      for elem in root
+      if elem.tag == "rule" and elem.attrib["class"].startswith("cc_")
+  ]
+
+
+def collect_rules(root_path):
+  """Collects and returns all rules from root path recursively."""
+  rules = []
+  for cur, _, _ in os.walk(root_path):
+    build_path = os.path.join(cur, "BUILD.bazel")
+    if os.path.exists(build_path):
+      rules.extend(read_build("//" + cur))
+  return rules
+
+
+def relevant_rule(rule):
+  """Returns true if a given rule is relevant when generating a podspec."""
+  return (
+      # cc_library only (ignore cc_test, cc_binary)
+      rule.type == "cc_library" and
+      # ignore empty rule
+      (rule.hdrs + rule.textual_hdrs + rule.srcs) and
+      # ignore test-only rule
+      not rule.testonly)
+
+
+def get_spec_var(depth):
+  """Returns the name of variable for spec with given depth."""
+  return "s" if depth == 0 else "s{}".format(depth)
+
+
+def get_spec_name(label):
+  """Converts the label of bazel rule to the name of podspec."""
+  assert label.startswith("//absl/"), "{} doesn't start with //absl/".format(
+      label)
+  # e.g. //absl/apple/banana -> abseil/apple/banana
+  return "abseil/" + label[7:]
+
+
+def write_podspec(f, rules, args):
+  """Writes a podspec from given rules and args."""
+  rule_dir = build_rule_directory(rules)["abseil"]
+  # Write root part with given arguments
+  spec = re.sub(r"\$\{(\w+)\}", lambda x: args[x.group(1)],
+                SPEC_TEMPLATE).lstrip()
+  f.write(spec)
+  # Write all target rules
+  write_podspec_map(f, rule_dir, 0)
+  f.write("end\n")
+
+
+def build_rule_directory(rules):
+  """Builds a tree-style rule directory from given rules."""
+  rule_dir = {}
+  for rule in rules:
+    cur = rule_dir
+    for frag in get_spec_name(rule.package).split("/"):
+      cur = cur.setdefault(frag, {})
+    cur[rule.name] = rule
+  return rule_dir
+
+
+def write_podspec_map(f, cur_map, depth):
+  """Writes podspec from rule map recursively."""
+  for key, value in sorted(cur_map.items()):
+    indent = "  " * (depth + 1)
+    f.write("{indent}{var0}.subspec '{key}' do |{var1}|\n".format(
+        indent=indent,
+        key=key,
+        var0=get_spec_var(depth),
+        var1=get_spec_var(depth + 1)))
+    if isinstance(value, dict):
+      write_podspec_map(f, value, depth + 1)
+    else:
+      write_podspec_rule(f, value, depth + 1)
+    f.write("{indent}end\n".format(indent=indent))
+
+
+def write_podspec_rule(f, rule, depth):
+  """Writes podspec from given rule."""
+  indent = "  " * (depth + 1)
+  spec_var = get_spec_var(depth)
+  # Puts all files in hdrs, textual_hdrs, and srcs into source_files.
+  # Since CocoaPods treats header_files a bit differently from bazel,
+  # this won't generate a header_files field so that all source_files
+  # are considered as header files.
+  srcs = sorted(set(rule.hdrs + rule.textual_hdrs + rule.srcs))
+  write_indented_list(
+      f, "{indent}{var}.source_files = ".format(indent=indent, var=spec_var),
+      srcs)
+  # Writes dependencies of this rule.
+  for dep in sorted(rule.deps):
+    name = get_spec_name(dep.replace(":", "/"))
+    f.write("{indent}{var}.dependency '{dep}'\n".format(
+        indent=indent, var=spec_var, dep=name))
+  # Writes dependency to xcprivacy
+  f.write(
+      "{indent}{var}.dependency '{dep}'\n".format(
+          indent=indent, var=spec_var, dep="abseil/xcprivacy"
+      )
+  )
+
+
+def write_indented_list(f, leading, values):
+  """Writes leading values in an indented style."""
+  f.write(leading)
+  f.write((",\n" + " " * len(leading)).join("'{}'".format(v) for v in values))
+  f.write("\n")
+
+
+def generate(args):
+  """Generates a podspec file from all BUILD files under absl directory."""
+  rules = filter(relevant_rule, collect_rules("absl"))
+  with open(args.output, "wt") as f:
+    write_podspec(f, rules, vars(args))
+
+
+def main():
+  parser = argparse.ArgumentParser(
+      description="Generates abseil.podspec from BUILD.bazel")
+  parser.add_argument(
+      "-v", "--version", help="The version of podspec", required=True)
+  parser.add_argument(
+      "-t",
+      "--tag",
+      default=None,
+      help="The name of git tag (default: version)")
+  parser.add_argument(
+      "-o",
+      "--output",
+      default="abseil.podspec",
+      help="The name of output file (default: abseil.podspec)")
+  args = parser.parse_args()
+  if args.tag is None:
+    args.tag = args.version
+  generate(args)
+
+
+if __name__ == "__main__":
+  main()
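
Going by the argparse setup in `main()` and the module docstring above, the generator is expected to be run from the root of an abseil checkout with `bazel` on the PATH, roughly as:

    python3 abseil.podspec.gen.py --version <VERSION> --tag <TAG>

If `--tag` is omitted it defaults to the version, and `--output` defaults to `abseil.podspec`.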

+ 88 - 0
Sample/lib/node_add_on/protobuf_src/absl/algorithm/BUILD.bazel

@@ -0,0 +1,88 @@
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+load(
+    "//absl:copts/configure_copts.bzl",
+    "ABSL_DEFAULT_COPTS",
+    "ABSL_DEFAULT_LINKOPTS",
+    "ABSL_TEST_COPTS",
+)
+
+package(
+    default_visibility = ["//visibility:public"],
+    features = [
+        "header_modules",
+        "layering_check",
+        "parse_headers",
+    ],
+)
+
+licenses(["notice"])
+
+cc_library(
+    name = "algorithm",
+    hdrs = ["algorithm.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        "//absl/base:config",
+    ],
+)
+
+cc_test(
+    name = "algorithm_test",
+    size = "small",
+    srcs = ["algorithm_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":algorithm",
+        "//absl/base:config",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_library(
+    name = "container",
+    hdrs = [
+        "container.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":algorithm",
+        "//absl/base:core_headers",
+        "//absl/base:nullability",
+        "//absl/meta:type_traits",
+    ],
+)
+
+cc_test(
+    name = "container_test",
+    srcs = ["container_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":container",
+        "//absl/base",
+        "//absl/base:core_headers",
+        "//absl/memory",
+        "//absl/types:span",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)

+ 71 - 0
Sample/lib/node_add_on/protobuf_src/absl/algorithm/CMakeLists.txt

@@ -0,0 +1,71 @@
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+absl_cc_library(
+  NAME
+    algorithm
+  HDRS
+    "algorithm.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+  PUBLIC
+)
+
+absl_cc_test(
+  NAME
+    algorithm_test
+  SRCS
+    "algorithm_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::algorithm
+    absl::config
+    GTest::gmock_main
+)
+
+absl_cc_library(
+  NAME
+    algorithm_container
+  HDRS
+    "container.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::algorithm
+    absl::core_headers
+    absl::meta
+    absl::nullability
+  PUBLIC
+)
+
+absl_cc_test(
+  NAME
+    container_test
+  SRCS
+    "container_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::algorithm_container
+    absl::base
+    absl::core_headers
+    absl::memory
+    absl::span
+    GTest::gmock_main
+)

+ 64 - 0
Sample/lib/node_add_on/protobuf_src/absl/algorithm/algorithm.h

@@ -0,0 +1,64 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: algorithm.h
+// -----------------------------------------------------------------------------
+//
+// This header file contains Google extensions to the standard <algorithm> C++
+// header.
+
+#ifndef ABSL_ALGORITHM_ALGORITHM_H_
+#define ABSL_ALGORITHM_ALGORITHM_H_
+
+#include <algorithm>
+#include <iterator>
+#include <type_traits>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// equal()
+// rotate()
+//
+// Historical note: Abseil once provided implementations of these algorithms
+// prior to their adoption in C++14. New code should prefer to use the std
+// variants.
+//
+// See the documentation for the STL <algorithm> header for more information:
+// https://en.cppreference.com/w/cpp/header/algorithm
+using std::equal;
+using std::rotate;
+
+// linear_search()
+//
+// Performs a linear search for `value` using the iterator `first` up to
+// but not including `last`, returning true if [`first`, `last`) contains an
+// element equal to `value`.
+//
+// A linear search is of O(n) complexity which is guaranteed to make at most
+// n = (`last` - `first`) comparisons. A linear search over short containers
+// may be faster than a binary search, even when the container is sorted.
+template <typename InputIterator, typename EqualityComparable>
+bool linear_search(InputIterator first, InputIterator last,
+                   const EqualityComparable& value) {
+  return std::find(first, last, value) != last;
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_ALGORITHM_ALGORITHM_H_

+ 50 - 0
Sample/lib/node_add_on/protobuf_src/absl/algorithm/algorithm_test.cc

@@ -0,0 +1,50 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/algorithm/algorithm.h"
+
+#include <algorithm>
+#include <list>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+
+namespace {
+
+class LinearSearchTest : public testing::Test {
+ protected:
+  LinearSearchTest() : container_{1, 2, 3} {}
+
+  static bool Is3(int n) { return n == 3; }
+  static bool Is4(int n) { return n == 4; }
+
+  std::vector<int> container_;
+};
+
+TEST_F(LinearSearchTest, linear_search) {
+  EXPECT_TRUE(absl::linear_search(container_.begin(), container_.end(), 3));
+  EXPECT_FALSE(absl::linear_search(container_.begin(), container_.end(), 4));
+}
+
+TEST_F(LinearSearchTest, linear_searchConst) {
+  const std::vector<int> *const const_container = &container_;
+  EXPECT_TRUE(
+      absl::linear_search(const_container->begin(), const_container->end(), 3));
+  EXPECT_FALSE(
+      absl::linear_search(const_container->begin(), const_container->end(), 4));
+}
+
+}  // namespace

+ 1761 - 0
Sample/lib/node_add_on/protobuf_src/absl/algorithm/container.h

@@ -0,0 +1,1761 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: container.h
+// -----------------------------------------------------------------------------
+//
+// This header file provides Container-based versions of algorithmic functions
+// within the C++ standard library. The following standard library sets of
+// functions are covered within this file:
+//
+//   * Algorithmic <iterator> functions
+//   * Algorithmic <numeric> functions
+//   * <algorithm> functions
+//
+// The standard library functions operate on iterator ranges; the functions
+// within this API operate on containers, though many return iterator ranges.
+//
+// All functions within this API are named with a `c_` prefix. Calls such as
+// `absl::c_xx(container, ...)` are equivalent to std:: functions such as
+// `std::xx(std::begin(cont), std::end(cont), ...)`. Functions that act on
+// iterators but not conceptually on iterator ranges (e.g. `std::iter_swap`)
+// have no equivalent here.
+//
+// For template parameter and variable naming, `C` indicates the container type
+// to which the function is applied, `Pred` indicates the predicate object type
+// to be used by the function and `T` indicates the applicable element type.
+
+#ifndef ABSL_ALGORITHM_CONTAINER_H_
+#define ABSL_ALGORITHM_CONTAINER_H_
+
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <numeric>
+#include <random>
+#include <type_traits>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include "absl/algorithm/algorithm.h"
+#include "absl/base/macros.h"
+#include "absl/base/nullability.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_algorithm_internal {
+
+// NOTE: it is important to defer to ADL lookup for building with C++ modules,
+// especially for headers like <valarray> which are not visible from this file
+// but specialize std::begin and std::end.
+using std::begin;
+using std::end;
+
+// The type of the iterator given by begin(c) (possibly std::begin(c)).
+// ContainerIter<const vector<T>> gives vector<T>::const_iterator,
+// while ContainerIter<vector<T>> gives vector<T>::iterator.
+template <typename C>
+using ContainerIter = decltype(begin(std::declval<C&>()));
+
+// An MSVC bug involving template parameter substitution requires us to use
+// decltype() here instead of just std::pair.
+template <typename C1, typename C2>
+using ContainerIterPairType =
+    decltype(std::make_pair(ContainerIter<C1>(), ContainerIter<C2>()));
+
+template <typename C>
+using ContainerDifferenceType = decltype(std::distance(
+    std::declval<ContainerIter<C>>(), std::declval<ContainerIter<C>>()));
+
+template <typename C>
+using ContainerPointerType =
+    typename std::iterator_traits<ContainerIter<C>>::pointer;
+
+// container_algorithm_internal::c_begin and
+// container_algorithm_internal::c_end are abbreviations for proper ADL
+// lookup of std::begin and std::end, i.e.
+//   using std::begin;
+//   using std::end;
+//   std::foo(begin(c), end(c));
+// becomes
+//   std::foo(container_algorithm_internal::begin(c),
+//            container_algorithm_internal::end(c));
+// These are meant for internal use only.
+
+template <typename C>
+ContainerIter<C> c_begin(C& c) {
+  return begin(c);
+}
+
+template <typename C>
+ContainerIter<C> c_end(C& c) {
+  return end(c);
+}
+
+template <typename T>
+struct IsUnorderedContainer : std::false_type {};
+
+template <class Key, class T, class Hash, class KeyEqual, class Allocator>
+struct IsUnorderedContainer<
+    std::unordered_map<Key, T, Hash, KeyEqual, Allocator>> : std::true_type {};
+
+template <class Key, class Hash, class KeyEqual, class Allocator>
+struct IsUnorderedContainer<std::unordered_set<Key, Hash, KeyEqual, Allocator>>
+    : std::true_type {};
+
+}  // namespace container_algorithm_internal
+
+// PUBLIC API
+
+//------------------------------------------------------------------------------
+// Abseil algorithm.h functions
+//------------------------------------------------------------------------------
+
+// c_linear_search()
+//
+// Container-based version of absl::linear_search() for performing a linear
+// search within a container.
+template <typename C, typename EqualityComparable>
+bool c_linear_search(const C& c, EqualityComparable&& value) {
+  return linear_search(container_algorithm_internal::c_begin(c),
+                       container_algorithm_internal::c_end(c),
+                       std::forward<EqualityComparable>(value));
+}
+
+//------------------------------------------------------------------------------
+// <iterator> algorithms
+//------------------------------------------------------------------------------
+
+// c_distance()
+//
+// Container-based version of the <iterator> `std::distance()` function to
+// return the number of elements within a container.
+template <typename C>
+container_algorithm_internal::ContainerDifferenceType<const C> c_distance(
+    const C& c) {
+  return std::distance(container_algorithm_internal::c_begin(c),
+                       container_algorithm_internal::c_end(c));
+}
+
+//------------------------------------------------------------------------------
+// <algorithm> Non-modifying sequence operations
+//------------------------------------------------------------------------------
+
+// c_all_of()
+//
+// Container-based version of the <algorithm> `std::all_of()` function to
+// test if all elements within a container satisfy a condition.
+template <typename C, typename Pred>
+bool c_all_of(const C& c, Pred&& pred) {
+  return std::all_of(container_algorithm_internal::c_begin(c),
+                     container_algorithm_internal::c_end(c),
+                     std::forward<Pred>(pred));
+}
+
+// c_any_of()
+//
+// Container-based version of the <algorithm> `std::any_of()` function to
+// test if any element in a container fulfills a condition.
+template <typename C, typename Pred>
+bool c_any_of(const C& c, Pred&& pred) {
+  return std::any_of(container_algorithm_internal::c_begin(c),
+                     container_algorithm_internal::c_end(c),
+                     std::forward<Pred>(pred));
+}
+
+// c_none_of()
+//
+// Container-based version of the <algorithm> `std::none_of()` function to
+// test if no elements in a container fulfill a condition.
+template <typename C, typename Pred>
+bool c_none_of(const C& c, Pred&& pred) {
+  return std::none_of(container_algorithm_internal::c_begin(c),
+                      container_algorithm_internal::c_end(c),
+                      std::forward<Pred>(pred));
+}
+
+// c_for_each()
+//
+// Container-based version of the <algorithm> `std::for_each()` function to
+// apply a function to a container's elements.
+template <typename C, typename Function>
+decay_t<Function> c_for_each(C&& c, Function&& f) {
+  return std::for_each(container_algorithm_internal::c_begin(c),
+                       container_algorithm_internal::c_end(c),
+                       std::forward<Function>(f));
+}
+
+// c_find()
+//
+// Container-based version of the <algorithm> `std::find()` function to find
+// the first element containing the passed value within a container value.
+template <typename C, typename T>
+container_algorithm_internal::ContainerIter<C> c_find(C& c, T&& value) {
+  return std::find(container_algorithm_internal::c_begin(c),
+                   container_algorithm_internal::c_end(c),
+                   std::forward<T>(value));
+}
+
+// c_find_if()
+//
+// Container-based version of the <algorithm> `std::find_if()` function to find
+// the first element in a container matching the given condition.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerIter<C> c_find_if(C& c, Pred&& pred) {
+  return std::find_if(container_algorithm_internal::c_begin(c),
+                      container_algorithm_internal::c_end(c),
+                      std::forward<Pred>(pred));
+}
+
+// c_find_if_not()
+//
+// Container-based version of the <algorithm> `std::find_if_not()` function to
+// find the first element in a container not matching the given condition.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerIter<C> c_find_if_not(C& c,
+                                                             Pred&& pred) {
+  return std::find_if_not(container_algorithm_internal::c_begin(c),
+                          container_algorithm_internal::c_end(c),
+                          std::forward<Pred>(pred));
+}
+
+// c_find_end()
+//
+// Container-based version of the <algorithm> `std::find_end()` function to
+// find the last subsequence within a container.
+template <typename Sequence1, typename Sequence2>
+container_algorithm_internal::ContainerIter<Sequence1> c_find_end(
+    Sequence1& sequence, Sequence2& subsequence) {
+  return std::find_end(container_algorithm_internal::c_begin(sequence),
+                       container_algorithm_internal::c_end(sequence),
+                       container_algorithm_internal::c_begin(subsequence),
+                       container_algorithm_internal::c_end(subsequence));
+}
+
+// Overload of c_find_end() for using a predicate evaluation other than `==` as
+// the function's test condition.
+template <typename Sequence1, typename Sequence2, typename BinaryPredicate>
+container_algorithm_internal::ContainerIter<Sequence1> c_find_end(
+    Sequence1& sequence, Sequence2& subsequence, BinaryPredicate&& pred) {
+  return std::find_end(container_algorithm_internal::c_begin(sequence),
+                       container_algorithm_internal::c_end(sequence),
+                       container_algorithm_internal::c_begin(subsequence),
+                       container_algorithm_internal::c_end(subsequence),
+                       std::forward<BinaryPredicate>(pred));
+}
+
+// c_find_first_of()
+//
+// Container-based version of the <algorithm> `std::find_first_of()` function to
+// find the first element within the container that is also within the options
+// container.
+template <typename C1, typename C2>
+container_algorithm_internal::ContainerIter<C1> c_find_first_of(C1& container,
+                                                                C2& options) {
+  return std::find_first_of(container_algorithm_internal::c_begin(container),
+                            container_algorithm_internal::c_end(container),
+                            container_algorithm_internal::c_begin(options),
+                            container_algorithm_internal::c_end(options));
+}
+
+// Overload of c_find_first_of() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template <typename C1, typename C2, typename BinaryPredicate>
+container_algorithm_internal::ContainerIter<C1> c_find_first_of(
+    C1& container, C2& options, BinaryPredicate&& pred) {
+  return std::find_first_of(container_algorithm_internal::c_begin(container),
+                            container_algorithm_internal::c_end(container),
+                            container_algorithm_internal::c_begin(options),
+                            container_algorithm_internal::c_end(options),
+                            std::forward<BinaryPredicate>(pred));
+}
+
+// c_adjacent_find()
+//
+// Container-based version of the <algorithm> `std::adjacent_find()` function to
+// find equal adjacent elements within a container.
+template <typename Sequence>
+container_algorithm_internal::ContainerIter<Sequence> c_adjacent_find(
+    Sequence& sequence) {
+  return std::adjacent_find(container_algorithm_internal::c_begin(sequence),
+                            container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_adjacent_find() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template <typename Sequence, typename BinaryPredicate>
+container_algorithm_internal::ContainerIter<Sequence> c_adjacent_find(
+    Sequence& sequence, BinaryPredicate&& pred) {
+  return std::adjacent_find(container_algorithm_internal::c_begin(sequence),
+                            container_algorithm_internal::c_end(sequence),
+                            std::forward<BinaryPredicate>(pred));
+}
+
+// c_count()
+//
+// Container-based version of the <algorithm> `std::count()` function to count
+// values that match within a container.
+template <typename C, typename T>
+container_algorithm_internal::ContainerDifferenceType<const C> c_count(
+    const C& c, T&& value) {
+  return std::count(container_algorithm_internal::c_begin(c),
+                    container_algorithm_internal::c_end(c),
+                    std::forward<T>(value));
+}
+
+// c_count_if()
+//
+// Container-based version of the <algorithm> `std::count_if()` function to
+// count values matching a condition within a container.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerDifferenceType<const C> c_count_if(
+    const C& c, Pred&& pred) {
+  return std::count_if(container_algorithm_internal::c_begin(c),
+                       container_algorithm_internal::c_end(c),
+                       std::forward<Pred>(pred));
+}
+
+// c_mismatch()
+//
+// Container-based version of the <algorithm> `std::mismatch()` function to
+// return the first element where two ordered containers differ. Applies `==` to
+// the first N elements of `c1` and `c2`, where N = min(size(c1), size(c2)).
+template <typename C1, typename C2>
+container_algorithm_internal::ContainerIterPairType<C1, C2> c_mismatch(C1& c1,
+                                                                       C2& c2) {
+  return std::mismatch(container_algorithm_internal::c_begin(c1),
+                       container_algorithm_internal::c_end(c1),
+                       container_algorithm_internal::c_begin(c2),
+                       container_algorithm_internal::c_end(c2));
+}
+
+// Overload of c_mismatch() for using a predicate evaluation other than `==` as
+// the function's test condition. Applies `pred` to the first N elements of `c1`
+// and `c2`, where N = min(size(c1), size(c2)).
+template <typename C1, typename C2, typename BinaryPredicate>
+container_algorithm_internal::ContainerIterPairType<C1, C2> c_mismatch(
+    C1& c1, C2& c2, BinaryPredicate pred) {
+  return std::mismatch(container_algorithm_internal::c_begin(c1),
+                       container_algorithm_internal::c_end(c1),
+                       container_algorithm_internal::c_begin(c2),
+                       container_algorithm_internal::c_end(c2), pred);
+}
+
+// c_equal()
+//
+// Container-based version of the <algorithm> `std::equal()` function to
+// test whether two containers are equal.
+template <typename C1, typename C2>
+bool c_equal(const C1& c1, const C2& c2) {
+  return std::equal(container_algorithm_internal::c_begin(c1),
+                    container_algorithm_internal::c_end(c1),
+                    container_algorithm_internal::c_begin(c2),
+                    container_algorithm_internal::c_end(c2));
+}
+
+// Overload of c_equal() for using a predicate evaluation other than `==` as
+// the function's test condition.
+template <typename C1, typename C2, typename BinaryPredicate>
+bool c_equal(const C1& c1, const C2& c2, BinaryPredicate&& pred) {
+  return std::equal(container_algorithm_internal::c_begin(c1),
+                    container_algorithm_internal::c_end(c1),
+                    container_algorithm_internal::c_begin(c2),
+                    container_algorithm_internal::c_end(c2),
+                    std::forward<BinaryPredicate>(pred));
+}
+
+// c_is_permutation()
+//
+// Container-based version of the <algorithm> `std::is_permutation()` function
+// to test whether a container is a permutation of another.
+template <typename C1, typename C2>
+bool c_is_permutation(const C1& c1, const C2& c2) {
+  return std::is_permutation(container_algorithm_internal::c_begin(c1),
+                             container_algorithm_internal::c_end(c1),
+                             container_algorithm_internal::c_begin(c2),
+                             container_algorithm_internal::c_end(c2));
+}
+
+// Overload of c_is_permutation() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template <typename C1, typename C2, typename BinaryPredicate>
+bool c_is_permutation(const C1& c1, const C2& c2, BinaryPredicate&& pred) {
+  return std::is_permutation(container_algorithm_internal::c_begin(c1),
+                             container_algorithm_internal::c_end(c1),
+                             container_algorithm_internal::c_begin(c2),
+                             container_algorithm_internal::c_end(c2),
+                             std::forward<BinaryPredicate>(pred));
+}
+
+// c_search()
+//
+// Container-based version of the <algorithm> `std::search()` function to search
+// a container for a subsequence.
+template <typename Sequence1, typename Sequence2>
+container_algorithm_internal::ContainerIter<Sequence1> c_search(
+    Sequence1& sequence, Sequence2& subsequence) {
+  return std::search(container_algorithm_internal::c_begin(sequence),
+                     container_algorithm_internal::c_end(sequence),
+                     container_algorithm_internal::c_begin(subsequence),
+                     container_algorithm_internal::c_end(subsequence));
+}
+
+// Overload of c_search() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template <typename Sequence1, typename Sequence2, typename BinaryPredicate>
+container_algorithm_internal::ContainerIter<Sequence1> c_search(
+    Sequence1& sequence, Sequence2& subsequence, BinaryPredicate&& pred) {
+  return std::search(container_algorithm_internal::c_begin(sequence),
+                     container_algorithm_internal::c_end(sequence),
+                     container_algorithm_internal::c_begin(subsequence),
+                     container_algorithm_internal::c_end(subsequence),
+                     std::forward<BinaryPredicate>(pred));
+}
+
+// c_search_n()
+//
+// Container-based version of the <algorithm> `std::search_n()` function to
+// search a container for the first sequence of N elements.
+template <typename Sequence, typename Size, typename T>
+container_algorithm_internal::ContainerIter<Sequence> c_search_n(
+    Sequence& sequence, Size count, T&& value) {
+  return std::search_n(container_algorithm_internal::c_begin(sequence),
+                       container_algorithm_internal::c_end(sequence), count,
+                       std::forward<T>(value));
+}
+
+// Overload of c_search_n() for using a predicate evaluation other than
+// `==` as the function's test condition.
+template <typename Sequence, typename Size, typename T,
+          typename BinaryPredicate>
+container_algorithm_internal::ContainerIter<Sequence> c_search_n(
+    Sequence& sequence, Size count, T&& value, BinaryPredicate&& pred) {
+  return std::search_n(container_algorithm_internal::c_begin(sequence),
+                       container_algorithm_internal::c_end(sequence), count,
+                       std::forward<T>(value),
+                       std::forward<BinaryPredicate>(pred));
+}
+
+//------------------------------------------------------------------------------
+// <algorithm> Modifying sequence operations
+//------------------------------------------------------------------------------
+
+// c_copy()
+//
+// Container-based version of the <algorithm> `std::copy()` function to copy a
+// container's elements into an iterator.
+template <typename InputSequence, typename OutputIterator>
+OutputIterator c_copy(const InputSequence& input, OutputIterator output) {
+  return std::copy(container_algorithm_internal::c_begin(input),
+                   container_algorithm_internal::c_end(input), output);
+}
+
+// c_copy_n()
+//
+// Container-based version of the <algorithm> `std::copy_n()` function to copy a
+// container's first N elements into an iterator.
+template <typename C, typename Size, typename OutputIterator>
+OutputIterator c_copy_n(const C& input, Size n, OutputIterator output) {
+  return std::copy_n(container_algorithm_internal::c_begin(input), n, output);
+}
+
+// c_copy_if()
+//
+// Container-based version of the <algorithm> `std::copy_if()` function to copy
+// a container's elements satisfying some condition into an iterator.
+template <typename InputSequence, typename OutputIterator, typename Pred>
+OutputIterator c_copy_if(const InputSequence& input, OutputIterator output,
+                         Pred&& pred) {
+  return std::copy_if(container_algorithm_internal::c_begin(input),
+                      container_algorithm_internal::c_end(input), output,
+                      std::forward<Pred>(pred));
+}
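+
+// Illustrative usage sketch (comment only, not part of the upstream header);
+// assumes <vector> and <iterator> are included:
+//
+//   std::vector<int> src = {1, 2, 3, 4};
+//   std::vector<int> evens;
+//   absl::c_copy_if(src, std::back_inserter(evens),
+//                   [](int x) { return x % 2 == 0; });
+//   // evens == {2, 4}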
+
+// c_copy_backward()
+//
+// Container-based version of the <algorithm> `std::copy_backward()` function to
+// copy a container's elements in reverse order into an iterator.
+template <typename C, typename BidirectionalIterator>
+BidirectionalIterator c_copy_backward(const C& src,
+                                      BidirectionalIterator dest) {
+  return std::copy_backward(container_algorithm_internal::c_begin(src),
+                            container_algorithm_internal::c_end(src), dest);
+}
+
+// c_move()
+//
+// Container-based version of the <algorithm> `std::move()` function to move
+// a container's elements into an iterator.
+template <typename C, typename OutputIterator>
+OutputIterator c_move(C&& src, OutputIterator dest) {
+  return std::move(container_algorithm_internal::c_begin(src),
+                   container_algorithm_internal::c_end(src), dest);
+}
+
+// c_move_backward()
+//
+// Container-based version of the <algorithm> `std::move_backward()` function to
+// move a container's elements into an iterator in reverse order.
+template <typename C, typename BidirectionalIterator>
+BidirectionalIterator c_move_backward(C&& src, BidirectionalIterator dest) {
+  return std::move_backward(container_algorithm_internal::c_begin(src),
+                            container_algorithm_internal::c_end(src), dest);
+}
+
+// c_swap_ranges()
+//
+// Container-based version of the <algorithm> `std::swap_ranges()` function to
+// swap a container's elements with another container's elements. Swaps the
+// first N elements of `c1` and `c2`, where N = min(size(c1), size(c2)).
+template <typename C1, typename C2>
+container_algorithm_internal::ContainerIter<C2> c_swap_ranges(C1& c1, C2& c2) {
+  auto first1 = container_algorithm_internal::c_begin(c1);
+  auto last1 = container_algorithm_internal::c_end(c1);
+  auto first2 = container_algorithm_internal::c_begin(c2);
+  auto last2 = container_algorithm_internal::c_end(c2);
+
+  using std::swap;
+  for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) {
+    swap(*first1, *first2);
+  }
+  return first2;
+}
+
+// c_transform()
+//
+// Container-based version of the <algorithm> `std::transform()` function to
+// transform a container's elements using the unary operation, storing the
+// result in an iterator pointing to the last transformed element in the output
+// range.
+template <typename InputSequence, typename OutputIterator, typename UnaryOp>
+OutputIterator c_transform(const InputSequence& input, OutputIterator output,
+                           UnaryOp&& unary_op) {
+  return std::transform(container_algorithm_internal::c_begin(input),
+                        container_algorithm_internal::c_end(input), output,
+                        std::forward<UnaryOp>(unary_op));
+}
+
+// Overload of c_transform() for performing a transformation using a binary
+// predicate. Applies `binary_op` to the first N elements of `c1` and `c2`,
+// where N = min(size(c1), size(c2)).
+template <typename InputSequence1, typename InputSequence2,
+          typename OutputIterator, typename BinaryOp>
+OutputIterator c_transform(const InputSequence1& input1,
+                           const InputSequence2& input2, OutputIterator output,
+                           BinaryOp&& binary_op) {
+  auto first1 = container_algorithm_internal::c_begin(input1);
+  auto last1 = container_algorithm_internal::c_end(input1);
+  auto first2 = container_algorithm_internal::c_begin(input2);
+  auto last2 = container_algorithm_internal::c_end(input2);
+  for (; first1 != last1 && first2 != last2;
+       ++first1, (void)++first2, ++output) {
+    *output = binary_op(*first1, *first2);
+  }
+
+  return output;
+}
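+
+// Illustrative usage sketch (comment only, not part of the upstream header);
+// assumes <vector>, <iterator>, and <functional> are included:
+//
+//   std::vector<int> a = {1, 2, 3};
+//   std::vector<int> b = {10, 20, 30};
+//   std::vector<int> sums;
+//   absl::c_transform(a, b, std::back_inserter(sums), std::plus<int>());
+//   // sums == {11, 22, 33}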
+
+// c_replace()
+//
+// Container-based version of the <algorithm> `std::replace()` function to
+// replace a container's elements of some value with a new value. The container
+// is modified in place.
+template <typename Sequence, typename T>
+void c_replace(Sequence& sequence, const T& old_value, const T& new_value) {
+  std::replace(container_algorithm_internal::c_begin(sequence),
+               container_algorithm_internal::c_end(sequence), old_value,
+               new_value);
+}
+
+// c_replace_if()
+//
+// Container-based version of the <algorithm> `std::replace_if()` function to
+// replace a container's elements of some value with a new value based on some
+// condition. The container is modified in place.
+template <typename C, typename Pred, typename T>
+void c_replace_if(C& c, Pred&& pred, T&& new_value) {
+  std::replace_if(container_algorithm_internal::c_begin(c),
+                  container_algorithm_internal::c_end(c),
+                  std::forward<Pred>(pred), std::forward<T>(new_value));
+}
+
+// c_replace_copy()
+//
+// Container-based version of the <algorithm> `std::replace_copy()` function to
+// replace a container's elements of some value with a new value and return the
+// results within an iterator.
+template <typename C, typename OutputIterator, typename T>
+OutputIterator c_replace_copy(const C& c, OutputIterator result, T&& old_value,
+                              T&& new_value) {
+  return std::replace_copy(container_algorithm_internal::c_begin(c),
+                           container_algorithm_internal::c_end(c), result,
+                           std::forward<T>(old_value),
+                           std::forward<T>(new_value));
+}
+
+// c_replace_copy_if()
+//
+// Container-based version of the <algorithm> `std::replace_copy_if()` function
+// to replace a container's elements of some value with a new value based on
+// some condition, and return the results within an iterator.
+template <typename C, typename OutputIterator, typename Pred, typename T>
+OutputIterator c_replace_copy_if(const C& c, OutputIterator result, Pred&& pred,
+                                 const T& new_value) {
+  return std::replace_copy_if(container_algorithm_internal::c_begin(c),
+                              container_algorithm_internal::c_end(c), result,
+                              std::forward<Pred>(pred), new_value);
+}
+
+// c_fill()
+//
+// Container-based version of the <algorithm> `std::fill()` function to fill a
+// container with some value.
+template <typename C, typename T>
+void c_fill(C& c, const T& value) {
+  std::fill(container_algorithm_internal::c_begin(c),
+            container_algorithm_internal::c_end(c), value);
+}
+
+// c_fill_n()
+//
+// Container-based version of the <algorithm> `std::fill_n()` function to fill
+// the first N elements in a container with some value.
+template <typename C, typename Size, typename T>
+void c_fill_n(C& c, Size n, const T& value) {
+  std::fill_n(container_algorithm_internal::c_begin(c), n, value);
+}
+
+// c_generate()
+//
+// Container-based version of the <algorithm> `std::generate()` function to
+// assign a container's elements to the values provided by the given generator.
+template <typename C, typename Generator>
+void c_generate(C& c, Generator&& gen) {
+  std::generate(container_algorithm_internal::c_begin(c),
+                container_algorithm_internal::c_end(c),
+                std::forward<Generator>(gen));
+}
+
+// c_generate_n()
+//
+// Container-based version of the <algorithm> `std::generate_n()` function to
+// assign a container's first N elements to the values provided by the given
+// generator.
+template <typename C, typename Size, typename Generator>
+container_algorithm_internal::ContainerIter<C> c_generate_n(C& c, Size n,
+                                                            Generator&& gen) {
+  return std::generate_n(container_algorithm_internal::c_begin(c), n,
+                         std::forward<Generator>(gen));
+}
+
+// Note: `c_xx()` <algorithm> container versions for `remove()`, `remove_if()`,
+// and `unique()` are omitted, because it's not clear whether or not such
+// functions should call erase on their supplied sequences afterwards. Either
+// behavior would be surprising for a different set of users.
+
+// c_remove_copy()
+//
+// Container-based version of the <algorithm> `std::remove_copy()` function to
+// copy a container's elements while removing any elements matching the given
+// `value`.
+template <typename C, typename OutputIterator, typename T>
+OutputIterator c_remove_copy(const C& c, OutputIterator result,
+                             const T& value) {
+  return std::remove_copy(container_algorithm_internal::c_begin(c),
+                          container_algorithm_internal::c_end(c), result,
+                          value);
+}
+
+// c_remove_copy_if()
+//
+// Container-based version of the <algorithm> `std::remove_copy_if()` function
+// to copy a container's elements while removing any elements matching the given
+// condition.
+template <typename C, typename OutputIterator, typename Pred>
+OutputIterator c_remove_copy_if(const C& c, OutputIterator result,
+                                Pred&& pred) {
+  return std::remove_copy_if(container_algorithm_internal::c_begin(c),
+                             container_algorithm_internal::c_end(c), result,
+                             std::forward<Pred>(pred));
+}
+
+// c_unique_copy()
+//
+// Container-based version of the <algorithm> `std::unique_copy()` function to
+// copy a container's elements while removing any elements containing duplicate
+// values.
+template <typename C, typename OutputIterator>
+OutputIterator c_unique_copy(const C& c, OutputIterator result) {
+  return std::unique_copy(container_algorithm_internal::c_begin(c),
+                          container_algorithm_internal::c_end(c), result);
+}
+
+// Overload of c_unique_copy() for using a predicate evaluation other than
+// `==` for comparing uniqueness of the element values.
+template <typename C, typename OutputIterator, typename BinaryPredicate>
+OutputIterator c_unique_copy(const C& c, OutputIterator result,
+                             BinaryPredicate&& pred) {
+  return std::unique_copy(container_algorithm_internal::c_begin(c),
+                          container_algorithm_internal::c_end(c), result,
+                          std::forward<BinaryPredicate>(pred));
+}
+
+// c_reverse()
+//
+// Container-based version of the <algorithm> `std::reverse()` function to
+// reverse a container's elements.
+template <typename Sequence>
+void c_reverse(Sequence& sequence) {
+  std::reverse(container_algorithm_internal::c_begin(sequence),
+               container_algorithm_internal::c_end(sequence));
+}
+
+// c_reverse_copy()
+//
+// Container-based version of the <algorithm> `std::reverse_copy()` function to
+// reverse a container's elements and write them to an iterator range.
+template <typename C, typename OutputIterator>
+OutputIterator c_reverse_copy(const C& sequence, OutputIterator result) {
+  return std::reverse_copy(container_algorithm_internal::c_begin(sequence),
+                           container_algorithm_internal::c_end(sequence),
+                           result);
+}
+
+// c_rotate()
+//
+// Container-based version of the <algorithm> `std::rotate()` function to
+// shift a container's elements leftward such that the `middle` element becomes
+// the first element in the container.
+template <typename C,
+          typename Iterator = container_algorithm_internal::ContainerIter<C>>
+Iterator c_rotate(C& sequence, Iterator middle) {
+  return absl::rotate(container_algorithm_internal::c_begin(sequence), middle,
+                      container_algorithm_internal::c_end(sequence));
+}
+
+// c_rotate_copy()
+//
+// Container-based version of the <algorithm> `std::rotate_copy()` function to
+// shift a container's elements leftward such that the `middle` element becomes
+// the first element in a new iterator range.
+template <typename C, typename OutputIterator>
+OutputIterator c_rotate_copy(
+    const C& sequence,
+    container_algorithm_internal::ContainerIter<const C> middle,
+    OutputIterator result) {
+  return std::rotate_copy(container_algorithm_internal::c_begin(sequence),
+                          middle, container_algorithm_internal::c_end(sequence),
+                          result);
+}
+
+// c_shuffle()
+//
+// Container-based version of the <algorithm> `std::shuffle()` function to
+// randomly shuffle elements within the container using a `gen()` uniform random
+// number generator.
+template <typename RandomAccessContainer, typename UniformRandomBitGenerator>
+void c_shuffle(RandomAccessContainer& c, UniformRandomBitGenerator&& gen) {
+  std::shuffle(container_algorithm_internal::c_begin(c),
+               container_algorithm_internal::c_end(c),
+               std::forward<UniformRandomBitGenerator>(gen));
+}
+
+// c_sample()
+//
+// Container-based version of the <algorithm> `std::sample()` function to
+// randomly sample elements from the container without replacement using a
+// `gen()` uniform random number generator and write them to an iterator range.
+template <typename C, typename OutputIterator, typename Distance,
+          typename UniformRandomBitGenerator>
+OutputIterator c_sample(const C& c, OutputIterator result, Distance n,
+                        UniformRandomBitGenerator&& gen) {
+#if defined(__cpp_lib_sample) && __cpp_lib_sample >= 201603L
+  return std::sample(container_algorithm_internal::c_begin(c),
+                     container_algorithm_internal::c_end(c), result, n,
+                     std::forward<UniformRandomBitGenerator>(gen));
+#else
+  // Fall back to a stable selection-sampling implementation.
+  auto first = container_algorithm_internal::c_begin(c);
+  Distance unsampled_elements = c_distance(c);
+  n = (std::min)(n, unsampled_elements);
+  for (; n != 0; ++first) {
+    Distance r =
+        std::uniform_int_distribution<Distance>(0, --unsampled_elements)(gen);
+    if (r < n) {
+      *result++ = *first;
+      --n;
+    }
+  }
+  return result;
+#endif
+}
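+
+// Illustrative usage sketch (comment only, not part of the upstream header);
+// assumes <vector>, <iterator>, and <random> are included:
+//
+//   std::vector<int> population = {1, 2, 3, 4, 5, 6};
+//   std::vector<int> picked;
+//   std::mt19937 gen(42);  // any uniform random bit generator
+//   absl::c_sample(population, std::back_inserter(picked), 3, gen);
+//   // `picked` holds 3 elements drawn without replacement, in original order.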
+
+//------------------------------------------------------------------------------
+// <algorithm> Partition functions
+//------------------------------------------------------------------------------
+
+// c_is_partitioned()
+//
+// Container-based version of the <algorithm> `std::is_partitioned()` function
+// to test whether all elements in the container for which `pred` returns `true`
+// precede those for which `pred` is `false`.
+template <typename C, typename Pred>
+bool c_is_partitioned(const C& c, Pred&& pred) {
+  return std::is_partitioned(container_algorithm_internal::c_begin(c),
+                             container_algorithm_internal::c_end(c),
+                             std::forward<Pred>(pred));
+}
+
+// c_partition()
+//
+// Container-based version of the <algorithm> `std::partition()` function
+// to rearrange all elements in a container in such a way that all elements for
+// which `pred` returns `true` precede all those for which it returns `false`,
+// returning an iterator to the first element of the second group.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerIter<C> c_partition(C& c, Pred&& pred) {
+  return std::partition(container_algorithm_internal::c_begin(c),
+                        container_algorithm_internal::c_end(c),
+                        std::forward<Pred>(pred));
+}
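+
+// Illustrative usage sketch (comment only, not part of the upstream header):
+//
+//   std::vector<int> v = {1, 2, 3, 4, 5};
+//   auto second_group = absl::c_partition(v, [](int x) { return x % 2 == 0; });
+//   // All even values now precede all odd values; `second_group` points at the
+//   // first odd value. Use c_stable_partition() below to also preserve the
+//   // relative order within each group.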
+
+// c_stable_partition()
+//
+// Container-based version of the <algorithm> `std::stable_partition()` function
+// to rearrange all elements in a container in such a way that all elements for
+// which `pred` returns `true` precede all those for which it returns `false`,
+// preserving the relative ordering between the two groups. The function returns
+// an iterator to the first element of the second group.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerIter<C> c_stable_partition(C& c,
+                                                                  Pred&& pred) {
+  return std::stable_partition(container_algorithm_internal::c_begin(c),
+                               container_algorithm_internal::c_end(c),
+                               std::forward<Pred>(pred));
+}
+
+// c_partition_copy()
+//
+// Container-based version of the <algorithm> `std::partition_copy()` function
+// to partition a container's elements and return them into two iterators: one
+// for which `pred` returns `true`, and one for which `pred` returns `false`.
+template <typename C, typename OutputIterator1, typename OutputIterator2,
+          typename Pred>
+std::pair<OutputIterator1, OutputIterator2> c_partition_copy(
+    const C& c, OutputIterator1 out_true, OutputIterator2 out_false,
+    Pred&& pred) {
+  return std::partition_copy(container_algorithm_internal::c_begin(c),
+                             container_algorithm_internal::c_end(c), out_true,
+                             out_false, std::forward<Pred>(pred));
+}
+
+// c_partition_point()
+//
+// Container-based version of the <algorithm> `std::partition_point()` function
+// to return the first element of an already partitioned container for which
+// the given `pred` is not `true`.
+template <typename C, typename Pred>
+container_algorithm_internal::ContainerIter<C> c_partition_point(C& c,
+                                                                 Pred&& pred) {
+  return std::partition_point(container_algorithm_internal::c_begin(c),
+                              container_algorithm_internal::c_end(c),
+                              std::forward<Pred>(pred));
+}
+
+//------------------------------------------------------------------------------
+// <algorithm> Sorting functions
+//------------------------------------------------------------------------------
+
+// c_sort()
+//
+// Container-based version of the <algorithm> `std::sort()` function
+// to sort elements in ascending order of their values.
+template <typename C>
+void c_sort(C& c) {
+  std::sort(container_algorithm_internal::c_begin(c),
+            container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_sort() for performing a `comp` comparison other than the
+// default `operator<`.
+template <typename C, typename LessThan>
+void c_sort(C& c, LessThan&& comp) {
+  std::sort(container_algorithm_internal::c_begin(c),
+            container_algorithm_internal::c_end(c),
+            std::forward<LessThan>(comp));
+}
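+
+// Illustrative usage sketch (comment only, not part of the upstream header);
+// assumes <vector> and <functional> are included:
+//
+//   std::vector<int> v = {3, 1, 2};
+//   absl::c_sort(v);                       // v == {1, 2, 3}
+//   absl::c_sort(v, std::greater<int>());  // v == {3, 2, 1}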
+
+// c_stable_sort()
+//
+// Container-based version of the <algorithm> `std::stable_sort()` function
+// to sort elements in ascending order of their values, preserving the order
+// of equivalents.
+template <typename C>
+void c_stable_sort(C& c) {
+  std::stable_sort(container_algorithm_internal::c_begin(c),
+                   container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_stable_sort() for performing a `comp` comparison other than the
+// default `operator<`.
+template <typename C, typename LessThan>
+void c_stable_sort(C& c, LessThan&& comp) {
+  std::stable_sort(container_algorithm_internal::c_begin(c),
+                   container_algorithm_internal::c_end(c),
+                   std::forward<LessThan>(comp));
+}
+
+// c_is_sorted()
+//
+// Container-based version of the <algorithm> `std::is_sorted()` function
+// to evaluate whether the given container is sorted in ascending order.
+template <typename C>
+bool c_is_sorted(const C& c) {
+  return std::is_sorted(container_algorithm_internal::c_begin(c),
+                        container_algorithm_internal::c_end(c));
+}
+
+// c_is_sorted() overload for performing a `comp` comparison other than the
+// default `operator<`.
+template <typename C, typename LessThan>
+bool c_is_sorted(const C& c, LessThan&& comp) {
+  return std::is_sorted(container_algorithm_internal::c_begin(c),
+                        container_algorithm_internal::c_end(c),
+                        std::forward<LessThan>(comp));
+}
+
+// c_partial_sort()
+//
+// Container-based version of the <algorithm> `std::partial_sort()` function
+// to rearrange elements within a container such that elements before `middle`
+// are sorted in ascending order.
+template <typename RandomAccessContainer>
+void c_partial_sort(
+    RandomAccessContainer& sequence,
+    container_algorithm_internal::ContainerIter<RandomAccessContainer> middle) {
+  std::partial_sort(container_algorithm_internal::c_begin(sequence), middle,
+                    container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_partial_sort() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename RandomAccessContainer, typename LessThan>
+void c_partial_sort(
+    RandomAccessContainer& sequence,
+    container_algorithm_internal::ContainerIter<RandomAccessContainer> middle,
+    LessThan&& comp) {
+  std::partial_sort(container_algorithm_internal::c_begin(sequence), middle,
+                    container_algorithm_internal::c_end(sequence),
+                    std::forward<LessThan>(comp));
+}
+
+// c_partial_sort_copy()
+//
+// Container-based version of the <algorithm> `std::partial_sort_copy()`
+// function to sort elements from `sequence` in ascending order, writing them
+// into the range `result` (which serves as the output parameter). At most
+// min(result.last - result.first, sequence.last - sequence.first) elements
+// from the sequence will be stored in the result.
+template <typename C, typename RandomAccessContainer>
+container_algorithm_internal::ContainerIter<RandomAccessContainer>
+c_partial_sort_copy(const C& sequence, RandomAccessContainer& result) {
+  return std::partial_sort_copy(container_algorithm_internal::c_begin(sequence),
+                                container_algorithm_internal::c_end(sequence),
+                                container_algorithm_internal::c_begin(result),
+                                container_algorithm_internal::c_end(result));
+}
+
+// Overload of c_partial_sort_copy() for performing a `comp` comparison other
+// than the default `operator<`.
+template <typename C, typename RandomAccessContainer, typename LessThan>
+container_algorithm_internal::ContainerIter<RandomAccessContainer>
+c_partial_sort_copy(const C& sequence, RandomAccessContainer& result,
+                    LessThan&& comp) {
+  return std::partial_sort_copy(container_algorithm_internal::c_begin(sequence),
+                                container_algorithm_internal::c_end(sequence),
+                                container_algorithm_internal::c_begin(result),
+                                container_algorithm_internal::c_end(result),
+                                std::forward<LessThan>(comp));
+}
+
+// c_is_sorted_until()
+//
+// Container-based version of the <algorithm> `std::is_sorted_until()` function
+// to return the first element within a container that is not sorted in
+// ascending order as an iterator.
+template <typename C>
+container_algorithm_internal::ContainerIter<C> c_is_sorted_until(C& c) {
+  return std::is_sorted_until(container_algorithm_internal::c_begin(c),
+                              container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_is_sorted_until() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename C, typename LessThan>
+container_algorithm_internal::ContainerIter<C> c_is_sorted_until(
+    C& c, LessThan&& comp) {
+  return std::is_sorted_until(container_algorithm_internal::c_begin(c),
+                              container_algorithm_internal::c_end(c),
+                              std::forward<LessThan>(comp));
+}
+
+// c_nth_element()
+//
+// Container-based version of the <algorithm> `std::nth_element()` function
+// to rearrange the elements within a container such that the `nth` element
+// would be in that position in an ordered sequence; other elements may be in
+// any order, except that all preceding `nth` will be less than that element,
+// and all following `nth` will be greater than that element.
+template <typename RandomAccessContainer>
+void c_nth_element(
+    RandomAccessContainer& sequence,
+    container_algorithm_internal::ContainerIter<RandomAccessContainer> nth) {
+  std::nth_element(container_algorithm_internal::c_begin(sequence), nth,
+                   container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_nth_element() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename RandomAccessContainer, typename LessThan>
+void c_nth_element(
+    RandomAccessContainer& sequence,
+    container_algorithm_internal::ContainerIter<RandomAccessContainer> nth,
+    LessThan&& comp) {
+  std::nth_element(container_algorithm_internal::c_begin(sequence), nth,
+                   container_algorithm_internal::c_end(sequence),
+                   std::forward<LessThan>(comp));
+}
+
+//------------------------------------------------------------------------------
+// <algorithm> Binary Search
+//------------------------------------------------------------------------------
+
+// c_lower_bound()
+//
+// Container-based version of the <algorithm> `std::lower_bound()` function
+// to return an iterator pointing to the first element in a sorted container
+// which does not compare less than `value`.
+template <typename Sequence, typename T>
+container_algorithm_internal::ContainerIter<Sequence> c_lower_bound(
+    Sequence& sequence, const T& value) {
+  return std::lower_bound(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence), value);
+}
+
+// Overload of c_lower_bound() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename Sequence, typename T, typename LessThan>
+container_algorithm_internal::ContainerIter<Sequence> c_lower_bound(
+    Sequence& sequence, const T& value, LessThan&& comp) {
+  return std::lower_bound(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence), value,
+                          std::forward<LessThan>(comp));
+}
+
+// c_upper_bound()
+//
+// Container-based version of the <algorithm> `std::upper_bound()` function
+// to return an iterator pointing to the first element in a sorted container
+// which is greater than `value`.
+template <typename Sequence, typename T>
+container_algorithm_internal::ContainerIter<Sequence> c_upper_bound(
+    Sequence& sequence, const T& value) {
+  return std::upper_bound(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence), value);
+}
+
+// Overload of c_upper_bound() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename Sequence, typename T, typename LessThan>
+container_algorithm_internal::ContainerIter<Sequence> c_upper_bound(
+    Sequence& sequence, const T& value, LessThan&& comp) {
+  return std::upper_bound(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence), value,
+                          std::forward<LessThan>(comp));
+}
+
+// c_equal_range()
+//
+// Container-based version of the <algorithm> `std::equal_range()` function
+// to return an iterator pair pointing to the first and last elements in a
+// sorted container which compare equal to `value`.
+template <typename Sequence, typename T>
+container_algorithm_internal::ContainerIterPairType<Sequence, Sequence>
+c_equal_range(Sequence& sequence, const T& value) {
+  return std::equal_range(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence), value);
+}
+
+// Overload of c_equal_range() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename Sequence, typename T, typename LessThan>
+container_algorithm_internal::ContainerIterPairType<Sequence, Sequence>
+c_equal_range(Sequence& sequence, const T& value, LessThan&& comp) {
+  return std::equal_range(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence), value,
+                          std::forward<LessThan>(comp));
+}
+
+// c_binary_search()
+//
+// Container-based version of the <algorithm> `std::binary_search()` function
+// to test whether the sorted container contains an element equivalent to
+// `value`.
+template <typename Sequence, typename T>
+bool c_binary_search(const Sequence& sequence, const T& value) {
+  return std::binary_search(container_algorithm_internal::c_begin(sequence),
+                            container_algorithm_internal::c_end(sequence),
+                            value);
+}
+
+// Overload of c_binary_search() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename Sequence, typename T, typename LessThan>
+bool c_binary_search(const Sequence& sequence, const T& value,
+                     LessThan&& comp) {
+  return std::binary_search(container_algorithm_internal::c_begin(sequence),
+                            container_algorithm_internal::c_end(sequence),
+                            value, std::forward<LessThan>(comp));
+}
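+
+// Illustrative usage sketch (comment only, not part of the upstream header):
+//
+//   std::vector<int> v = {1, 2, 2, 3};  // must already be sorted
+//   bool has_two = absl::c_binary_search(v, 2);  // true
+//   auto lo = absl::c_lower_bound(v, 2);         // iterator to the first 2
+//   auto hi = absl::c_upper_bound(v, 2);         // iterator to the 3
+//   // std::distance(lo, hi) == 2, i.e. the number of 2s in `v`.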
+
+//------------------------------------------------------------------------------
+// <algorithm> Merge functions
+//------------------------------------------------------------------------------
+
+// c_merge()
+//
+// Container-based version of the <algorithm> `std::merge()` function
+// to merge two sorted containers into a single sorted iterator.
+template <typename C1, typename C2, typename OutputIterator>
+OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result) {
+  return std::merge(container_algorithm_internal::c_begin(c1),
+                    container_algorithm_internal::c_end(c1),
+                    container_algorithm_internal::c_begin(c2),
+                    container_algorithm_internal::c_end(c2), result);
+}
+
+// Overload of c_merge() for performing a `comp` comparison other than
+// the default `operator<`.
+template <typename C1, typename C2, typename OutputIterator, typename LessThan>
+OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result,
+                       LessThan&& comp) {
+  return std::merge(container_algorithm_internal::c_begin(c1),
+                    container_algorithm_internal::c_end(c1),
+                    container_algorithm_internal::c_begin(c2),
+                    container_algorithm_internal::c_end(c2), result,
+                    std::forward<LessThan>(comp));
+}
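+
+// Illustrative usage sketch (comment only, not part of the upstream header);
+// assumes <vector> and <iterator> are included:
+//
+//   std::vector<int> a = {1, 3, 5};
+//   std::vector<int> b = {2, 4, 6};
+//   std::vector<int> merged;
+//   absl::c_merge(a, b, std::back_inserter(merged));
+//   // merged == {1, 2, 3, 4, 5, 6}; both inputs must already be sorted.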
+
+// c_inplace_merge()
+//
+// Container-based version of the <algorithm> `std::inplace_merge()` function
+// to merge a supplied iterator `middle` into a container.
+template <typename C>
+void c_inplace_merge(C& c,
+                     container_algorithm_internal::ContainerIter<C> middle) {
+  std::inplace_merge(container_algorithm_internal::c_begin(c), middle,
+                     container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_inplace_merge() for performing a merge using a `comp` other
+// than `operator<`.
+template <typename C, typename LessThan>
+void c_inplace_merge(C& c,
+                     container_algorithm_internal::ContainerIter<C> middle,
+                     LessThan&& comp) {
+  std::inplace_merge(container_algorithm_internal::c_begin(c), middle,
+                     container_algorithm_internal::c_end(c),
+                     std::forward<LessThan>(comp));
+}
+
+// c_includes()
+//
+// Container-based version of the <algorithm> `std::includes()` function
+// to test whether a sorted container `c1` entirely contains another sorted
+// container `c2`.
+template <typename C1, typename C2>
+bool c_includes(const C1& c1, const C2& c2) {
+  return std::includes(container_algorithm_internal::c_begin(c1),
+                       container_algorithm_internal::c_end(c1),
+                       container_algorithm_internal::c_begin(c2),
+                       container_algorithm_internal::c_end(c2));
+}
+
+// Overload of c_includes() for performing a merge using a `comp` other than
+// `operator<`.
+template <typename C1, typename C2, typename LessThan>
+bool c_includes(const C1& c1, const C2& c2, LessThan&& comp) {
+  return std::includes(container_algorithm_internal::c_begin(c1),
+                       container_algorithm_internal::c_end(c1),
+                       container_algorithm_internal::c_begin(c2),
+                       container_algorithm_internal::c_end(c2),
+                       std::forward<LessThan>(comp));
+}
+
+// c_set_union()
+//
+// Container-based version of the <algorithm> `std::set_union()` function
+// to return an iterator containing the union of two containers; duplicate
+// values are not copied into the output.
+template <typename C1, typename C2, typename OutputIterator,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C1>::value,
+              void>::type,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C2>::value,
+              void>::type>
+OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output) {
+  return std::set_union(container_algorithm_internal::c_begin(c1),
+                        container_algorithm_internal::c_end(c1),
+                        container_algorithm_internal::c_begin(c2),
+                        container_algorithm_internal::c_end(c2), output);
+}
+
+// Overload of c_set_union() for performing a merge using a `comp` other than
+// `operator<`.
+template <typename C1, typename C2, typename OutputIterator, typename LessThan,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C1>::value,
+              void>::type,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C2>::value,
+              void>::type>
+OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output,
+                           LessThan&& comp) {
+  return std::set_union(container_algorithm_internal::c_begin(c1),
+                        container_algorithm_internal::c_end(c1),
+                        container_algorithm_internal::c_begin(c2),
+                        container_algorithm_internal::c_end(c2), output,
+                        std::forward<LessThan>(comp));
+}
+
+// c_set_intersection()
+//
+// Container-based version of the <algorithm> `std::set_intersection()` function
+// to return an iterator containing the intersection of two sorted containers.
+template <typename C1, typename C2, typename OutputIterator,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C1>::value,
+              void>::type,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C2>::value,
+              void>::type>
+OutputIterator c_set_intersection(const C1& c1, const C2& c2,
+                                  OutputIterator output) {
+  // In debug builds, ensure that both containers are sorted with respect to the
+  // default comparator. std::set_intersection requires the containers be sorted
+  // using operator<.
+  assert(absl::c_is_sorted(c1));
+  assert(absl::c_is_sorted(c2));
+  return std::set_intersection(container_algorithm_internal::c_begin(c1),
+                               container_algorithm_internal::c_end(c1),
+                               container_algorithm_internal::c_begin(c2),
+                               container_algorithm_internal::c_end(c2), output);
+}
+
+// Overload of c_set_intersection() for performing a merge using a `comp` other
+// than `operator<`.
+template <typename C1, typename C2, typename OutputIterator, typename LessThan,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C1>::value,
+              void>::type,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C2>::value,
+              void>::type>
+OutputIterator c_set_intersection(const C1& c1, const C2& c2,
+                                  OutputIterator output, LessThan&& comp) {
+  // In debug builds, ensure that both containers are sorted with respect to the
+  // comparator `comp`. std::set_intersection requires the containers be sorted
+  // using the same comparator.
+  assert(absl::c_is_sorted(c1, comp));
+  assert(absl::c_is_sorted(c2, comp));
+  return std::set_intersection(container_algorithm_internal::c_begin(c1),
+                               container_algorithm_internal::c_end(c1),
+                               container_algorithm_internal::c_begin(c2),
+                               container_algorithm_internal::c_end(c2), output,
+                               std::forward<LessThan>(comp));
+}
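+
+// Illustrative usage sketch (comment only, not part of the upstream header);
+// assumes <vector> and <iterator> are included:
+//
+//   std::vector<int> a = {1, 2, 3, 4};  // sorted
+//   std::vector<int> b = {2, 4, 6};     // sorted
+//   std::vector<int> common;
+//   absl::c_set_intersection(a, b, std::back_inserter(common));
+//   // common == {2, 4}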
+
+// c_set_difference()
+//
+// Container-based version of the <algorithm> `std::set_difference()` function
+// to return an iterator containing elements present in the first container but
+// not in the second.
+template <typename C1, typename C2, typename OutputIterator,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C1>::value,
+              void>::type,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C2>::value,
+              void>::type>
+OutputIterator c_set_difference(const C1& c1, const C2& c2,
+                                OutputIterator output) {
+  return std::set_difference(container_algorithm_internal::c_begin(c1),
+                             container_algorithm_internal::c_end(c1),
+                             container_algorithm_internal::c_begin(c2),
+                             container_algorithm_internal::c_end(c2), output);
+}
+
+// Overload of c_set_difference() for performing a merge using a `comp` other
+// than `operator<`.
+template <typename C1, typename C2, typename OutputIterator, typename LessThan,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C1>::value,
+              void>::type,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C2>::value,
+              void>::type>
+OutputIterator c_set_difference(const C1& c1, const C2& c2,
+                                OutputIterator output, LessThan&& comp) {
+  return std::set_difference(container_algorithm_internal::c_begin(c1),
+                             container_algorithm_internal::c_end(c1),
+                             container_algorithm_internal::c_begin(c2),
+                             container_algorithm_internal::c_end(c2), output,
+                             std::forward<LessThan>(comp));
+}
+
+// c_set_symmetric_difference()
+//
+// Container-based version of the <algorithm> `std::set_symmetric_difference()`
+// function to return an iterator containing elements present in either one
+// container or the other, but not both.
+template <typename C1, typename C2, typename OutputIterator,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C1>::value,
+              void>::type,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C2>::value,
+              void>::type>
+OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2,
+                                          OutputIterator output) {
+  return std::set_symmetric_difference(
+      container_algorithm_internal::c_begin(c1),
+      container_algorithm_internal::c_end(c1),
+      container_algorithm_internal::c_begin(c2),
+      container_algorithm_internal::c_end(c2), output);
+}
+
+// Overload of c_set_symmetric_difference() for performing a merge using a
+// `comp` other than `operator<`.
+template <typename C1, typename C2, typename OutputIterator, typename LessThan,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C1>::value,
+              void>::type,
+          typename = typename std::enable_if<
+              !container_algorithm_internal::IsUnorderedContainer<C2>::value,
+              void>::type>
+OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2,
+                                          OutputIterator output,
+                                          LessThan&& comp) {
+  return std::set_symmetric_difference(
+      container_algorithm_internal::c_begin(c1),
+      container_algorithm_internal::c_end(c1),
+      container_algorithm_internal::c_begin(c2),
+      container_algorithm_internal::c_end(c2), output,
+      std::forward<LessThan>(comp));
+}
+
+//------------------------------------------------------------------------------
+// <algorithm> Heap functions
+//------------------------------------------------------------------------------
+
+// c_push_heap()
+//
+// Container-based version of the <algorithm> `std::push_heap()` function
+// to push a value onto a container heap.
+template <typename RandomAccessContainer>
+void c_push_heap(RandomAccessContainer& sequence) {
+  std::push_heap(container_algorithm_internal::c_begin(sequence),
+                 container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_push_heap() for performing a push operation on a heap using a
+// `comp` other than `operator<`.
+template <typename RandomAccessContainer, typename LessThan>
+void c_push_heap(RandomAccessContainer& sequence, LessThan&& comp) {
+  std::push_heap(container_algorithm_internal::c_begin(sequence),
+                 container_algorithm_internal::c_end(sequence),
+                 std::forward<LessThan>(comp));
+}
+
+// c_pop_heap()
+//
+// Container-based version of the <algorithm> `std::pop_heap()` function
+// to pop a value from a heap container.
+template <typename RandomAccessContainer>
+void c_pop_heap(RandomAccessContainer& sequence) {
+  std::pop_heap(container_algorithm_internal::c_begin(sequence),
+                container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_pop_heap() for performing a pop operation on a heap using a
+// `comp` other than `operator<`.
+template <typename RandomAccessContainer, typename LessThan>
+void c_pop_heap(RandomAccessContainer& sequence, LessThan&& comp) {
+  std::pop_heap(container_algorithm_internal::c_begin(sequence),
+                container_algorithm_internal::c_end(sequence),
+                std::forward<LessThan>(comp));
+}
+
+// c_make_heap()
+//
+// Container-based version of the <algorithm> `std::make_heap()` function
+// to make a container a heap.
+template <typename RandomAccessContainer>
+void c_make_heap(RandomAccessContainer& sequence) {
+  std::make_heap(container_algorithm_internal::c_begin(sequence),
+                 container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_make_heap() for performing heap comparisons using a
+// `comp` other than `operator<`
+template <typename RandomAccessContainer, typename LessThan>
+void c_make_heap(RandomAccessContainer& sequence, LessThan&& comp) {
+  std::make_heap(container_algorithm_internal::c_begin(sequence),
+                 container_algorithm_internal::c_end(sequence),
+                 std::forward<LessThan>(comp));
+}
+
+// c_sort_heap()
+//
+// Container-based version of the <algorithm> `std::sort_heap()` function
+// to sort a heap into ascending order (after which it is no longer a heap).
+template <typename RandomAccessContainer>
+void c_sort_heap(RandomAccessContainer& sequence) {
+  std::sort_heap(container_algorithm_internal::c_begin(sequence),
+                 container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_sort_heap() for performing heap comparisons using a
+// `comp` other than `operator<`
+template <typename RandomAccessContainer, typename LessThan>
+void c_sort_heap(RandomAccessContainer& sequence, LessThan&& comp) {
+  std::sort_heap(container_algorithm_internal::c_begin(sequence),
+                 container_algorithm_internal::c_end(sequence),
+                 std::forward<LessThan>(comp));
+}
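+
+// Illustrative usage sketch (comment only, not part of the upstream header):
+//
+//   std::vector<int> v = {3, 1, 4, 1, 5};
+//   absl::c_make_heap(v);  // largest element is now at v.front()
+//   v.push_back(9);
+//   absl::c_push_heap(v);  // restore the heap property after appending
+//   absl::c_pop_heap(v);   // move the largest element to v.back()
+//   v.pop_back();          // ...and remove it
+//   absl::c_sort_heap(v);  // heap -> ascending order (no longer a heap)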
+
+// c_is_heap()
+//
+// Container-based version of the <algorithm> `std::is_heap()` function
+// to check whether the given container is a heap.
+template <typename RandomAccessContainer>
+bool c_is_heap(const RandomAccessContainer& sequence) {
+  return std::is_heap(container_algorithm_internal::c_begin(sequence),
+                      container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_is_heap() for performing heap comparisons using a
+// `comp` other than `operator<`
+template <typename RandomAccessContainer, typename LessThan>
+bool c_is_heap(const RandomAccessContainer& sequence, LessThan&& comp) {
+  return std::is_heap(container_algorithm_internal::c_begin(sequence),
+                      container_algorithm_internal::c_end(sequence),
+                      std::forward<LessThan>(comp));
+}
+
+// c_is_heap_until()
+//
+// Container-based version of the <algorithm> `std::is_heap_until()` function
+// to find the first element in a given container which is not in heap order.
+template <typename RandomAccessContainer>
+container_algorithm_internal::ContainerIter<RandomAccessContainer>
+c_is_heap_until(RandomAccessContainer& sequence) {
+  return std::is_heap_until(container_algorithm_internal::c_begin(sequence),
+                            container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_is_heap_until() for performing heap comparisons using a
+// `comp` other than `operator<`
+template <typename RandomAccessContainer, typename LessThan>
+container_algorithm_internal::ContainerIter<RandomAccessContainer>
+c_is_heap_until(RandomAccessContainer& sequence, LessThan&& comp) {
+  return std::is_heap_until(container_algorithm_internal::c_begin(sequence),
+                            container_algorithm_internal::c_end(sequence),
+                            std::forward<LessThan>(comp));
+}
+
+//------------------------------------------------------------------------------
+//  <algorithm> Min/max
+//------------------------------------------------------------------------------
+
+// c_min_element()
+//
+// Container-based version of the <algorithm> `std::min_element()` function
+// to return an iterator pointing to the element with the smallest value, using
+// `operator<` to make the comparisons.
+template <typename Sequence>
+container_algorithm_internal::ContainerIter<Sequence> c_min_element(
+    Sequence& sequence) {
+  return std::min_element(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_min_element() for performing a `comp` comparison other than
+// `operator<`.
+template <typename Sequence, typename LessThan>
+container_algorithm_internal::ContainerIter<Sequence> c_min_element(
+    Sequence& sequence, LessThan&& comp) {
+  return std::min_element(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence),
+                          std::forward<LessThan>(comp));
+}
+
+// c_max_element()
+//
+// Container-based version of the <algorithm> `std::max_element()` function
+// to return an iterator pointing to the element with the largest value, using
+// `operator<` to make the comparisons.
+template <typename Sequence>
+container_algorithm_internal::ContainerIter<Sequence> c_max_element(
+    Sequence& sequence) {
+  return std::max_element(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence));
+}
+
+// Overload of c_max_element() for performing a `comp` comparison other than
+// `operator<`.
+template <typename Sequence, typename LessThan>
+container_algorithm_internal::ContainerIter<Sequence> c_max_element(
+    Sequence& sequence, LessThan&& comp) {
+  return std::max_element(container_algorithm_internal::c_begin(sequence),
+                          container_algorithm_internal::c_end(sequence),
+                          std::forward<LessThan>(comp));
+}
+
+// c_minmax_element()
+//
+// Container-based version of the <algorithm> `std::minmax_element()` function
+// to return a pair of iterators pointing to the elements containing the
+// smallest and largest values, respectively, using `operator<` to make the
+// comparisons.
+template <typename C>
+container_algorithm_internal::ContainerIterPairType<C, C> c_minmax_element(
+    C& c) {
+  return std::minmax_element(container_algorithm_internal::c_begin(c),
+                             container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_minmax_element() for performing `comp` comparisons other than
+// `operator<`.
+template <typename C, typename LessThan>
+container_algorithm_internal::ContainerIterPairType<C, C> c_minmax_element(
+    C& c, LessThan&& comp) {
+  return std::minmax_element(container_algorithm_internal::c_begin(c),
+                             container_algorithm_internal::c_end(c),
+                             std::forward<LessThan>(comp));
+}
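+
+// Illustrative usage sketch (comment only, not part of the upstream header):
+//
+//   std::vector<int> v = {4, 1, 9, 2};
+//   auto minmax = absl::c_minmax_element(v);
+//   // *minmax.first == 1 and *minmax.second == 9; both are iterators into `v`.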
+
+//------------------------------------------------------------------------------
+//  <algorithm> Lexicographical Comparisons
+//------------------------------------------------------------------------------
+
+// c_lexicographical_compare()
+//
+// Container-based version of the <algorithm> `std::lexicographical_compare()`
+// function to lexicographically compare (e.g. sort words alphabetically) two
+// container sequences. The comparison is performed using `operator<`. Note
+// that capital letters ("A-Z") have ASCII values less than lowercase letters
+// ("a-z").
+template <typename Sequence1, typename Sequence2>
+bool c_lexicographical_compare(const Sequence1& sequence1,
+                               const Sequence2& sequence2) {
+  return std::lexicographical_compare(
+      container_algorithm_internal::c_begin(sequence1),
+      container_algorithm_internal::c_end(sequence1),
+      container_algorithm_internal::c_begin(sequence2),
+      container_algorithm_internal::c_end(sequence2));
+}
+
+// Overload of c_lexicographical_compare() for performing a lexicographical
+// comparison using a `comp` operator instead of `operator<`.
+template <typename Sequence1, typename Sequence2, typename LessThan>
+bool c_lexicographical_compare(const Sequence1& sequence1,
+                               const Sequence2& sequence2, LessThan&& comp) {
+  return std::lexicographical_compare(
+      container_algorithm_internal::c_begin(sequence1),
+      container_algorithm_internal::c_end(sequence1),
+      container_algorithm_internal::c_begin(sequence2),
+      container_algorithm_internal::c_end(sequence2),
+      std::forward<LessThan>(comp));
+}
+
+// c_next_permutation()
+//
+// Container-based version of the <algorithm> `std::next_permutation()` function
+// to rearrange a container's elements into the next lexicographically greater
+// permutation.
+template <typename C>
+bool c_next_permutation(C& c) {
+  return std::next_permutation(container_algorithm_internal::c_begin(c),
+                               container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_next_permutation() for performing a lexicographical
+// comparison using a `comp` operator instead of `operator<`.
+template <typename C, typename LessThan>
+bool c_next_permutation(C& c, LessThan&& comp) {
+  return std::next_permutation(container_algorithm_internal::c_begin(c),
+                               container_algorithm_internal::c_end(c),
+                               std::forward<LessThan>(comp));
+}
+
+// c_prev_permutation()
+//
+// Container-based version of the <algorithm> `std::prev_permutation()` function
+// to rearrange a container's elements into the next lexicographically lesser
+// permutation.
+template <typename C>
+bool c_prev_permutation(C& c) {
+  return std::prev_permutation(container_algorithm_internal::c_begin(c),
+                               container_algorithm_internal::c_end(c));
+}
+
+// Overload of c_prev_permutation() for performing a lexicographical
+// comparison using a `comp` operator instead of `operator<`.
+template <typename C, typename LessThan>
+bool c_prev_permutation(C& c, LessThan&& comp) {
+  return std::prev_permutation(container_algorithm_internal::c_begin(c),
+                               container_algorithm_internal::c_end(c),
+                               std::forward<LessThan>(comp));
+}
+
+//------------------------------------------------------------------------------
+// <numeric> algorithms
+//------------------------------------------------------------------------------
+
+// c_iota()
+//
+// Container-based version of the <numeric> `std::iota()` function
+// to compute successive values of `value`, as if incremented with `++value`
+// after each element is written, and write them to the container.
+template <typename Sequence, typename T>
+void c_iota(Sequence& sequence, const T& value) {
+  std::iota(container_algorithm_internal::c_begin(sequence),
+            container_algorithm_internal::c_end(sequence), value);
+}
+
+// c_accumulate()
+//
+// Container-based version of the <numeric> `std::accumulate()` function
+// to accumulate the element values of a container to `init` and return that
+// accumulation by value.
+//
+// Note: Due to a language technicality this function has return type
+// absl::decay_t<T>. As a user of this function you can casually read
+// this as "returns T by value" and assume it does the right thing.
+template <typename Sequence, typename T>
+decay_t<T> c_accumulate(const Sequence& sequence, T&& init) {
+  return std::accumulate(container_algorithm_internal::c_begin(sequence),
+                         container_algorithm_internal::c_end(sequence),
+                         std::forward<T>(init));
+}
+
+// Overload of c_accumulate() for using a binary operation other than
+// addition to compute the accumulation.
+template <typename Sequence, typename T, typename BinaryOp>
+decay_t<T> c_accumulate(const Sequence& sequence, T&& init,
+                        BinaryOp&& binary_op) {
+  return std::accumulate(container_algorithm_internal::c_begin(sequence),
+                         container_algorithm_internal::c_end(sequence),
+                         std::forward<T>(init),
+                         std::forward<BinaryOp>(binary_op));
+}
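+
+// Illustrative usage sketch (comment only, not part of the upstream header);
+// assumes <vector> and <functional> are included:
+//
+//   std::vector<int> v = {1, 2, 3, 4};
+//   int sum = absl::c_accumulate(v, 0);                              // 10
+//   int product = absl::c_accumulate(v, 1, std::multiplies<int>());  // 24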
+
+// c_inner_product()
+//
+// Container-based version of the <numeric> `std::inner_product()` function
+// to compute the cumulative inner product of container element pairs.
+//
+// Note: Due to a language technicality this function has return type
+// absl::decay_t<T>. As a user of this function you can casually read
+// this as "returns T by value" and assume it does the right thing.
+template <typename Sequence1, typename Sequence2, typename T>
+decay_t<T> c_inner_product(const Sequence1& factors1, const Sequence2& factors2,
+                           T&& sum) {
+  return std::inner_product(container_algorithm_internal::c_begin(factors1),
+                            container_algorithm_internal::c_end(factors1),
+                            container_algorithm_internal::c_begin(factors2),
+                            std::forward<T>(sum));
+}
+
+// Overload of c_inner_product() for using binary operations other than
+// `operator+` (for computing the accumulation) and `operator*` (for computing
+// the product between the two containers' element pairs).
+template <typename Sequence1, typename Sequence2, typename T,
+          typename BinaryOp1, typename BinaryOp2>
+decay_t<T> c_inner_product(const Sequence1& factors1, const Sequence2& factors2,
+                           T&& sum, BinaryOp1&& op1, BinaryOp2&& op2) {
+  return std::inner_product(container_algorithm_internal::c_begin(factors1),
+                            container_algorithm_internal::c_end(factors1),
+                            container_algorithm_internal::c_begin(factors2),
+                            std::forward<T>(sum), std::forward<BinaryOp1>(op1),
+                            std::forward<BinaryOp2>(op2));
+}
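+
+// Example (illustrative only):
+//
+//   std::vector<int> a = {1, 2, 3};
+//   std::vector<int> b = {4, 5, 6};
+//   int dot = absl::c_inner_product(a, b, 0);  // 1*4 + 2*5 + 3*6 = 32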
+
+// c_adjacent_difference()
+//
+// Container-based version of the <numeric> `std::adjacent_difference()`
+// function to compute the difference between each element and the one preceding
+// it and write it to an iterator.
+template <typename InputSequence, typename OutputIt>
+OutputIt c_adjacent_difference(const InputSequence& input,
+                               OutputIt output_first) {
+  return std::adjacent_difference(container_algorithm_internal::c_begin(input),
+                                  container_algorithm_internal::c_end(input),
+                                  output_first);
+}
+
+// Overload of c_adjacent_difference() for using a binary operation other than
+// subtraction to compute the adjacent difference.
+template <typename InputSequence, typename OutputIt, typename BinaryOp>
+OutputIt c_adjacent_difference(const InputSequence& input,
+                               OutputIt output_first, BinaryOp&& op) {
+  return std::adjacent_difference(container_algorithm_internal::c_begin(input),
+                                  container_algorithm_internal::c_end(input),
+                                  output_first, std::forward<BinaryOp>(op));
+}
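+
+// Example (illustrative only):
+//
+//   std::vector<int> in = {2, 4, 7};
+//   std::vector<int> out;
+//   absl::c_adjacent_difference(in, std::back_inserter(out));
+//   // out is now {2, 2, 3}: the first element is copied, then 4-2, then 7-4.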
+
+// c_partial_sum()
+//
+// Container-based version of the <numeric> `std::partial_sum()` function
+// to compute the partial sum of the elements in a sequence and write them
+// to an iterator. The partial sum is the sum of all element values so far in
+// the sequence.
+template <typename InputSequence, typename OutputIt>
+OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first) {
+  return std::partial_sum(container_algorithm_internal::c_begin(input),
+                          container_algorithm_internal::c_end(input),
+                          output_first);
+}
+
+// Overload of c_partial_sum() for using a binary operation other than addition
+// to compute the "partial sum".
+template <typename InputSequence, typename OutputIt, typename BinaryOp>
+OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first,
+                       BinaryOp&& op) {
+  return std::partial_sum(container_algorithm_internal::c_begin(input),
+                          container_algorithm_internal::c_end(input),
+                          output_first, std::forward<BinaryOp>(op));
+}
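+
+// Example (illustrative only):
+//
+//   std::vector<int> in = {1, 2, 3};
+//   std::vector<int> out;
+//   absl::c_partial_sum(in, std::back_inserter(out));  // out is now {1, 3, 6}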
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_ALGORITHM_CONTAINER_H_

+ 1147 - 0
Sample/lib/node_add_on/protobuf_src/absl/algorithm/container_test.cc

@@ -0,0 +1,1147 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/algorithm/container.h"
+
+#include <algorithm>
+#include <functional>
+#include <initializer_list>
+#include <iterator>
+#include <list>
+#include <memory>
+#include <ostream>
+#include <random>
+#include <set>
+#include <unordered_set>
+#include <utility>
+#include <valarray>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/casts.h"
+#include "absl/base/macros.h"
+#include "absl/memory/memory.h"
+#include "absl/types/span.h"
+
+namespace {
+
+using ::testing::Each;
+using ::testing::ElementsAre;
+using ::testing::Gt;
+using ::testing::IsNull;
+using ::testing::IsSubsetOf;
+using ::testing::Lt;
+using ::testing::Pointee;
+using ::testing::SizeIs;
+using ::testing::Truly;
+using ::testing::UnorderedElementsAre;
+
+// Most of these tests just check that the code compiles, not that it
+// does the right thing. That's fine since the functions just forward
+// to the STL implementation.
+class NonMutatingTest : public testing::Test {
+ protected:
+  std::unordered_set<int> container_ = {1, 2, 3};
+  std::list<int> sequence_ = {1, 2, 3};
+  std::vector<int> vector_ = {1, 2, 3};
+  int array_[3] = {1, 2, 3};
+};
+
+struct AccumulateCalls {
+  void operator()(int value) { calls.push_back(value); }
+  std::vector<int> calls;
+};
+
+bool Predicate(int value) { return value < 3; }
+bool BinPredicate(int v1, int v2) { return v1 < v2; }
+bool Equals(int v1, int v2) { return v1 == v2; }
+bool IsOdd(int x) { return x % 2 != 0; }
+
+TEST_F(NonMutatingTest, Distance) {
+  EXPECT_EQ(container_.size(),
+            static_cast<size_t>(absl::c_distance(container_)));
+  EXPECT_EQ(sequence_.size(), static_cast<size_t>(absl::c_distance(sequence_)));
+  EXPECT_EQ(vector_.size(), static_cast<size_t>(absl::c_distance(vector_)));
+  EXPECT_EQ(ABSL_ARRAYSIZE(array_),
+            static_cast<size_t>(absl::c_distance(array_)));
+
+  // Works with a temporary argument.
+  EXPECT_EQ(vector_.size(),
+            static_cast<size_t>(absl::c_distance(std::vector<int>(vector_))));
+}
+
+TEST_F(NonMutatingTest, Distance_OverloadedBeginEnd) {
+  // Works with classes which have custom ADL-selected overloads of std::begin
+  // and std::end.
+  std::initializer_list<int> a = {1, 2, 3};
+  std::valarray<int> b = {1, 2, 3};
+  EXPECT_EQ(3, absl::c_distance(a));
+  EXPECT_EQ(3, absl::c_distance(b));
+
+  // It is assumed that other c_* functions use the same mechanism for
+  // ADL-selecting begin/end overloads.
+}
+
+TEST_F(NonMutatingTest, ForEach) {
+  AccumulateCalls c = absl::c_for_each(container_, AccumulateCalls());
+  // Don't rely on the unordered_set's order.
+  std::sort(c.calls.begin(), c.calls.end());
+  EXPECT_EQ(vector_, c.calls);
+
+  // Works with temporary container, too.
+  AccumulateCalls c2 =
+      absl::c_for_each(std::unordered_set<int>(container_), AccumulateCalls());
+  std::sort(c2.calls.begin(), c2.calls.end());
+  EXPECT_EQ(vector_, c2.calls);
+}
+
+TEST_F(NonMutatingTest, FindReturnsCorrectType) {
+  auto it = absl::c_find(container_, 3);
+  EXPECT_EQ(3, *it);
+  absl::c_find(absl::implicit_cast<const std::list<int>&>(sequence_), 3);
+}
+
+TEST_F(NonMutatingTest, FindIf) { absl::c_find_if(container_, Predicate); }
+
+TEST_F(NonMutatingTest, FindIfNot) {
+  absl::c_find_if_not(container_, Predicate);
+}
+
+TEST_F(NonMutatingTest, FindEnd) {
+  absl::c_find_end(sequence_, vector_);
+  absl::c_find_end(vector_, sequence_);
+}
+
+TEST_F(NonMutatingTest, FindEndWithPredicate) {
+  absl::c_find_end(sequence_, vector_, BinPredicate);
+  absl::c_find_end(vector_, sequence_, BinPredicate);
+}
+
+TEST_F(NonMutatingTest, FindFirstOf) {
+  absl::c_find_first_of(container_, sequence_);
+  absl::c_find_first_of(sequence_, container_);
+}
+
+TEST_F(NonMutatingTest, FindFirstOfWithPredicate) {
+  absl::c_find_first_of(container_, sequence_, BinPredicate);
+  absl::c_find_first_of(sequence_, container_, BinPredicate);
+}
+
+TEST_F(NonMutatingTest, AdjacentFind) { absl::c_adjacent_find(sequence_); }
+
+TEST_F(NonMutatingTest, AdjacentFindWithPredicate) {
+  absl::c_adjacent_find(sequence_, BinPredicate);
+}
+
+TEST_F(NonMutatingTest, Count) { EXPECT_EQ(1, absl::c_count(container_, 3)); }
+
+TEST_F(NonMutatingTest, CountIf) {
+  EXPECT_EQ(2, absl::c_count_if(container_, Predicate));
+  const std::unordered_set<int>& const_container = container_;
+  EXPECT_EQ(2, absl::c_count_if(const_container, Predicate));
+}
+
+TEST_F(NonMutatingTest, Mismatch) {
+  // Testing necessary as absl::c_mismatch executes logic.
+  {
+    auto result = absl::c_mismatch(vector_, sequence_);
+    EXPECT_EQ(result.first, vector_.end());
+    EXPECT_EQ(result.second, sequence_.end());
+  }
+  {
+    auto result = absl::c_mismatch(sequence_, vector_);
+    EXPECT_EQ(result.first, sequence_.end());
+    EXPECT_EQ(result.second, vector_.end());
+  }
+
+  sequence_.back() = 5;
+  {
+    auto result = absl::c_mismatch(vector_, sequence_);
+    EXPECT_EQ(result.first, std::prev(vector_.end()));
+    EXPECT_EQ(result.second, std::prev(sequence_.end()));
+  }
+  {
+    auto result = absl::c_mismatch(sequence_, vector_);
+    EXPECT_EQ(result.first, std::prev(sequence_.end()));
+    EXPECT_EQ(result.second, std::prev(vector_.end()));
+  }
+
+  sequence_.pop_back();
+  {
+    auto result = absl::c_mismatch(vector_, sequence_);
+    EXPECT_EQ(result.first, std::prev(vector_.end()));
+    EXPECT_EQ(result.second, sequence_.end());
+  }
+  {
+    auto result = absl::c_mismatch(sequence_, vector_);
+    EXPECT_EQ(result.first, sequence_.end());
+    EXPECT_EQ(result.second, std::prev(vector_.end()));
+  }
+  {
+    struct NoNotEquals {
+      constexpr bool operator==(NoNotEquals) const { return true; }
+      constexpr bool operator!=(NoNotEquals) const = delete;
+    };
+    std::vector<NoNotEquals> first;
+    std::list<NoNotEquals> second;
+
+    // Check this still compiles.
+    absl::c_mismatch(first, second);
+  }
+}
+
+TEST_F(NonMutatingTest, MismatchWithPredicate) {
+  // Testing necessary as absl::c_mismatch executes logic.
+  {
+    auto result = absl::c_mismatch(vector_, sequence_, BinPredicate);
+    EXPECT_EQ(result.first, vector_.begin());
+    EXPECT_EQ(result.second, sequence_.begin());
+  }
+  {
+    auto result = absl::c_mismatch(sequence_, vector_, BinPredicate);
+    EXPECT_EQ(result.first, sequence_.begin());
+    EXPECT_EQ(result.second, vector_.begin());
+  }
+
+  sequence_.front() = 0;
+  {
+    auto result = absl::c_mismatch(vector_, sequence_, BinPredicate);
+    EXPECT_EQ(result.first, vector_.begin());
+    EXPECT_EQ(result.second, sequence_.begin());
+  }
+  {
+    auto result = absl::c_mismatch(sequence_, vector_, BinPredicate);
+    EXPECT_EQ(result.first, std::next(sequence_.begin()));
+    EXPECT_EQ(result.second, std::next(vector_.begin()));
+  }
+
+  sequence_.clear();
+  {
+    auto result = absl::c_mismatch(vector_, sequence_, BinPredicate);
+    EXPECT_EQ(result.first, vector_.begin());
+    EXPECT_EQ(result.second, sequence_.end());
+  }
+  {
+    auto result = absl::c_mismatch(sequence_, vector_, BinPredicate);
+    EXPECT_EQ(result.first, sequence_.end());
+    EXPECT_EQ(result.second, vector_.begin());
+  }
+}
+
+TEST_F(NonMutatingTest, Equal) {
+  EXPECT_TRUE(absl::c_equal(vector_, sequence_));
+  EXPECT_TRUE(absl::c_equal(sequence_, vector_));
+  EXPECT_TRUE(absl::c_equal(sequence_, array_));
+  EXPECT_TRUE(absl::c_equal(array_, vector_));
+
+  // Test that behavior appropriately differs from that of equal().
+  std::vector<int> vector_plus = {1, 2, 3};
+  vector_plus.push_back(4);
+  EXPECT_FALSE(absl::c_equal(vector_plus, sequence_));
+  EXPECT_FALSE(absl::c_equal(sequence_, vector_plus));
+  EXPECT_FALSE(absl::c_equal(array_, vector_plus));
+}
+
+TEST_F(NonMutatingTest, EqualWithPredicate) {
+  EXPECT_TRUE(absl::c_equal(vector_, sequence_, Equals));
+  EXPECT_TRUE(absl::c_equal(sequence_, vector_, Equals));
+  EXPECT_TRUE(absl::c_equal(array_, sequence_, Equals));
+  EXPECT_TRUE(absl::c_equal(vector_, array_, Equals));
+
+  // Test that behavior appropriately differs from that of equal().
+  std::vector<int> vector_plus = {1, 2, 3};
+  vector_plus.push_back(4);
+  EXPECT_FALSE(absl::c_equal(vector_plus, sequence_, Equals));
+  EXPECT_FALSE(absl::c_equal(sequence_, vector_plus, Equals));
+  EXPECT_FALSE(absl::c_equal(vector_plus, array_, Equals));
+}
+
+TEST_F(NonMutatingTest, IsPermutation) {
+  auto vector_permut_ = vector_;
+  std::next_permutation(vector_permut_.begin(), vector_permut_.end());
+  EXPECT_TRUE(absl::c_is_permutation(vector_permut_, sequence_));
+  EXPECT_TRUE(absl::c_is_permutation(sequence_, vector_permut_));
+
+  // Test that behavior appropriately differs from that of is_permutation().
+  std::vector<int> vector_plus = {1, 2, 3};
+  vector_plus.push_back(4);
+  EXPECT_FALSE(absl::c_is_permutation(vector_plus, sequence_));
+  EXPECT_FALSE(absl::c_is_permutation(sequence_, vector_plus));
+}
+
+TEST_F(NonMutatingTest, IsPermutationWithPredicate) {
+  auto vector_permut_ = vector_;
+  std::next_permutation(vector_permut_.begin(), vector_permut_.end());
+  EXPECT_TRUE(absl::c_is_permutation(vector_permut_, sequence_, Equals));
+  EXPECT_TRUE(absl::c_is_permutation(sequence_, vector_permut_, Equals));
+
+  // Test that behavior appropriately differs from that of is_permutation().
+  std::vector<int> vector_plus = {1, 2, 3};
+  vector_plus.push_back(4);
+  EXPECT_FALSE(absl::c_is_permutation(vector_plus, sequence_, Equals));
+  EXPECT_FALSE(absl::c_is_permutation(sequence_, vector_plus, Equals));
+}
+
+TEST_F(NonMutatingTest, Search) {
+  absl::c_search(sequence_, vector_);
+  absl::c_search(vector_, sequence_);
+  absl::c_search(array_, sequence_);
+}
+
+TEST_F(NonMutatingTest, SearchWithPredicate) {
+  absl::c_search(sequence_, vector_, BinPredicate);
+  absl::c_search(vector_, sequence_, BinPredicate);
+}
+
+TEST_F(NonMutatingTest, SearchN) { absl::c_search_n(sequence_, 3, 1); }
+
+TEST_F(NonMutatingTest, SearchNWithPredicate) {
+  absl::c_search_n(sequence_, 3, 1, BinPredicate);
+}
+
+TEST_F(NonMutatingTest, LowerBound) {
+  std::list<int>::iterator i = absl::c_lower_bound(sequence_, 3);
+  ASSERT_TRUE(i != sequence_.end());
+  EXPECT_EQ(2, std::distance(sequence_.begin(), i));
+  EXPECT_EQ(3, *i);
+}
+
+TEST_F(NonMutatingTest, LowerBoundWithPredicate) {
+  std::vector<int> v(vector_);
+  std::sort(v.begin(), v.end(), std::greater<int>());
+  std::vector<int>::iterator i = absl::c_lower_bound(v, 3, std::greater<int>());
+  EXPECT_TRUE(i == v.begin());
+  EXPECT_EQ(3, *i);
+}
+
+TEST_F(NonMutatingTest, UpperBound) {
+  std::list<int>::iterator i = absl::c_upper_bound(sequence_, 1);
+  ASSERT_TRUE(i != sequence_.end());
+  EXPECT_EQ(1, std::distance(sequence_.begin(), i));
+  EXPECT_EQ(2, *i);
+}
+
+TEST_F(NonMutatingTest, UpperBoundWithPredicate) {
+  std::vector<int> v(vector_);
+  std::sort(v.begin(), v.end(), std::greater<int>());
+  std::vector<int>::iterator i = absl::c_upper_bound(v, 1, std::greater<int>());
+  EXPECT_EQ(3, i - v.begin());
+  EXPECT_TRUE(i == v.end());
+}
+
+TEST_F(NonMutatingTest, EqualRange) {
+  std::pair<std::list<int>::iterator, std::list<int>::iterator> p =
+      absl::c_equal_range(sequence_, 2);
+  EXPECT_EQ(1, std::distance(sequence_.begin(), p.first));
+  EXPECT_EQ(2, std::distance(sequence_.begin(), p.second));
+}
+
+TEST_F(NonMutatingTest, EqualRangeArray) {
+  auto p = absl::c_equal_range(array_, 2);
+  EXPECT_EQ(1, std::distance(std::begin(array_), p.first));
+  EXPECT_EQ(2, std::distance(std::begin(array_), p.second));
+}
+
+TEST_F(NonMutatingTest, EqualRangeWithPredicate) {
+  std::vector<int> v(vector_);
+  std::sort(v.begin(), v.end(), std::greater<int>());
+  std::pair<std::vector<int>::iterator, std::vector<int>::iterator> p =
+      absl::c_equal_range(v, 2, std::greater<int>());
+  EXPECT_EQ(1, std::distance(v.begin(), p.first));
+  EXPECT_EQ(2, std::distance(v.begin(), p.second));
+}
+
+TEST_F(NonMutatingTest, BinarySearch) {
+  EXPECT_TRUE(absl::c_binary_search(vector_, 2));
+  EXPECT_TRUE(absl::c_binary_search(std::vector<int>(vector_), 2));
+}
+
+TEST_F(NonMutatingTest, BinarySearchWithPredicate) {
+  std::vector<int> v(vector_);
+  std::sort(v.begin(), v.end(), std::greater<int>());
+  EXPECT_TRUE(absl::c_binary_search(v, 2, std::greater<int>()));
+  EXPECT_TRUE(
+      absl::c_binary_search(std::vector<int>(v), 2, std::greater<int>()));
+}
+
+TEST_F(NonMutatingTest, MinElement) {
+  std::list<int>::iterator i = absl::c_min_element(sequence_);
+  ASSERT_TRUE(i != sequence_.end());
+  EXPECT_EQ(*i, 1);
+}
+
+TEST_F(NonMutatingTest, MinElementWithPredicate) {
+  std::list<int>::iterator i =
+      absl::c_min_element(sequence_, std::greater<int>());
+  ASSERT_TRUE(i != sequence_.end());
+  EXPECT_EQ(*i, 3);
+}
+
+TEST_F(NonMutatingTest, MaxElement) {
+  std::list<int>::iterator i = absl::c_max_element(sequence_);
+  ASSERT_TRUE(i != sequence_.end());
+  EXPECT_EQ(*i, 3);
+}
+
+TEST_F(NonMutatingTest, MaxElementWithPredicate) {
+  std::list<int>::iterator i =
+      absl::c_max_element(sequence_, std::greater<int>());
+  ASSERT_TRUE(i != sequence_.end());
+  EXPECT_EQ(*i, 1);
+}
+
+TEST_F(NonMutatingTest, LexicographicalCompare) {
+  EXPECT_FALSE(absl::c_lexicographical_compare(sequence_, sequence_));
+
+  std::vector<int> v;
+  v.push_back(1);
+  v.push_back(2);
+  v.push_back(4);
+
+  EXPECT_TRUE(absl::c_lexicographical_compare(sequence_, v));
+  EXPECT_TRUE(absl::c_lexicographical_compare(std::list<int>(sequence_), v));
+}
+
+TEST_F(NonMutatingTest, LexicographicalCompareWithPredicate) {
+  EXPECT_FALSE(absl::c_lexicographical_compare(sequence_, sequence_,
+                                               std::greater<int>()));
+
+  std::vector<int> v;
+  v.push_back(1);
+  v.push_back(2);
+  v.push_back(4);
+
+  EXPECT_TRUE(
+      absl::c_lexicographical_compare(v, sequence_, std::greater<int>()));
+  EXPECT_TRUE(absl::c_lexicographical_compare(
+      std::vector<int>(v), std::list<int>(sequence_), std::greater<int>()));
+}
+
+TEST_F(NonMutatingTest, Includes) {
+  std::set<int> s(vector_.begin(), vector_.end());
+  s.insert(4);
+  EXPECT_TRUE(absl::c_includes(s, vector_));
+}
+
+TEST_F(NonMutatingTest, IncludesWithPredicate) {
+  std::vector<int> v = {3, 2, 1};
+  std::set<int, std::greater<int>> s(v.begin(), v.end());
+  s.insert(4);
+  EXPECT_TRUE(absl::c_includes(s, v, std::greater<int>()));
+}
+
+class NumericMutatingTest : public testing::Test {
+ protected:
+  std::list<int> list_ = {1, 2, 3};
+  std::vector<int> output_;
+};
+
+TEST_F(NumericMutatingTest, Iota) {
+  absl::c_iota(list_, 5);
+  std::list<int> expected{5, 6, 7};
+  EXPECT_EQ(list_, expected);
+}
+
+TEST_F(NonMutatingTest, Accumulate) {
+  EXPECT_EQ(absl::c_accumulate(sequence_, 4), 1 + 2 + 3 + 4);
+}
+
+TEST_F(NonMutatingTest, AccumulateWithBinaryOp) {
+  EXPECT_EQ(absl::c_accumulate(sequence_, 4, std::multiplies<int>()),
+            1 * 2 * 3 * 4);
+}
+
+TEST_F(NonMutatingTest, AccumulateLvalueInit) {
+  int lvalue = 4;
+  EXPECT_EQ(absl::c_accumulate(sequence_, lvalue), 1 + 2 + 3 + 4);
+}
+
+TEST_F(NonMutatingTest, AccumulateWithBinaryOpLvalueInit) {
+  int lvalue = 4;
+  EXPECT_EQ(absl::c_accumulate(sequence_, lvalue, std::multiplies<int>()),
+            1 * 2 * 3 * 4);
+}
+
+TEST_F(NonMutatingTest, InnerProduct) {
+  EXPECT_EQ(absl::c_inner_product(sequence_, vector_, 1000),
+            1000 + 1 * 1 + 2 * 2 + 3 * 3);
+}
+
+TEST_F(NonMutatingTest, InnerProductWithBinaryOps) {
+  EXPECT_EQ(absl::c_inner_product(sequence_, vector_, 10,
+                                  std::multiplies<int>(), std::plus<int>()),
+            10 * (1 + 1) * (2 + 2) * (3 + 3));
+}
+
+TEST_F(NonMutatingTest, InnerProductLvalueInit) {
+  int lvalue = 1000;
+  EXPECT_EQ(absl::c_inner_product(sequence_, vector_, lvalue),
+            1000 + 1 * 1 + 2 * 2 + 3 * 3);
+}
+
+TEST_F(NonMutatingTest, InnerProductWithBinaryOpsLvalueInit) {
+  int lvalue = 10;
+  EXPECT_EQ(absl::c_inner_product(sequence_, vector_, lvalue,
+                                  std::multiplies<int>(), std::plus<int>()),
+            10 * (1 + 1) * (2 + 2) * (3 + 3));
+}
+
+TEST_F(NumericMutatingTest, AdjacentDifference) {
+  auto last = absl::c_adjacent_difference(list_, std::back_inserter(output_));
+  *last = 1000;
+  std::vector<int> expected{1, 2 - 1, 3 - 2, 1000};
+  EXPECT_EQ(output_, expected);
+}
+
+TEST_F(NumericMutatingTest, AdjacentDifferenceWithBinaryOp) {
+  auto last = absl::c_adjacent_difference(list_, std::back_inserter(output_),
+                                          std::multiplies<int>());
+  *last = 1000;
+  std::vector<int> expected{1, 2 * 1, 3 * 2, 1000};
+  EXPECT_EQ(output_, expected);
+}
+
+TEST_F(NumericMutatingTest, PartialSum) {
+  auto last = absl::c_partial_sum(list_, std::back_inserter(output_));
+  *last = 1000;
+  std::vector<int> expected{1, 1 + 2, 1 + 2 + 3, 1000};
+  EXPECT_EQ(output_, expected);
+}
+
+TEST_F(NumericMutatingTest, PartialSumWithBinaryOp) {
+  auto last = absl::c_partial_sum(list_, std::back_inserter(output_),
+                                  std::multiplies<int>());
+  *last = 1000;
+  std::vector<int> expected{1, 1 * 2, 1 * 2 * 3, 1000};
+  EXPECT_EQ(output_, expected);
+}
+
+TEST_F(NonMutatingTest, LinearSearch) {
+  EXPECT_TRUE(absl::c_linear_search(container_, 3));
+  EXPECT_FALSE(absl::c_linear_search(container_, 4));
+}
+
+TEST_F(NonMutatingTest, AllOf) {
+  const std::vector<int>& v = vector_;
+  EXPECT_FALSE(absl::c_all_of(v, [](int x) { return x > 1; }));
+  EXPECT_TRUE(absl::c_all_of(v, [](int x) { return x > 0; }));
+}
+
+TEST_F(NonMutatingTest, AnyOf) {
+  const std::vector<int>& v = vector_;
+  EXPECT_TRUE(absl::c_any_of(v, [](int x) { return x > 2; }));
+  EXPECT_FALSE(absl::c_any_of(v, [](int x) { return x > 5; }));
+}
+
+TEST_F(NonMutatingTest, NoneOf) {
+  const std::vector<int>& v = vector_;
+  EXPECT_FALSE(absl::c_none_of(v, [](int x) { return x > 2; }));
+  EXPECT_TRUE(absl::c_none_of(v, [](int x) { return x > 5; }));
+}
+
+TEST_F(NonMutatingTest, MinMaxElementLess) {
+  std::pair<std::vector<int>::const_iterator, std::vector<int>::const_iterator>
+      p = absl::c_minmax_element(vector_, std::less<int>());
+  EXPECT_TRUE(p.first == vector_.begin());
+  EXPECT_TRUE(p.second == vector_.begin() + 2);
+}
+
+TEST_F(NonMutatingTest, MinMaxElementGreater) {
+  std::pair<std::vector<int>::const_iterator, std::vector<int>::const_iterator>
+      p = absl::c_minmax_element(vector_, std::greater<int>());
+  EXPECT_TRUE(p.first == vector_.begin() + 2);
+  EXPECT_TRUE(p.second == vector_.begin());
+}
+
+TEST_F(NonMutatingTest, MinMaxElementNoPredicate) {
+  std::pair<std::vector<int>::const_iterator, std::vector<int>::const_iterator>
+      p = absl::c_minmax_element(vector_);
+  EXPECT_TRUE(p.first == vector_.begin());
+  EXPECT_TRUE(p.second == vector_.begin() + 2);
+}
+
+class SortingTest : public testing::Test {
+ protected:
+  std::list<int> sorted_ = {1, 2, 3, 4};
+  std::list<int> unsorted_ = {2, 4, 1, 3};
+  std::list<int> reversed_ = {4, 3, 2, 1};
+};
+
+TEST_F(SortingTest, IsSorted) {
+  EXPECT_TRUE(absl::c_is_sorted(sorted_));
+  EXPECT_FALSE(absl::c_is_sorted(unsorted_));
+  EXPECT_FALSE(absl::c_is_sorted(reversed_));
+}
+
+TEST_F(SortingTest, IsSortedWithPredicate) {
+  EXPECT_FALSE(absl::c_is_sorted(sorted_, std::greater<int>()));
+  EXPECT_FALSE(absl::c_is_sorted(unsorted_, std::greater<int>()));
+  EXPECT_TRUE(absl::c_is_sorted(reversed_, std::greater<int>()));
+}
+
+TEST_F(SortingTest, IsSortedUntil) {
+  EXPECT_EQ(1, *absl::c_is_sorted_until(unsorted_));
+  EXPECT_EQ(4, *absl::c_is_sorted_until(unsorted_, std::greater<int>()));
+}
+
+TEST_F(SortingTest, NthElement) {
+  std::vector<int> unsorted = {2, 4, 1, 3};
+  absl::c_nth_element(unsorted, unsorted.begin() + 2);
+  EXPECT_THAT(unsorted, ElementsAre(Lt(3), Lt(3), 3, Gt(3)));
+  absl::c_nth_element(unsorted, unsorted.begin() + 2, std::greater<int>());
+  EXPECT_THAT(unsorted, ElementsAre(Gt(2), Gt(2), 2, Lt(2)));
+}
+
+TEST(MutatingTest, IsPartitioned) {
+  EXPECT_TRUE(
+      absl::c_is_partitioned(std::vector<int>{1, 3, 5, 2, 4, 6}, IsOdd));
+  EXPECT_FALSE(
+      absl::c_is_partitioned(std::vector<int>{1, 2, 3, 4, 5, 6}, IsOdd));
+  EXPECT_FALSE(
+      absl::c_is_partitioned(std::vector<int>{2, 4, 6, 1, 3, 5}, IsOdd));
+}
+
+TEST(MutatingTest, Partition) {
+  std::vector<int> actual = {1, 2, 3, 4, 5};
+  absl::c_partition(actual, IsOdd);
+  EXPECT_THAT(actual, Truly([](const std::vector<int>& c) {
+                return absl::c_is_partitioned(c, IsOdd);
+              }));
+}
+
+TEST(MutatingTest, StablePartition) {
+  std::vector<int> actual = {1, 2, 3, 4, 5};
+  absl::c_stable_partition(actual, IsOdd);
+  EXPECT_THAT(actual, ElementsAre(1, 3, 5, 2, 4));
+}
+
+TEST(MutatingTest, PartitionCopy) {
+  const std::vector<int> initial = {1, 2, 3, 4, 5};
+  std::vector<int> odds, evens;
+  auto ends = absl::c_partition_copy(initial, back_inserter(odds),
+                                     back_inserter(evens), IsOdd);
+  *ends.first = 7;
+  *ends.second = 6;
+  EXPECT_THAT(odds, ElementsAre(1, 3, 5, 7));
+  EXPECT_THAT(evens, ElementsAre(2, 4, 6));
+}
+
+TEST(MutatingTest, PartitionPoint) {
+  const std::vector<int> initial = {1, 3, 5, 2, 4};
+  auto middle = absl::c_partition_point(initial, IsOdd);
+  EXPECT_EQ(2, *middle);
+}
+
+TEST(MutatingTest, CopyMiddle) {
+  const std::vector<int> initial = {4, -1, -2, -3, 5};
+  const std::list<int> input = {1, 2, 3};
+  const std::vector<int> expected = {4, 1, 2, 3, 5};
+
+  std::list<int> test_list(initial.begin(), initial.end());
+  absl::c_copy(input, ++test_list.begin());
+  EXPECT_EQ(std::list<int>(expected.begin(), expected.end()), test_list);
+
+  std::vector<int> test_vector = initial;
+  absl::c_copy(input, test_vector.begin() + 1);
+  EXPECT_EQ(expected, test_vector);
+}
+
+TEST(MutatingTest, CopyFrontInserter) {
+  const std::list<int> initial = {4, 5};
+  const std::list<int> input = {1, 2, 3};
+  const std::list<int> expected = {3, 2, 1, 4, 5};
+
+  std::list<int> test_list = initial;
+  absl::c_copy(input, std::front_inserter(test_list));
+  EXPECT_EQ(expected, test_list);
+}
+
+TEST(MutatingTest, CopyBackInserter) {
+  const std::vector<int> initial = {4, 5};
+  const std::list<int> input = {1, 2, 3};
+  const std::vector<int> expected = {4, 5, 1, 2, 3};
+
+  std::list<int> test_list(initial.begin(), initial.end());
+  absl::c_copy(input, std::back_inserter(test_list));
+  EXPECT_EQ(std::list<int>(expected.begin(), expected.end()), test_list);
+
+  std::vector<int> test_vector = initial;
+  absl::c_copy(input, std::back_inserter(test_vector));
+  EXPECT_EQ(expected, test_vector);
+}
+
+TEST(MutatingTest, CopyN) {
+  const std::vector<int> initial = {1, 2, 3, 4, 5};
+  const std::vector<int> expected = {1, 2};
+  std::vector<int> actual;
+  absl::c_copy_n(initial, 2, back_inserter(actual));
+  EXPECT_EQ(expected, actual);
+}
+
+TEST(MutatingTest, CopyIf) {
+  const std::list<int> input = {1, 2, 3};
+  std::vector<int> output;
+  absl::c_copy_if(input, std::back_inserter(output),
+                  [](int i) { return i != 2; });
+  EXPECT_THAT(output, ElementsAre(1, 3));
+}
+
+TEST(MutatingTest, CopyBackward) {
+  std::vector<int> actual = {1, 2, 3, 4, 5};
+  std::vector<int> expected = {1, 2, 1, 2, 3};
+  absl::c_copy_backward(absl::MakeSpan(actual.data(), 3), actual.end());
+  EXPECT_EQ(expected, actual);
+}
+
+TEST(MutatingTest, Move) {
+  std::vector<std::unique_ptr<int>> src;
+  src.emplace_back(absl::make_unique<int>(1));
+  src.emplace_back(absl::make_unique<int>(2));
+  src.emplace_back(absl::make_unique<int>(3));
+  src.emplace_back(absl::make_unique<int>(4));
+  src.emplace_back(absl::make_unique<int>(5));
+
+  std::vector<std::unique_ptr<int>> dest = {};
+  absl::c_move(src, std::back_inserter(dest));
+  EXPECT_THAT(src, Each(IsNull()));
+  EXPECT_THAT(dest, ElementsAre(Pointee(1), Pointee(2), Pointee(3), Pointee(4),
+                                Pointee(5)));
+}
+
+TEST(MutatingTest, MoveBackward) {
+  std::vector<std::unique_ptr<int>> actual;
+  actual.emplace_back(absl::make_unique<int>(1));
+  actual.emplace_back(absl::make_unique<int>(2));
+  actual.emplace_back(absl::make_unique<int>(3));
+  actual.emplace_back(absl::make_unique<int>(4));
+  actual.emplace_back(absl::make_unique<int>(5));
+  auto subrange = absl::MakeSpan(actual.data(), 3);
+  absl::c_move_backward(subrange, actual.end());
+  EXPECT_THAT(actual, ElementsAre(IsNull(), IsNull(), Pointee(1), Pointee(2),
+                                  Pointee(3)));
+}
+
+TEST(MutatingTest, MoveWithRvalue) {
+  auto MakeRValueSrc = [] {
+    std::vector<std::unique_ptr<int>> src;
+    src.emplace_back(absl::make_unique<int>(1));
+    src.emplace_back(absl::make_unique<int>(2));
+    src.emplace_back(absl::make_unique<int>(3));
+    return src;
+  };
+
+  std::vector<std::unique_ptr<int>> dest = MakeRValueSrc();
+  absl::c_move(MakeRValueSrc(), std::back_inserter(dest));
+  EXPECT_THAT(dest, ElementsAre(Pointee(1), Pointee(2), Pointee(3), Pointee(1),
+                                Pointee(2), Pointee(3)));
+}
+
+TEST(MutatingTest, SwapRanges) {
+  std::vector<int> odds = {2, 4, 6};
+  std::vector<int> evens = {1, 3, 5};
+  absl::c_swap_ranges(odds, evens);
+  EXPECT_THAT(odds, ElementsAre(1, 3, 5));
+  EXPECT_THAT(evens, ElementsAre(2, 4, 6));
+
+  odds.pop_back();
+  absl::c_swap_ranges(odds, evens);
+  EXPECT_THAT(odds, ElementsAre(2, 4));
+  EXPECT_THAT(evens, ElementsAre(1, 3, 6));
+
+  absl::c_swap_ranges(evens, odds);
+  EXPECT_THAT(odds, ElementsAre(1, 3));
+  EXPECT_THAT(evens, ElementsAre(2, 4, 6));
+}
+
+TEST_F(NonMutatingTest, Transform) {
+  std::vector<int> x{0, 2, 4}, y, z;
+  auto end = absl::c_transform(x, back_inserter(y), std::negate<int>());
+  EXPECT_EQ(std::vector<int>({0, -2, -4}), y);
+  *end = 7;
+  EXPECT_EQ(std::vector<int>({0, -2, -4, 7}), y);
+
+  y = {1, 3, 0};
+  end = absl::c_transform(x, y, back_inserter(z), std::plus<int>());
+  EXPECT_EQ(std::vector<int>({1, 5, 4}), z);
+  *end = 7;
+  EXPECT_EQ(std::vector<int>({1, 5, 4, 7}), z);
+
+  z.clear();
+  y.pop_back();
+  end = absl::c_transform(x, y, std::back_inserter(z), std::plus<int>());
+  EXPECT_EQ(std::vector<int>({1, 5}), z);
+  *end = 7;
+  EXPECT_EQ(std::vector<int>({1, 5, 7}), z);
+
+  z.clear();
+  std::swap(x, y);
+  end = absl::c_transform(x, y, std::back_inserter(z), std::plus<int>());
+  EXPECT_EQ(std::vector<int>({1, 5}), z);
+  *end = 7;
+  EXPECT_EQ(std::vector<int>({1, 5, 7}), z);
+}
+
+TEST(MutatingTest, Replace) {
+  const std::vector<int> initial = {1, 2, 3, 1, 4, 5};
+  const std::vector<int> expected = {4, 2, 3, 4, 4, 5};
+
+  std::vector<int> test_vector = initial;
+  absl::c_replace(test_vector, 1, 4);
+  EXPECT_EQ(expected, test_vector);
+
+  std::list<int> test_list(initial.begin(), initial.end());
+  absl::c_replace(test_list, 1, 4);
+  EXPECT_EQ(std::list<int>(expected.begin(), expected.end()), test_list);
+}
+
+TEST(MutatingTest, ReplaceIf) {
+  std::vector<int> actual = {1, 2, 3, 4, 5};
+  const std::vector<int> expected = {0, 2, 0, 4, 0};
+
+  absl::c_replace_if(actual, IsOdd, 0);
+  EXPECT_EQ(expected, actual);
+}
+
+TEST(MutatingTest, ReplaceCopy) {
+  const std::vector<int> initial = {1, 2, 3, 1, 4, 5};
+  const std::vector<int> expected = {4, 2, 3, 4, 4, 5};
+
+  std::vector<int> actual;
+  absl::c_replace_copy(initial, back_inserter(actual), 1, 4);
+  EXPECT_EQ(expected, actual);
+}
+
+TEST(MutatingTest, Sort) {
+  std::vector<int> test_vector = {2, 3, 1, 4};
+  absl::c_sort(test_vector);
+  EXPECT_THAT(test_vector, ElementsAre(1, 2, 3, 4));
+}
+
+TEST(MutatingTest, SortWithPredicate) {
+  std::vector<int> test_vector = {2, 3, 1, 4};
+  absl::c_sort(test_vector, std::greater<int>());
+  EXPECT_THAT(test_vector, ElementsAre(4, 3, 2, 1));
+}
+
+// For absl::c_stable_sort tests. Needs an operator< that does not cover all
+// fields so that the test can check the sort preserves order of equal elements.
+struct Element {
+  int key;
+  int value;
+  friend bool operator<(const Element& e1, const Element& e2) {
+    return e1.key < e2.key;
+  }
+  // Make gmock print useful diagnostics.
+  friend std::ostream& operator<<(std::ostream& o, const Element& e) {
+    return o << "{" << e.key << ", " << e.value << "}";
+  }
+};
+
+MATCHER_P2(IsElement, key, value, "") {
+  return arg.key == key && arg.value == value;
+}
+
+TEST(MutatingTest, StableSort) {
+  std::vector<Element> test_vector = {{1, 1}, {2, 1}, {2, 0}, {1, 0}, {2, 2}};
+  absl::c_stable_sort(test_vector);
+  EXPECT_THAT(test_vector,
+              ElementsAre(IsElement(1, 1), IsElement(1, 0), IsElement(2, 1),
+                          IsElement(2, 0), IsElement(2, 2)));
+}
+
+TEST(MutatingTest, StableSortWithPredicate) {
+  std::vector<Element> test_vector = {{1, 1}, {2, 1}, {2, 0}, {1, 0}, {2, 2}};
+  absl::c_stable_sort(test_vector, [](const Element& e1, const Element& e2) {
+    return e2 < e1;
+  });
+  EXPECT_THAT(test_vector,
+              ElementsAre(IsElement(2, 1), IsElement(2, 0), IsElement(2, 2),
+                          IsElement(1, 1), IsElement(1, 0)));
+}
+
+TEST(MutatingTest, ReplaceCopyIf) {
+  const std::vector<int> initial = {1, 2, 3, 4, 5};
+  const std::vector<int> expected = {0, 2, 0, 4, 0};
+
+  std::vector<int> actual;
+  absl::c_replace_copy_if(initial, back_inserter(actual), IsOdd, 0);
+  EXPECT_EQ(expected, actual);
+}
+
+TEST(MutatingTest, Fill) {
+  std::vector<int> actual(5);
+  absl::c_fill(actual, 1);
+  EXPECT_THAT(actual, ElementsAre(1, 1, 1, 1, 1));
+}
+
+TEST(MutatingTest, FillN) {
+  std::vector<int> actual(5, 0);
+  absl::c_fill_n(actual, 2, 1);
+  EXPECT_THAT(actual, ElementsAre(1, 1, 0, 0, 0));
+}
+
+TEST(MutatingTest, Generate) {
+  std::vector<int> actual(5);
+  int x = 0;
+  absl::c_generate(actual, [&x]() { return ++x; });
+  EXPECT_THAT(actual, ElementsAre(1, 2, 3, 4, 5));
+}
+
+TEST(MutatingTest, GenerateN) {
+  std::vector<int> actual(5, 0);
+  int x = 0;
+  absl::c_generate_n(actual, 3, [&x]() { return ++x; });
+  EXPECT_THAT(actual, ElementsAre(1, 2, 3, 0, 0));
+}
+
+TEST(MutatingTest, RemoveCopy) {
+  std::vector<int> actual;
+  absl::c_remove_copy(std::vector<int>{1, 2, 3}, back_inserter(actual), 2);
+  EXPECT_THAT(actual, ElementsAre(1, 3));
+}
+
+TEST(MutatingTest, RemoveCopyIf) {
+  std::vector<int> actual;
+  absl::c_remove_copy_if(std::vector<int>{1, 2, 3}, back_inserter(actual),
+                         IsOdd);
+  EXPECT_THAT(actual, ElementsAre(2));
+}
+
+TEST(MutatingTest, UniqueCopy) {
+  std::vector<int> actual;
+  absl::c_unique_copy(std::vector<int>{1, 2, 2, 2, 3, 3, 2},
+                      back_inserter(actual));
+  EXPECT_THAT(actual, ElementsAre(1, 2, 3, 2));
+}
+
+TEST(MutatingTest, UniqueCopyWithPredicate) {
+  std::vector<int> actual;
+  absl::c_unique_copy(std::vector<int>{1, 2, 3, -1, -2, -3, 1},
+                      back_inserter(actual),
+                      [](int x, int y) { return (x < 0) == (y < 0); });
+  EXPECT_THAT(actual, ElementsAre(1, -1, 1));
+}
+
+TEST(MutatingTest, Reverse) {
+  std::vector<int> test_vector = {1, 2, 3, 4};
+  absl::c_reverse(test_vector);
+  EXPECT_THAT(test_vector, ElementsAre(4, 3, 2, 1));
+
+  std::list<int> test_list = {1, 2, 3, 4};
+  absl::c_reverse(test_list);
+  EXPECT_THAT(test_list, ElementsAre(4, 3, 2, 1));
+}
+
+TEST(MutatingTest, ReverseCopy) {
+  std::vector<int> actual;
+  absl::c_reverse_copy(std::vector<int>{1, 2, 3, 4}, back_inserter(actual));
+  EXPECT_THAT(actual, ElementsAre(4, 3, 2, 1));
+}
+
+TEST(MutatingTest, Rotate) {
+  std::vector<int> actual = {1, 2, 3, 4};
+  auto it = absl::c_rotate(actual, actual.begin() + 2);
+  EXPECT_THAT(actual, testing::ElementsAreArray({3, 4, 1, 2}));
+  EXPECT_EQ(*it, 1);
+}
+
+TEST(MutatingTest, RotateCopy) {
+  std::vector<int> initial = {1, 2, 3, 4};
+  std::vector<int> actual;
+  auto end =
+      absl::c_rotate_copy(initial, initial.begin() + 2, back_inserter(actual));
+  *end = 5;
+  EXPECT_THAT(actual, ElementsAre(3, 4, 1, 2, 5));
+}
+
+template <typename T>
+T RandomlySeededPrng() {
+  std::random_device rdev;
+  std::seed_seq::result_type data[T::state_size];
+  std::generate_n(data, T::state_size, std::ref(rdev));
+  std::seed_seq prng_seed(data, data + T::state_size);
+  return T(prng_seed);
+}
+
+TEST(MutatingTest, Shuffle) {
+  std::vector<int> actual = {1, 2, 3, 4, 5};
+  absl::c_shuffle(actual, RandomlySeededPrng<std::mt19937_64>());
+  EXPECT_THAT(actual, UnorderedElementsAre(1, 2, 3, 4, 5));
+}
+
+TEST(MutatingTest, Sample) {
+  std::vector<int> actual;
+  absl::c_sample(std::vector<int>{1, 2, 3, 4, 5}, std::back_inserter(actual), 3,
+                 RandomlySeededPrng<std::mt19937_64>());
+  EXPECT_THAT(actual, IsSubsetOf({1, 2, 3, 4, 5}));
+  EXPECT_THAT(actual, SizeIs(3));
+}
+
+TEST(MutatingTest, PartialSort) {
+  std::vector<int> sequence{5, 3, 42, 0};
+  absl::c_partial_sort(sequence, sequence.begin() + 2);
+  EXPECT_THAT(absl::MakeSpan(sequence.data(), 2), ElementsAre(0, 3));
+  absl::c_partial_sort(sequence, sequence.begin() + 2, std::greater<int>());
+  EXPECT_THAT(absl::MakeSpan(sequence.data(), 2), ElementsAre(42, 5));
+}
+
+TEST(MutatingTest, PartialSortCopy) {
+  const std::vector<int> initial = {5, 3, 42, 0};
+  std::vector<int> actual(2);
+  absl::c_partial_sort_copy(initial, actual);
+  EXPECT_THAT(actual, ElementsAre(0, 3));
+  absl::c_partial_sort_copy(initial, actual, std::greater<int>());
+  EXPECT_THAT(actual, ElementsAre(42, 5));
+}
+
+TEST(MutatingTest, Merge) {
+  std::vector<int> actual;
+  absl::c_merge(std::vector<int>{1, 3, 5}, std::vector<int>{2, 4},
+                back_inserter(actual));
+  EXPECT_THAT(actual, ElementsAre(1, 2, 3, 4, 5));
+}
+
+TEST(MutatingTest, MergeWithComparator) {
+  std::vector<int> actual;
+  absl::c_merge(std::vector<int>{5, 3, 1}, std::vector<int>{4, 2},
+                back_inserter(actual), std::greater<int>());
+  EXPECT_THAT(actual, ElementsAre(5, 4, 3, 2, 1));
+}
+
+TEST(MutatingTest, InplaceMerge) {
+  std::vector<int> actual = {1, 3, 5, 2, 4};
+  absl::c_inplace_merge(actual, actual.begin() + 3);
+  EXPECT_THAT(actual, ElementsAre(1, 2, 3, 4, 5));
+}
+
+TEST(MutatingTest, InplaceMergeWithComparator) {
+  std::vector<int> actual = {5, 3, 1, 4, 2};
+  absl::c_inplace_merge(actual, actual.begin() + 3, std::greater<int>());
+  EXPECT_THAT(actual, ElementsAre(5, 4, 3, 2, 1));
+}
+
+class SetOperationsTest : public testing::Test {
+ protected:
+  std::vector<int> a_ = {1, 2, 3};
+  std::vector<int> b_ = {1, 3, 5};
+
+  std::vector<int> a_reversed_ = {3, 2, 1};
+  std::vector<int> b_reversed_ = {5, 3, 1};
+};
+
+TEST_F(SetOperationsTest, SetUnion) {
+  std::vector<int> actual;
+  absl::c_set_union(a_, b_, back_inserter(actual));
+  EXPECT_THAT(actual, ElementsAre(1, 2, 3, 5));
+}
+
+TEST_F(SetOperationsTest, SetUnionWithComparator) {
+  std::vector<int> actual;
+  absl::c_set_union(a_reversed_, b_reversed_, back_inserter(actual),
+                    std::greater<int>());
+  EXPECT_THAT(actual, ElementsAre(5, 3, 2, 1));
+}
+
+TEST_F(SetOperationsTest, SetIntersection) {
+  std::vector<int> actual;
+  absl::c_set_intersection(a_, b_, back_inserter(actual));
+  EXPECT_THAT(actual, ElementsAre(1, 3));
+}
+
+TEST_F(SetOperationsTest, SetIntersectionWithComparator) {
+  std::vector<int> actual;
+  absl::c_set_intersection(a_reversed_, b_reversed_, back_inserter(actual),
+                           std::greater<int>());
+  EXPECT_THAT(actual, ElementsAre(3, 1));
+}
+
+TEST_F(SetOperationsTest, SetDifference) {
+  std::vector<int> actual;
+  absl::c_set_difference(a_, b_, back_inserter(actual));
+  EXPECT_THAT(actual, ElementsAre(2));
+}
+
+TEST_F(SetOperationsTest, SetDifferenceWithComparator) {
+  std::vector<int> actual;
+  absl::c_set_difference(a_reversed_, b_reversed_, back_inserter(actual),
+                         std::greater<int>());
+  EXPECT_THAT(actual, ElementsAre(2));
+}
+
+TEST_F(SetOperationsTest, SetSymmetricDifference) {
+  std::vector<int> actual;
+  absl::c_set_symmetric_difference(a_, b_, back_inserter(actual));
+  EXPECT_THAT(actual, ElementsAre(2, 5));
+}
+
+TEST_F(SetOperationsTest, SetSymmetricDifferenceWithComparator) {
+  std::vector<int> actual;
+  absl::c_set_symmetric_difference(a_reversed_, b_reversed_,
+                                   back_inserter(actual), std::greater<int>());
+  EXPECT_THAT(actual, ElementsAre(5, 2));
+}
+
+TEST(HeapOperationsTest, WithoutComparator) {
+  std::vector<int> heap = {1, 2, 3};
+  EXPECT_FALSE(absl::c_is_heap(heap));
+  absl::c_make_heap(heap);
+  EXPECT_TRUE(absl::c_is_heap(heap));
+  heap.push_back(4);
+  EXPECT_EQ(3, absl::c_is_heap_until(heap) - heap.begin());
+  absl::c_push_heap(heap);
+  EXPECT_EQ(4, heap[0]);
+  absl::c_pop_heap(heap);
+  EXPECT_EQ(4, heap[3]);
+  absl::c_make_heap(heap);
+  absl::c_sort_heap(heap);
+  EXPECT_THAT(heap, ElementsAre(1, 2, 3, 4));
+  EXPECT_FALSE(absl::c_is_heap(heap));
+}
+
+TEST(HeapOperationsTest, WithComparator) {
+  using greater = std::greater<int>;
+  std::vector<int> heap = {3, 2, 1};
+  EXPECT_FALSE(absl::c_is_heap(heap, greater()));
+  absl::c_make_heap(heap, greater());
+  EXPECT_TRUE(absl::c_is_heap(heap, greater()));
+  heap.push_back(0);
+  EXPECT_EQ(3, absl::c_is_heap_until(heap, greater()) - heap.begin());
+  absl::c_push_heap(heap, greater());
+  EXPECT_EQ(0, heap[0]);
+  absl::c_pop_heap(heap, greater());
+  EXPECT_EQ(0, heap[3]);
+  absl::c_make_heap(heap, greater());
+  absl::c_sort_heap(heap, greater());
+  EXPECT_THAT(heap, ElementsAre(3, 2, 1, 0));
+  EXPECT_FALSE(absl::c_is_heap(heap, greater()));
+}
+
+TEST(MutatingTest, PermutationOperations) {
+  std::vector<int> initial = {1, 2, 3, 4};
+  std::vector<int> permuted = initial;
+
+  absl::c_next_permutation(permuted);
+  EXPECT_TRUE(absl::c_is_permutation(initial, permuted));
+  EXPECT_TRUE(absl::c_is_permutation(initial, permuted, std::equal_to<int>()));
+
+  std::vector<int> permuted2 = initial;
+  absl::c_prev_permutation(permuted2, std::greater<int>());
+  EXPECT_EQ(permuted, permuted2);
+
+  absl::c_prev_permutation(permuted);
+  EXPECT_EQ(initial, permuted);
+}
+
+}  // namespace

+ 883 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/BUILD.bazel

@@ -0,0 +1,883 @@
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+load(
+    "//absl:copts/configure_copts.bzl",
+    "ABSL_DEFAULT_COPTS",
+    "ABSL_DEFAULT_LINKOPTS",
+    "ABSL_TEST_COPTS",
+)
+
+package(
+    default_visibility = ["//visibility:public"],
+    features = [
+        "header_modules",
+        "layering_check",
+        "parse_headers",
+    ],
+)
+
+licenses(["notice"])
+
+cc_library(
+    name = "atomic_hook",
+    hdrs = ["internal/atomic_hook.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl:__subpackages__",
+    ],
+    deps = [
+        ":config",
+        ":core_headers",
+    ],
+)
+
+cc_library(
+    name = "errno_saver",
+    hdrs = ["internal/errno_saver.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl:__subpackages__",
+    ],
+    deps = [":config"],
+)
+
+cc_library(
+    name = "log_severity",
+    srcs = ["log_severity.cc"],
+    hdrs = ["log_severity.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":config",
+        ":core_headers",
+    ],
+)
+
+cc_library(
+    name = "no_destructor",
+    hdrs = ["no_destructor.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":config",
+        ":nullability",
+    ],
+)
+
+cc_library(
+    name = "nullability",
+    srcs = ["internal/nullability_impl.h"],
+    hdrs = ["nullability.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":core_headers",
+        "//absl/meta:type_traits",
+    ],
+)
+
+cc_library(
+    name = "raw_logging_internal",
+    srcs = ["internal/raw_logging.cc"],
+    hdrs = ["internal/raw_logging.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl:__subpackages__",
+    ],
+    deps = [
+        ":atomic_hook",
+        ":config",
+        ":core_headers",
+        ":errno_saver",
+        ":log_severity",
+    ],
+)
+
+cc_library(
+    name = "spinlock_wait",
+    srcs = [
+        "internal/spinlock_akaros.inc",
+        "internal/spinlock_linux.inc",
+        "internal/spinlock_posix.inc",
+        "internal/spinlock_wait.cc",
+        "internal/spinlock_win32.inc",
+    ],
+    hdrs = ["internal/spinlock_wait.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl/base:__pkg__",
+    ],
+    deps = [
+        ":base_internal",
+        ":core_headers",
+        ":errno_saver",
+    ],
+)
+
+cc_library(
+    name = "config",
+    hdrs = [
+        "config.h",
+        "options.h",
+        "policy_checks.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+)
+
+cc_library(
+    name = "cycleclock_internal",
+    hdrs = [
+        "internal/cycleclock_config.h",
+        "internal/unscaledcycleclock_config.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl:__subpackages__",
+    ],
+    deps = [
+        ":base_internal",
+        ":config",
+    ],
+)
+
+cc_library(
+    name = "dynamic_annotations",
+    srcs = [
+        "internal/dynamic_annotations.h",
+    ],
+    hdrs = [
+        "dynamic_annotations.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":config",
+        ":core_headers",
+    ],
+)
+
+cc_library(
+    name = "core_headers",
+    hdrs = [
+        "attributes.h",
+        "const_init.h",
+        "macros.h",
+        "optimization.h",
+        "port.h",
+        "thread_annotations.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":config",
+    ],
+)
+
+cc_library(
+    name = "malloc_internal",
+    srcs = [
+        "internal/low_level_alloc.cc",
+    ],
+    hdrs = [
+        "internal/direct_mmap.h",
+        "internal/low_level_alloc.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS + select({
+        "//conditions:default": [],
+    }),
+    linkopts = select({
+        "//absl:msvc_compiler": [],
+        "//absl:clang-cl_compiler": [],
+        "//absl:wasm": [],
+        "//conditions:default": ["-pthread"],
+    }) + ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//visibility:public",
+    ],
+    deps = [
+        ":base",
+        ":base_internal",
+        ":config",
+        ":core_headers",
+        ":dynamic_annotations",
+        ":raw_logging_internal",
+    ],
+)
+
+cc_library(
+    name = "base_internal",
+    hdrs = [
+        "internal/hide_ptr.h",
+        "internal/identity.h",
+        "internal/inline_variable.h",
+        "internal/invoke.h",
+        "internal/scheduling_mode.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl:__subpackages__",
+    ],
+    deps = [
+        ":config",
+        "//absl/meta:type_traits",
+    ],
+)
+
+cc_library(
+    name = "base",
+    srcs = [
+        "internal/cycleclock.cc",
+        "internal/spinlock.cc",
+        "internal/sysinfo.cc",
+        "internal/thread_identity.cc",
+        "internal/unscaledcycleclock.cc",
+    ],
+    hdrs = [
+        "call_once.h",
+        "casts.h",
+        "internal/cycleclock.h",
+        "internal/low_level_scheduling.h",
+        "internal/per_thread_tls.h",
+        "internal/spinlock.h",
+        "internal/sysinfo.h",
+        "internal/thread_identity.h",
+        "internal/tsan_mutex_interface.h",
+        "internal/unscaledcycleclock.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = select({
+        "//absl:msvc_compiler": [
+            "-DEFAULTLIB:advapi32.lib",
+        ],
+        "//absl:clang-cl_compiler": [
+            "-DEFAULTLIB:advapi32.lib",
+        ],
+        "//absl:mingw_compiler": [
+            "-DEFAULTLIB:advapi32.lib",
+            "-ladvapi32",
+        ],
+        "//absl:wasm": [],
+        "//conditions:default": ["-pthread"],
+    }) + ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":atomic_hook",
+        ":base_internal",
+        ":config",
+        ":core_headers",
+        ":cycleclock_internal",
+        ":dynamic_annotations",
+        ":log_severity",
+        ":nullability",
+        ":raw_logging_internal",
+        ":spinlock_wait",
+        "//absl/meta:type_traits",
+    ],
+)
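+
+# Illustrative sketch (not part of the upstream file): a consuming BUILD file
+# would typically depend on the targets above roughly as follows, assuming the
+# Abseil repository is mapped as @com_google_absl:
+#
+#   cc_library(
+#       name = "my_lib",
+#       srcs = ["my_lib.cc"],
+#       deps = ["@com_google_absl//absl/base"],
+#   )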
+
+cc_library(
+    name = "atomic_hook_test_helper",
+    testonly = True,
+    srcs = ["internal/atomic_hook_test_helper.cc"],
+    hdrs = ["internal/atomic_hook_test_helper.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":atomic_hook",
+        ":core_headers",
+    ],
+)
+
+cc_test(
+    name = "atomic_hook_test",
+    size = "small",
+    srcs = ["internal/atomic_hook_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":atomic_hook",
+        ":atomic_hook_test_helper",
+        ":core_headers",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "bit_cast_test",
+    size = "small",
+    srcs = [
+        "bit_cast_test.cc",
+    ],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":base",
+        ":core_headers",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_library(
+    name = "throw_delegate",
+    srcs = ["internal/throw_delegate.cc"],
+    hdrs = ["internal/throw_delegate.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl:__subpackages__",
+    ],
+    deps = [
+        ":config",
+        ":raw_logging_internal",
+    ],
+)
+
+cc_test(
+    name = "throw_delegate_test",
+    srcs = ["throw_delegate_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":config",
+        ":throw_delegate",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "errno_saver_test",
+    size = "small",
+    srcs = ["internal/errno_saver_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":errno_saver",
+        ":strerror",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_library(
+    name = "exception_testing",
+    testonly = True,
+    hdrs = ["internal/exception_testing.h"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl:__subpackages__",
+    ],
+    deps = [
+        ":config",
+        "@com_google_googletest//:gtest",
+    ],
+)
+
+cc_library(
+    name = "pretty_function",
+    hdrs = ["internal/pretty_function.h"],
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl:__subpackages__",
+    ],
+)
+
+cc_library(
+    name = "exception_safety_testing",
+    testonly = True,
+    srcs = ["internal/exception_safety_testing.cc"],
+    hdrs = ["internal/exception_safety_testing.h"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":config",
+        ":pretty_function",
+        "//absl/memory",
+        "//absl/meta:type_traits",
+        "//absl/strings",
+        "//absl/utility",
+        "@com_google_googletest//:gtest",
+    ],
+)
+
+cc_test(
+    name = "exception_safety_testing_test",
+    srcs = ["exception_safety_testing_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":exception_safety_testing",
+        "//absl/memory",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "inline_variable_test",
+    size = "small",
+    srcs = [
+        "inline_variable_test.cc",
+        "inline_variable_test_a.cc",
+        "inline_variable_test_b.cc",
+        "internal/inline_variable_testing.h",
+    ],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":base_internal",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "invoke_test",
+    size = "small",
+    srcs = ["invoke_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":base_internal",
+        "//absl/memory",
+        "//absl/strings",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+# Common test library made available for use in non-absl code that overrides
+# AbslInternalSpinLockDelay and AbslInternalSpinLockWake.
+cc_library(
+    name = "spinlock_test_common",
+    testonly = True,
+    srcs = ["spinlock_test_common.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":base",
+        ":base_internal",
+        ":config",
+        ":core_headers",
+        "//absl/synchronization",
+        "@com_google_googletest//:gtest",
+    ],
+    alwayslink = 1,
+)
+
+cc_test(
+    name = "spinlock_test",
+    size = "medium",
+    srcs = ["spinlock_test_common.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test_wasm",
+    ],
+    deps = [
+        ":base",
+        ":base_internal",
+        ":config",
+        ":core_headers",
+        "//absl/synchronization",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_library(
+    name = "spinlock_benchmark_common",
+    testonly = True,
+    srcs = ["internal/spinlock_benchmark.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl/base:__pkg__",
+    ],
+    deps = [
+        ":base",
+        ":base_internal",
+        ":no_destructor",
+        ":raw_logging_internal",
+        "//absl/synchronization",
+        "@com_github_google_benchmark//:benchmark_main",
+    ],
+    alwayslink = 1,
+)
+
+cc_binary(
+    name = "spinlock_benchmark",
+    testonly = True,
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = ["benchmark"],
+    visibility = ["//visibility:private"],
+    deps = [
+        ":spinlock_benchmark_common",
+    ],
+)
+
+cc_library(
+    name = "endian",
+    hdrs = [
+        "internal/endian.h",
+        "internal/unaligned_access.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":base",
+        ":config",
+        ":core_headers",
+        ":nullability",
+    ],
+)
+
+cc_test(
+    name = "endian_test",
+    srcs = ["internal/endian_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    deps = [
+        ":config",
+        ":endian",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "config_test",
+    srcs = ["config_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":config",
+        "//absl/synchronization:thread_pool",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "call_once_test",
+    srcs = ["call_once_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":base",
+        ":core_headers",
+        "//absl/synchronization",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "no_destructor_test",
+    srcs = ["no_destructor_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":config",
+        ":no_destructor",
+        ":raw_logging_internal",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_binary(
+    name = "no_destructor_benchmark",
+    testonly = True,
+    srcs = ["no_destructor_benchmark.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = ["benchmark"],
+    visibility = ["//visibility:private"],
+    deps = [
+        ":no_destructor",
+        ":raw_logging_internal",
+        "@com_github_google_benchmark//:benchmark_main",
+    ],
+)
+
+cc_test(
+    name = "nullability_test",
+    srcs = ["nullability_test.cc"],
+    deps = [
+        ":core_headers",
+        ":nullability",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "raw_logging_test",
+    srcs = ["raw_logging_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":raw_logging_internal",
+        "//absl/strings",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "sysinfo_test",
+    size = "small",
+    srcs = ["internal/sysinfo_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":base",
+        "//absl/synchronization",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "low_level_alloc_test",
+    size = "medium",
+    srcs = ["internal/low_level_alloc_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test_ios_x86_64",
+        "no_test_wasm",
+    ],
+    deps = [
+        ":malloc_internal",
+        "//absl/container:node_hash_map",
+    ],
+)
+
+cc_test(
+    name = "thread_identity_test",
+    size = "small",
+    srcs = ["internal/thread_identity_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = [
+        "no_test_wasm",
+    ],
+    deps = [
+        ":base",
+        ":core_headers",
+        "//absl/synchronization",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "thread_identity_benchmark",
+    srcs = ["internal/thread_identity_benchmark.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = ["benchmark"],
+    visibility = ["//visibility:private"],
+    deps = [
+        ":base",
+        "//absl/synchronization",
+        "@com_github_google_benchmark//:benchmark_main",
+        "@com_google_googletest//:gtest",
+    ],
+)
+
+cc_library(
+    name = "scoped_set_env",
+    testonly = True,
+    srcs = ["internal/scoped_set_env.cc"],
+    hdrs = ["internal/scoped_set_env.h"],
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl:__subpackages__",
+    ],
+    deps = [
+        ":config",
+        ":raw_logging_internal",
+    ],
+)
+
+cc_test(
+    name = "scoped_set_env_test",
+    size = "small",
+    srcs = ["internal/scoped_set_env_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":scoped_set_env",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "log_severity_test",
+    size = "small",
+    srcs = ["log_severity_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":log_severity",
+        "//absl/flags:flag_internal",
+        "//absl/flags:marshalling",
+        "//absl/strings",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_library(
+    name = "strerror",
+    srcs = ["internal/strerror.cc"],
+    hdrs = ["internal/strerror.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl:__subpackages__",
+    ],
+    deps = [
+        ":config",
+        ":core_headers",
+        ":errno_saver",
+    ],
+)
+
+cc_test(
+    name = "strerror_test",
+    size = "small",
+    srcs = ["internal/strerror_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":strerror",
+        "//absl/strings",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_binary(
+    name = "strerror_benchmark",
+    testonly = True,
+    srcs = ["internal/strerror_benchmark.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = ["benchmark"],
+    visibility = ["//visibility:private"],
+    deps = [
+        ":strerror",
+        "@com_github_google_benchmark//:benchmark_main",
+    ],
+)
+
+cc_library(
+    name = "fast_type_id",
+    hdrs = ["internal/fast_type_id.h"],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    visibility = [
+        "//absl:__subpackages__",
+    ],
+    deps = [
+        ":config",
+    ],
+)
+
+cc_test(
+    name = "fast_type_id_test",
+    size = "small",
+    srcs = ["internal/fast_type_id_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":fast_type_id",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_library(
+    name = "prefetch",
+    hdrs = [
+        "prefetch.h",
+    ],
+    copts = ABSL_DEFAULT_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":config",
+        ":core_headers",
+    ],
+)
+
+cc_test(
+    name = "prefetch_test",
+    size = "small",
+    srcs = [
+        "prefetch_test.cc",
+    ],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":prefetch",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "unique_small_name_test",
+    size = "small",
+    srcs = ["internal/unique_small_name_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    linkstatic = 1,
+    deps = [
+        ":core_headers",
+        "//absl/strings",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)
+
+cc_test(
+    name = "optimization_test",
+    size = "small",
+    srcs = ["optimization_test.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    deps = [
+        ":core_headers",
+        "//absl/types:optional",
+        "@com_google_googletest//:gtest",
+        "@com_google_googletest//:gtest_main",
+    ],
+)

+ 740 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/CMakeLists.txt

@@ -0,0 +1,740 @@
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+find_library(LIBRT rt)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    atomic_hook
+  HDRS
+    "internal/atomic_hook.h"
+  DEPS
+    absl::config
+    absl::core_headers
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    errno_saver
+  HDRS
+    "internal/errno_saver.h"
+  DEPS
+    absl::config
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+)
+
+absl_cc_library(
+  NAME
+    log_severity
+  HDRS
+    "log_severity.h"
+  SRCS
+    "log_severity.cc"
+  DEPS
+    absl::config
+    absl::core_headers
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+)
+
+absl_cc_library(
+  NAME
+    no_destructor
+  HDRS
+    "no_destructor.h"
+  DEPS
+    absl::config
+    absl::nullability
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+)
+
+absl_cc_library(
+  NAME
+    nullability
+  HDRS
+    "nullability.h"
+  SRCS
+    "internal/nullability_impl.h"
+  DEPS
+    absl::core_headers
+    absl::type_traits
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+)
+
+absl_cc_test(
+  NAME
+    nullability_test
+  SRCS
+    "nullability_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::core_headers
+    absl::nullability
+    GTest::gtest_main
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    raw_logging_internal
+  HDRS
+    "internal/raw_logging.h"
+  SRCS
+    "internal/raw_logging.cc"
+  DEPS
+    absl::atomic_hook
+    absl::config
+    absl::core_headers
+    absl::errno_saver
+    absl::log_severity
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    spinlock_wait
+  HDRS
+    "internal/spinlock_wait.h"
+  SRCS
+    "internal/spinlock_akaros.inc"
+    "internal/spinlock_linux.inc"
+    "internal/spinlock_posix.inc"
+    "internal/spinlock_wait.cc"
+    "internal/spinlock_win32.inc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::base_internal
+    absl::core_headers
+    absl::errno_saver
+)
+
+absl_cc_library(
+  NAME
+    config
+  HDRS
+    "config.h"
+    "options.h"
+    "policy_checks.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  PUBLIC
+)
+
+absl_cc_library(
+  NAME
+    dynamic_annotations
+  HDRS
+    "dynamic_annotations.h"
+  SRCS
+    "internal/dynamic_annotations.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+  PUBLIC
+)
+
+absl_cc_library(
+  NAME
+    core_headers
+  HDRS
+    "attributes.h"
+    "const_init.h"
+    "macros.h"
+    "optimization.h"
+    "port.h"
+    "thread_annotations.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+  PUBLIC
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    malloc_internal
+  HDRS
+    "internal/direct_mmap.h"
+    "internal/low_level_alloc.h"
+  SRCS
+    "internal/low_level_alloc.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::base
+    absl::base_internal
+    absl::config
+    absl::core_headers
+    absl::dynamic_annotations
+    absl::raw_logging_internal
+    Threads::Threads
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    base_internal
+  HDRS
+    "internal/hide_ptr.h"
+    "internal/identity.h"
+    "internal/inline_variable.h"
+    "internal/invoke.h"
+    "internal/scheduling_mode.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+    absl::type_traits
+)
+
+absl_cc_library(
+  NAME
+    base
+  HDRS
+    "call_once.h"
+    "casts.h"
+    "internal/cycleclock.h"
+    "internal/cycleclock_config.h"
+    "internal/low_level_scheduling.h"
+    "internal/per_thread_tls.h"
+    "internal/spinlock.h"
+    "internal/sysinfo.h"
+    "internal/thread_identity.h"
+    "internal/tsan_mutex_interface.h"
+    "internal/unscaledcycleclock.h"
+    "internal/unscaledcycleclock_config.h"
+  SRCS
+    "internal/cycleclock.cc"
+    "internal/spinlock.cc"
+    "internal/sysinfo.cc"
+    "internal/thread_identity.cc"
+    "internal/unscaledcycleclock.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+    $<$<BOOL:${LIBRT}>:-lrt>
+    $<$<BOOL:${MINGW}>:-ladvapi32>
+  DEPS
+    absl::atomic_hook
+    absl::base_internal
+    absl::config
+    absl::core_headers
+    absl::dynamic_annotations
+    absl::log_severity
+    absl::nullability
+    absl::raw_logging_internal
+    absl::spinlock_wait
+    absl::type_traits
+    Threads::Threads
+  PUBLIC
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    throw_delegate
+  HDRS
+    "internal/throw_delegate.h"
+  SRCS
+    "internal/throw_delegate.cc"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+    absl::raw_logging_internal
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    exception_testing
+  HDRS
+    "internal/exception_testing.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+    GTest::gtest
+  TESTONLY
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    pretty_function
+  HDRS
+    "internal/pretty_function.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    exception_safety_testing
+  HDRS
+    "internal/exception_safety_testing.h"
+  SRCS
+    "internal/exception_safety_testing.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::config
+    absl::pretty_function
+    absl::memory
+    absl::meta
+    absl::strings
+    absl::utility
+    GTest::gtest
+  TESTONLY
+)
+
+absl_cc_test(
+  NAME
+    absl_exception_safety_testing_test
+  SRCS
+    "exception_safety_testing_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::exception_safety_testing
+    absl::memory
+    GTest::gtest_main
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    atomic_hook_test_helper
+  SRCS
+    "internal/atomic_hook_test_helper.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::atomic_hook
+    absl::core_headers
+  TESTONLY
+)
+
+absl_cc_test(
+  NAME
+    atomic_hook_test
+  SRCS
+    "internal/atomic_hook_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::atomic_hook_test_helper
+    absl::atomic_hook
+    absl::core_headers
+    GTest::gmock
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    bit_cast_test
+  SRCS
+    "bit_cast_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::base
+    absl::core_headers
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    errno_saver_test
+  SRCS
+    "internal/errno_saver_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::errno_saver
+    absl::strerror
+    GTest::gmock
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    throw_delegate_test
+  SRCS
+    "throw_delegate_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::base
+    absl::config
+    absl::throw_delegate
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    inline_variable_test
+  SRCS
+    "internal/inline_variable_testing.h"
+    "inline_variable_test.cc"
+    "inline_variable_test_a.cc"
+    "inline_variable_test_b.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::base_internal
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    invoke_test
+  SRCS
+    "invoke_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::base_internal
+    absl::memory
+    absl::strings
+    GTest::gmock
+    GTest::gtest_main
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    spinlock_test_common
+  SRCS
+    "spinlock_test_common.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::base
+    absl::config
+    absl::base_internal
+    absl::core_headers
+    absl::synchronization
+    GTest::gtest
+  TESTONLY
+)
+
+# In the Bazel BUILD file this target uses "alwayslink = 1", which is not implemented here.
+absl_cc_test(
+  NAME
+    spinlock_test
+  SRCS
+    "spinlock_test_common.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::base
+    absl::base_internal
+    absl::config
+    absl::core_headers
+    absl::synchronization
+    GTest::gtest_main
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    endian
+  HDRS
+    "internal/endian.h"
+    "internal/unaligned_access.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::base
+    absl::config
+    absl::core_headers
+    absl::nullability
+  PUBLIC
+)
+
+absl_cc_test(
+  NAME
+    endian_test
+  SRCS
+    "internal/endian_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::base
+    absl::config
+    absl::endian
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    config_test
+  SRCS
+    "config_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::config
+    absl::synchronization
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    call_once_test
+  SRCS
+    "call_once_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::base
+    absl::core_headers
+    absl::synchronization
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    no_destructor_test
+  SRCS
+    "no_destructor_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::no_destructor
+    absl::config
+    absl::raw_logging_internal
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    raw_logging_test
+  SRCS
+    "raw_logging_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::raw_logging_internal
+    absl::strings
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    sysinfo_test
+  SRCS
+    "internal/sysinfo_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::base
+    absl::synchronization
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    low_level_alloc_test
+  SRCS
+    "internal/low_level_alloc_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::malloc_internal
+    absl::node_hash_map
+    Threads::Threads
+)
+
+absl_cc_test(
+  NAME
+    thread_identity_test
+  SRCS
+    "internal/thread_identity_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::base
+    absl::core_headers
+    absl::synchronization
+    Threads::Threads
+    GTest::gtest_main
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    scoped_set_env
+  SRCS
+    "internal/scoped_set_env.cc"
+  HDRS
+    "internal/scoped_set_env.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  DEPS
+    absl::config
+    absl::raw_logging_internal
+)
+
+absl_cc_test(
+  NAME
+    scoped_set_env_test
+  SRCS
+    "internal/scoped_set_env_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::scoped_set_env
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    cmake_thread_test
+  SRCS
+    "internal/cmake_thread_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::base
+)
+
+absl_cc_test(
+  NAME
+    log_severity_test
+  SRCS
+    "log_severity_test.cc"
+  DEPS
+    absl::flags_internal
+    absl::flags_marshalling
+    absl::log_severity
+    absl::strings
+    GTest::gmock
+    GTest::gtest_main
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    strerror
+  SRCS
+    "internal/strerror.cc"
+  HDRS
+    "internal/strerror.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::core_headers
+    absl::errno_saver
+)
+
+absl_cc_test(
+  NAME
+    strerror_test
+  SRCS
+    "internal/strerror_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::strerror
+    absl::strings
+    GTest::gmock
+    GTest::gtest_main
+)
+
+# Internal-only target, do not depend on directly.
+absl_cc_library(
+  NAME
+    fast_type_id
+  HDRS
+    "internal/fast_type_id.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+)
+
+absl_cc_test(
+  NAME
+    fast_type_id_test
+  SRCS
+    "internal/fast_type_id_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::fast_type_id
+    GTest::gtest_main
+)
+
+absl_cc_library(
+  NAME
+    prefetch
+  HDRS
+    "prefetch.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::config
+    absl::core_headers
+)
+
+absl_cc_test(
+  NAME
+    prefetch_test
+  SRCS
+    "prefetch_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::prefetch
+    GTest::gtest_main
+)
+
+absl_cc_test(
+  NAME
+    optimization_test
+  SRCS
+    "optimization_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  DEPS
+    absl::core_headers
+    absl::optional
+    GTest::gtest_main
+)

+ 952 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/attributes.h

@@ -0,0 +1,952 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This header file defines macros for declaring attributes for functions,
+// types, and variables.
+//
+// These macros are used within Abseil and allow the compiler to optimize, where
+// applicable, certain function calls.
+//
+// Most macros here are exposing GCC or Clang features, and are stubbed out for
+// other compilers.
+//
+// GCC attributes documentation:
+//   https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Function-Attributes.html
+//   https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Variable-Attributes.html
+//   https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Type-Attributes.html
+//
+// Most attributes in this file are already supported by GCC 4.7. However, some
+// of them are not supported in older versions of Clang. Thus, we check
+// `__has_attribute()` first. If the check fails, we check if we are on GCC and
+// assume the attribute exists on GCC (which is verified on GCC 4.7).
+
+#ifndef ABSL_BASE_ATTRIBUTES_H_
+#define ABSL_BASE_ATTRIBUTES_H_
+
+#include "absl/base/config.h"
+
+// ABSL_HAVE_ATTRIBUTE
+//
+// A function-like feature checking macro that is a wrapper around
+// `__has_attribute`, which is defined by GCC 5+ and Clang and evaluates to a
+// nonzero constant integer if the attribute is supported or 0 if not.
+//
+// It evaluates to zero if `__has_attribute` is not defined by the compiler.
+//
+// GCC: https://gcc.gnu.org/gcc-5/changes.html
+// Clang: https://clang.llvm.org/docs/LanguageExtensions.html
+#ifdef __has_attribute
+#define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x)
+#else
+#define ABSL_HAVE_ATTRIBUTE(x) 0
+#endif
+
+// ABSL_HAVE_CPP_ATTRIBUTE
+//
+// A function-like feature checking macro that accepts C++11 style attributes.
+// It's a wrapper around `__has_cpp_attribute`, defined by ISO C++ SD-6
+// (https://en.cppreference.com/w/cpp/experimental/feature_test). If we don't
+// find `__has_cpp_attribute`, it will evaluate to 0.
+#if defined(__cplusplus) && defined(__has_cpp_attribute)
+// NOTE: requiring __cplusplus above should not be necessary, but
+// works around https://bugs.llvm.org/show_bug.cgi?id=23435.
+#define ABSL_HAVE_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
+#else
+#define ABSL_HAVE_CPP_ATTRIBUTE(x) 0
+#endif
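+
+// Usage sketch: both feature checks are safe to use in preprocessor
+// conditionals even on compilers that lack the underlying builtin, since they
+// then expand to 0. The attribute names below are only illustrative.
+//
+//   #if ABSL_HAVE_ATTRIBUTE(cleanup)
+//   // __attribute__((cleanup(...))) may be used here.
+//   #endif
+//
+//   #if ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate)
+//   // [[clang::annotate("...")]] may be used here.
+//   #endif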
+
+// -----------------------------------------------------------------------------
+// Function Attributes
+// -----------------------------------------------------------------------------
+//
+// GCC: https://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
+// Clang: https://clang.llvm.org/docs/AttributeReference.html
+
+// ABSL_PRINTF_ATTRIBUTE
+// ABSL_SCANF_ATTRIBUTE
+//
+// Tells the compiler to perform `printf` format string checking if the
+// compiler supports it; see the 'format' attribute in
+// <https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Function-Attributes.html>.
+//
+// Note: As the GCC manual states, "[s]ince non-static C++ methods
+// have an implicit 'this' argument, the arguments of such methods
+// should be counted from two, not one."
+#if ABSL_HAVE_ATTRIBUTE(format) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check) \
+  __attribute__((__format__(__printf__, string_index, first_to_check)))
+#define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check) \
+  __attribute__((__format__(__scanf__, string_index, first_to_check)))
+#else
+#define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check)
+#define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check)
+#endif
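+
+// Usage sketch (hypothetical names): the first index names the format-string
+// parameter, the second names the first argument to be checked against it.
+//
+//   // Format string is parameter 1; the checked arguments start at 2.
+//   void LogF(const char* format, ...) ABSL_PRINTF_ATTRIBUTE(1, 2);
+//
+//   struct Logger {
+//     // Non-static member functions count the implicit `this` as parameter 1.
+//     void LogF(const char* format, ...) ABSL_PRINTF_ATTRIBUTE(2, 3);
+//   };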
+
+// ABSL_ATTRIBUTE_ALWAYS_INLINE
+// ABSL_ATTRIBUTE_NOINLINE
+//
+// Forces functions to either inline or not inline. Introduced in gcc 3.1.
+#if ABSL_HAVE_ATTRIBUTE(always_inline) || \
+    (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
+#define ABSL_HAVE_ATTRIBUTE_ALWAYS_INLINE 1
+#else
+#define ABSL_ATTRIBUTE_ALWAYS_INLINE
+#endif
+
+#if ABSL_HAVE_ATTRIBUTE(noinline) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_NOINLINE __attribute__((noinline))
+#define ABSL_HAVE_ATTRIBUTE_NOINLINE 1
+#else
+#define ABSL_ATTRIBUTE_NOINLINE
+#endif
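+
+// Usage sketch (hypothetical names): the attributes are placed before the
+// declaration; ABSL_ATTRIBUTE_ALWAYS_INLINE is typically combined with
+// `inline`.
+//
+//   ABSL_ATTRIBUTE_ALWAYS_INLINE inline int FastPath(int x) { return x + 1; }
+//   ABSL_ATTRIBUTE_NOINLINE void SlowPath();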
+
+// ABSL_ATTRIBUTE_NO_TAIL_CALL
+//
+// Prevents the compiler from optimizing away stack frames for functions which
+// end in a call to another function.
+#if ABSL_HAVE_ATTRIBUTE(disable_tail_calls)
+#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1
+#define ABSL_ATTRIBUTE_NO_TAIL_CALL __attribute__((disable_tail_calls))
+#elif defined(__GNUC__) && !defined(__clang__) && !defined(__e2k__)
+#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1
+#define ABSL_ATTRIBUTE_NO_TAIL_CALL \
+  __attribute__((optimize("no-optimize-sibling-calls")))
+#else
+#define ABSL_ATTRIBUTE_NO_TAIL_CALL
+#define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 0
+#endif
+
+// ABSL_ATTRIBUTE_WEAK
+//
+// Tags a function as weak for the purposes of compilation and linking.
+// Weak attributes did not work properly in LLVM's Windows backend before
+// 9.0.0, so disable them there. See https://bugs.llvm.org/show_bug.cgi?id=37598
+// for further information.
+// The MinGW compiler doesn't complain about the weak attribute until the link
+// step, presumably because Windows doesn't use ELF binaries.
+#if (ABSL_HAVE_ATTRIBUTE(weak) ||                                         \
+     (defined(__GNUC__) && !defined(__clang__))) &&                       \
+    (!defined(_WIN32) || (defined(__clang__) && __clang_major__ >= 9)) && \
+    !defined(__MINGW32__)
+#undef ABSL_ATTRIBUTE_WEAK
+#define ABSL_ATTRIBUTE_WEAK __attribute__((weak))
+#define ABSL_HAVE_ATTRIBUTE_WEAK 1
+#else
+#define ABSL_ATTRIBUTE_WEAK
+#define ABSL_HAVE_ATTRIBUTE_WEAK 0
+#endif
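+
+// Usage sketch (hypothetical name): a weak default definition that a strong
+// definition in another translation unit may override at link time. Code that
+// relies on weak-symbol semantics should first check ABSL_HAVE_ATTRIBUTE_WEAK,
+// since the macro expands to nothing on unsupported toolchains.
+//
+//   ABSL_ATTRIBUTE_WEAK void OverridableHook() {}  // weak default definition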
+
+// ABSL_ATTRIBUTE_NONNULL
+//
+// Tells the compiler either (a) that a particular function parameter
+// should be a non-null pointer, or (b) that all pointer arguments should
+// be non-null.
+//
+// Note: As the GCC manual states, "[s]ince non-static C++ methods
+// have an implicit 'this' argument, the arguments of such methods
+// should be counted from two, not one."
+//
+// Args are indexed starting at 1.
+//
+// For non-static class member functions, the implicit `this` argument
+// is arg 1, and the first explicit argument is arg 2. For static class member
+// functions, there is no implicit `this`, and the first explicit argument is
+// arg 1.
+//
+// Example:
+//
+//   /* arg_a cannot be null, but arg_b can */
+//   void Function(void* arg_a, void* arg_b) ABSL_ATTRIBUTE_NONNULL(1);
+//
+//   class C {
+//     /* arg_a cannot be null, but arg_b can */
+//     void Method(void* arg_a, void* arg_b) ABSL_ATTRIBUTE_NONNULL(2);
+//
+//     /* arg_a cannot be null, but arg_b can */
+//     static void StaticMethod(void* arg_a, void* arg_b)
+//     ABSL_ATTRIBUTE_NONNULL(1);
+//   };
+//
+// If no arguments are provided, then all pointer arguments should be non-null.
+//
+//  /* No pointer arguments may be null. */
+//  void Function(void* arg_a, void* arg_b, int arg_c) ABSL_ATTRIBUTE_NONNULL();
+//
+// NOTE: The GCC nonnull attribute actually accepts a list of arguments, but
+// ABSL_ATTRIBUTE_NONNULL does not.
+#if ABSL_HAVE_ATTRIBUTE(nonnull) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_NONNULL(arg_index) __attribute__((nonnull(arg_index)))
+#else
+#define ABSL_ATTRIBUTE_NONNULL(...)
+#endif
+
+// ABSL_ATTRIBUTE_NORETURN
+//
+// Tells the compiler that a given function never returns.
+#if ABSL_HAVE_ATTRIBUTE(noreturn) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_NORETURN __attribute__((noreturn))
+#elif defined(_MSC_VER)
+#define ABSL_ATTRIBUTE_NORETURN __declspec(noreturn)
+#else
+#define ABSL_ATTRIBUTE_NORETURN
+#endif
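+
+// Usage sketch (hypothetical name):
+//
+//   ABSL_ATTRIBUTE_NORETURN void FatalError(const char* message);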
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS
+//
+// Tells the AddressSanitizer (or other memory testing tools) to ignore a given
+// function. Useful for cases when a function reads random locations on stack,
+// calls _exit from a cloned subprocess, deliberately accesses a buffer
+// out of bounds or does other scary things with memory.
+// NOTE: GCC supports AddressSanitizer(asan) since 4.8.
+// https://gcc.gnu.org/gcc-4.8/changes.html
+#if defined(ABSL_HAVE_ADDRESS_SANITIZER) && \
+    ABSL_HAVE_ATTRIBUTE(no_sanitize_address)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
+#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) && defined(_MSC_VER) && \
+    _MSC_VER >= 1928
+// https://docs.microsoft.com/en-us/cpp/cpp/no-sanitize-address
+#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __declspec(no_sanitize_address)
+#elif defined(ABSL_HAVE_HWADDRESS_SANITIZER) && ABSL_HAVE_ATTRIBUTE(no_sanitize)
+// HWAddressSanitizer is a sanitizer similar to AddressSanitizer, which uses CPU
+// features to detect similar bugs with less CPU and memory overhead.
+// NOTE: GCC supports HWAddressSanitizer(hwasan) since 11.
+// https://gcc.gnu.org/gcc-11/changes.html
+#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS \
+  __attribute__((no_sanitize("hwaddress")))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
+//
+// Tells the MemorySanitizer to relax the handling of a given function. All "Use
+// of uninitialized value" warnings from such functions will be suppressed, and
+// all values loaded from memory will be considered fully initialized.  This
+// attribute is similar to the ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS attribute
+// above, but deals with initialized-ness rather than addressability issues.
+// NOTE: MemorySanitizer(msan) is supported by Clang but not GCC.
+#if ABSL_HAVE_ATTRIBUTE(no_sanitize_memory)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_THREAD
+//
+// Tells the ThreadSanitizer to not instrument a given function.
+// NOTE: GCC supports ThreadSanitizer(tsan) since 4.8.
+// https://gcc.gnu.org/gcc-4.8/changes.html
+#if ABSL_HAVE_ATTRIBUTE(no_sanitize_thread)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD __attribute__((no_sanitize_thread))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED
+//
+// Tells the UndefinedSanitizer to ignore a given function. Useful for cases
+// where certain behavior (e.g. division by zero) is being used intentionally.
+// NOTE: GCC supports UndefinedBehaviorSanitizer(ubsan) since 4.9.
+// https://gcc.gnu.org/gcc-4.9/changes.html
+#if ABSL_HAVE_ATTRIBUTE(no_sanitize_undefined)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \
+  __attribute__((no_sanitize_undefined))
+#elif ABSL_HAVE_ATTRIBUTE(no_sanitize)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \
+  __attribute__((no_sanitize("undefined")))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_CFI
+//
+// Tells the ControlFlowIntegrity sanitizer to not instrument a given function.
+// See https://clang.llvm.org/docs/ControlFlowIntegrity.html for details.
+#if ABSL_HAVE_ATTRIBUTE(no_sanitize) && defined(__llvm__)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_CFI __attribute__((no_sanitize("cfi")))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_CFI
+#endif
+
+// ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK
+//
+// Tells the SafeStack to not instrument a given function.
+// See https://clang.llvm.org/docs/SafeStack.html for details.
+#if ABSL_HAVE_ATTRIBUTE(no_sanitize)
+#define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK \
+  __attribute__((no_sanitize("safe-stack")))
+#else
+#define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK
+#endif
+
+// ABSL_ATTRIBUTE_RETURNS_NONNULL
+//
+// Tells the compiler that a particular function never returns a null pointer.
+#if ABSL_HAVE_ATTRIBUTE(returns_nonnull)
+#define ABSL_ATTRIBUTE_RETURNS_NONNULL __attribute__((returns_nonnull))
+#else
+#define ABSL_ATTRIBUTE_RETURNS_NONNULL
+#endif
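+
+// Usage sketch (hypothetical name):
+//
+//   ABSL_ATTRIBUTE_RETURNS_NONNULL void* AllocateOrDie(size_t size);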
+
+// ABSL_HAVE_ATTRIBUTE_SECTION
+//
+// Indicates whether labeled sections are supported. Weak symbol support is
+// a prerequisite. Labeled sections are not supported on Darwin/iOS.
+#ifdef ABSL_HAVE_ATTRIBUTE_SECTION
+#error ABSL_HAVE_ATTRIBUTE_SECTION cannot be directly set
+#elif (ABSL_HAVE_ATTRIBUTE(section) ||                \
+       (defined(__GNUC__) && !defined(__clang__))) && \
+    !defined(__APPLE__) && ABSL_HAVE_ATTRIBUTE_WEAK
+#define ABSL_HAVE_ATTRIBUTE_SECTION 1
+
+// ABSL_ATTRIBUTE_SECTION
+//
+// Tells the compiler/linker to put a given function into a section and define
+// `__start_ ## name` and `__stop_ ## name` symbols to bracket the section.
+// This functionality is supported by GNU linker.  Any function annotated with
+// `ABSL_ATTRIBUTE_SECTION` must not be inlined, or it will be placed into
+// whatever section its caller is placed into.
+//
+#ifndef ABSL_ATTRIBUTE_SECTION
+#define ABSL_ATTRIBUTE_SECTION(name) \
+  __attribute__((section(#name))) __attribute__((noinline))
+#endif
+
+// ABSL_ATTRIBUTE_SECTION_VARIABLE
+//
+// Tells the compiler/linker to put a given variable into a section and define
+// `__start_ ## name` and `__stop_ ## name` symbols to bracket the section.
+// This functionality is supported by GNU linker.
+#ifndef ABSL_ATTRIBUTE_SECTION_VARIABLE
+#ifdef _AIX
+// __attribute__((section(#name))) on AIX is achieved by using the `.csect`
+// pseudo-op, which includes an additional integer as part of its syntax
+// indicating alignment. If data falls under different alignments, you might
+// get a compilation error indicating a `Section type conflict`.
+#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name)
+#else
+#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name) __attribute__((section(#name)))
+#endif
+#endif
+
+// ABSL_DECLARE_ATTRIBUTE_SECTION_VARS
+//
+// A weak section declaration to be used as a global declaration
+// for ABSL_ATTRIBUTE_SECTION_START|STOP(name) to compile and link
+// even without functions with ABSL_ATTRIBUTE_SECTION(name).
+// ABSL_DEFINE_ATTRIBUTE_SECTION_VARS should appear in exactly one file; it's
+// a no-op on ELF but not on Mach-O.
+//
+#ifndef ABSL_DECLARE_ATTRIBUTE_SECTION_VARS
+#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name)   \
+  extern char __start_##name[] ABSL_ATTRIBUTE_WEAK; \
+  extern char __stop_##name[] ABSL_ATTRIBUTE_WEAK
+#endif
+#ifndef ABSL_DEFINE_ATTRIBUTE_SECTION_VARS
+#define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name)
+#endif
+
+// ABSL_ATTRIBUTE_SECTION_START
+//
+// Returns `void*` pointers to start/end of a section of code with
+// functions having ABSL_ATTRIBUTE_SECTION(name).
+// Returns 0 if no such functions exist.
+// One must ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) for this to compile and
+// link.
+//
+#define ABSL_ATTRIBUTE_SECTION_START(name) \
+  (reinterpret_cast<void *>(__start_##name))
+#define ABSL_ATTRIBUTE_SECTION_STOP(name) \
+  (reinterpret_cast<void *>(__stop_##name))
+
+#else  // !ABSL_HAVE_ATTRIBUTE_SECTION
+
+#define ABSL_HAVE_ATTRIBUTE_SECTION 0
+
+// provide dummy definitions
+#define ABSL_ATTRIBUTE_SECTION(name)
+#define ABSL_ATTRIBUTE_SECTION_VARIABLE(name)
+#define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name)
+#define ABSL_ATTRIBUTE_SECTION_START(name) (reinterpret_cast<void *>(0))
+#define ABSL_ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast<void *>(0))
+
+#endif  // ABSL_ATTRIBUTE_SECTION
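+
+// Usage sketch (hypothetical section name "mytrace"): place selected functions
+// into a named section, then walk the [start, stop) range that the linker
+// brackets for it. When sections are unsupported, the range is simply empty.
+//
+//   ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(mytrace);
+//
+//   ABSL_ATTRIBUTE_SECTION(mytrace) void TracedFunction() { /* ... */ }
+//
+//   void DumpTraceSection() {
+//     const char* begin =
+//         static_cast<const char*>(ABSL_ATTRIBUTE_SECTION_START(mytrace));
+//     const char* end =
+//         static_cast<const char*>(ABSL_ATTRIBUTE_SECTION_STOP(mytrace));
+//     // [begin, end) spans the "mytrace" section (possibly empty).
+//   }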
+
+// ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
+//
+// Support for aligning the stack on 32-bit x86.
+#if ABSL_HAVE_ATTRIBUTE(force_align_arg_pointer) || \
+    (defined(__GNUC__) && !defined(__clang__))
+#if defined(__i386__)
+#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC \
+  __attribute__((force_align_arg_pointer))
+#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0)
+#elif defined(__x86_64__)
+#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (1)
+#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
+#else  // !__i386__ && !__x86_64
+#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0)
+#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
+#endif  // __i386__
+#else
+#define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC
+#define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0)
+#endif
+
+// ABSL_MUST_USE_RESULT
+//
+// Tells the compiler to warn about unused results.
+//
+// For code or headers that are assured to only build with C++17 and up, prefer
+// just using the standard `[[nodiscard]]` directly over this macro.
+//
+// When annotating a function, it must appear as the first part of the
+// declaration or definition. The compiler will warn if the return value from
+// such a function is unused:
+//
+//   ABSL_MUST_USE_RESULT Sprocket* AllocateSprocket();
+//   AllocateSprocket();  // Triggers a warning.
+//
+// When annotating a class, it is equivalent to annotating every function which
+// returns an instance.
+//
+//   class ABSL_MUST_USE_RESULT Sprocket {};
+//   Sprocket();  // Triggers a warning.
+//
+//   Sprocket MakeSprocket();
+//   MakeSprocket();  // Triggers a warning.
+//
+// Note that references and pointers are not instances:
+//
+//   Sprocket* SprocketPointer();
+//   SprocketPointer();  // Does *not* trigger a warning.
+//
+// ABSL_MUST_USE_RESULT allows using cast-to-void to suppress the unused result
+// warning. For that reason, warn_unused_result is used only on Clang, not GCC.
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66425
+//
+// Note: past advice was to place the macro after the argument list.
+//
+// TODO(b/176172494): Use ABSL_HAVE_CPP_ATTRIBUTE(nodiscard) when all code is
+// compliant with the stricter [[nodiscard]].
+#if defined(__clang__) && ABSL_HAVE_ATTRIBUTE(warn_unused_result)
+#define ABSL_MUST_USE_RESULT __attribute__((warn_unused_result))
+#else
+#define ABSL_MUST_USE_RESULT
+#endif
+
+// ABSL_ATTRIBUTE_HOT, ABSL_ATTRIBUTE_COLD
+//
+// Tells GCC that a function is hot or cold. GCC can use this information to
+// improve static analysis, e.g. a conditional branch to a cold function
+// is likely to be not-taken.
+// This annotation is used for function declarations.
+//
+// Example:
+//
+//   int foo() ABSL_ATTRIBUTE_HOT;
+#if ABSL_HAVE_ATTRIBUTE(hot) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_HOT __attribute__((hot))
+#else
+#define ABSL_ATTRIBUTE_HOT
+#endif
+
+#if ABSL_HAVE_ATTRIBUTE(cold) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_COLD __attribute__((cold))
+#else
+#define ABSL_ATTRIBUTE_COLD
+#endif
+
+// ABSL_XRAY_ALWAYS_INSTRUMENT, ABSL_XRAY_NEVER_INSTRUMENT, ABSL_XRAY_LOG_ARGS
+//
+// We define the ABSL_XRAY_ALWAYS_INSTRUMENT and ABSL_XRAY_NEVER_INSTRUMENT
+// macro used as an attribute to mark functions that must always or never be
+// instrumented by XRay. Currently, this is only supported in Clang/LLVM.
+//
+// For reference on the LLVM XRay instrumentation, see
+// http://llvm.org/docs/XRay.html.
+//
+// A function with the XRAY_ALWAYS_INSTRUMENT macro attribute in its declaration
+// will always get the XRay instrumentation sleds. These sleds may introduce
+// some binary size and runtime overhead and must be used sparingly.
+//
+// These attributes only take effect when the following conditions are met:
+//
+//   * The file/target is built in at least C++11 mode, with a Clang compiler
+//     that supports XRay attributes.
+//   * The file/target is built with the -fxray-instrument flag set for the
+//     Clang/LLVM compiler.
+//   * The function is defined in the translation unit (the compiler honors the
+//     attribute in either the definition or the declaration, and must match).
+//
+// There are cases when, even when building with XRay instrumentation, users
+// might want to control specifically which functions are instrumented for a
+// particular build using special-case lists provided to the compiler. These
+// special case lists are provided to Clang via the
+// -fxray-always-instrument=... and -fxray-never-instrument=... flags. The
+// attributes in source take precedence over these special-case lists.
+//
+// To disable the XRay attributes at build-time, users may define
+// ABSL_NO_XRAY_ATTRIBUTES. Do NOT define ABSL_NO_XRAY_ATTRIBUTES on specific
+// packages/targets, as this may lead to conflicting definitions of functions at
+// link-time.
+//
+// XRay isn't currently supported on Android:
+// https://github.com/android/ndk/issues/368
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_always_instrument) && \
+    !defined(ABSL_NO_XRAY_ATTRIBUTES) && !defined(__ANDROID__)
+#define ABSL_XRAY_ALWAYS_INSTRUMENT [[clang::xray_always_instrument]]
+#define ABSL_XRAY_NEVER_INSTRUMENT [[clang::xray_never_instrument]]
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_log_args)
+#define ABSL_XRAY_LOG_ARGS(N) \
+  [[clang::xray_always_instrument, clang::xray_log_args(N)]]
+#else
+#define ABSL_XRAY_LOG_ARGS(N) [[clang::xray_always_instrument]]
+#endif
+#else
+#define ABSL_XRAY_ALWAYS_INSTRUMENT
+#define ABSL_XRAY_NEVER_INSTRUMENT
+#define ABSL_XRAY_LOG_ARGS(N)
+#endif
+
+// ABSL_ATTRIBUTE_REINITIALIZES
+//
+// Indicates that a member function reinitializes the entire object to a known
+// state, independent of the previous state of the object.
+//
+// The clang-tidy check bugprone-use-after-move allows member functions marked
+// with this attribute to be called on objects that have been moved from;
+// without the attribute, this would result in a use-after-move warning.
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::reinitializes)
+#define ABSL_ATTRIBUTE_REINITIALIZES [[clang::reinitializes]]
+#else
+#define ABSL_ATTRIBUTE_REINITIALIZES
+#endif
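+
+// Usage sketch (hypothetical class): `Clear()` may then be called on a
+// moved-from Buffer without triggering bugprone-use-after-move.
+//
+//   class Buffer {
+//    public:
+//     ABSL_ATTRIBUTE_REINITIALIZES void Clear();
+//   };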
+
+// -----------------------------------------------------------------------------
+// Variable Attributes
+// -----------------------------------------------------------------------------
+
+// ABSL_ATTRIBUTE_UNUSED
+//
+// Prevents the compiler from complaining about variables that appear unused.
+//
+// For code or headers that are assured to only build with C++17 and up, prefer
+// just using the standard '[[maybe_unused]]' directly over this macro.
+//
+// Due to differences in positioning requirements between the old, compiler
+// specific __attribute__ syntax and the now standard [[maybe_unused]], this
+// macro does not attempt to take advantage of '[[maybe_unused]]'.
+#if ABSL_HAVE_ATTRIBUTE(unused) || (defined(__GNUC__) && !defined(__clang__))
+#undef ABSL_ATTRIBUTE_UNUSED
+#define ABSL_ATTRIBUTE_UNUSED __attribute__((__unused__))
+#else
+#define ABSL_ATTRIBUTE_UNUSED
+#endif
+
+// ABSL_ATTRIBUTE_INITIAL_EXEC
+//
+// Tells the compiler to use "initial-exec" mode for a thread-local variable.
+// See http://people.redhat.com/drepper/tls.pdf for the gory details.
+#if ABSL_HAVE_ATTRIBUTE(tls_model) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_INITIAL_EXEC __attribute__((tls_model("initial-exec")))
+#else
+#define ABSL_ATTRIBUTE_INITIAL_EXEC
+#endif
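+
+// Usage sketch (hypothetical variable): appropriate only for thread-local
+// variables in code that is not loaded at runtime via dlopen().
+//
+//   ABSL_ATTRIBUTE_INITIAL_EXEC thread_local int per_thread_counter = 0;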
+
+// ABSL_ATTRIBUTE_PACKED
+//
+// Instructs the compiler not to use natural alignment for a tagged data
+// structure, but instead to reduce its alignment to 1.
+//
+// Therefore, DO NOT APPLY THIS ATTRIBUTE TO STRUCTS CONTAINING ATOMICS. Doing
+// so can cause atomic variables to be mis-aligned and silently violate
+// atomicity on x86.
+//
+// This attribute can either be applied to members of a structure or to a
+// structure in its entirety. Applying this attribute (judiciously) to a
+// structure in its entirety to optimize the memory footprint of very
+// commonly-used structs is fine. Do not apply this attribute to a structure in
+// its entirety if the purpose is to control the offsets of the members in the
+// structure. Instead, apply this attribute only to structure members that need
+// it.
+//
+// When applying ABSL_ATTRIBUTE_PACKED only to specific structure members the
+// natural alignment of structure members not annotated is preserved. Aligned
+// member accesses are faster than non-aligned member accesses even if the
+// targeted microprocessor supports non-aligned accesses.
+#if ABSL_HAVE_ATTRIBUTE(packed) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_PACKED __attribute__((__packed__))
+#else
+#define ABSL_ATTRIBUTE_PACKED
+#endif
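+
+// Usage sketch (hypothetical struct): packing only the member that needs it
+// preserves the natural alignment of the other members.
+//
+//   struct WireHeader {
+//     uint8_t kind;
+//     uint32_t length ABSL_ATTRIBUTE_PACKED;  // may sit at offset 1
+//     uint64_t checksum;                      // keeps its natural alignment
+//   };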
+
+// ABSL_ATTRIBUTE_FUNC_ALIGN
+//
+// Tells the compiler to align the start of the function to at least the given
+// alignment boundary.
+#if ABSL_HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__))
+#define ABSL_ATTRIBUTE_FUNC_ALIGN(bytes) __attribute__((aligned(bytes)))
+#else
+#define ABSL_ATTRIBUTE_FUNC_ALIGN(bytes)
+#endif
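+
+// Usage sketch (hypothetical function):
+//
+//   ABSL_ATTRIBUTE_FUNC_ALIGN(32) void HotLoop();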
+
+// ABSL_FALLTHROUGH_INTENDED
+//
+// Annotates implicit fall-through between switch labels, allowing a case to
+// indicate intentional fallthrough and turn off warnings about any lack of a
+// `break` statement. The ABSL_FALLTHROUGH_INTENDED macro should be followed by
+// a semicolon and can be used in most places where `break` can, provided that
+// no statements exist between it and the next switch label.
+//
+// Example:
+//
+//  switch (x) {
+//    case 40:
+//    case 41:
+//      if (truth_is_out_there) {
+//        ++x;
+//        ABSL_FALLTHROUGH_INTENDED;  // Use instead of/along with annotations
+//                                    // in comments
+//      } else {
+//        return x;
+//      }
+//    case 42:
+//      ...
+//
+// Notes: When supported, GCC and Clang can issue a warning on switch labels
+// with unannotated fallthrough using the warning `-Wimplicit-fallthrough`. See
+// clang documentation on language extensions for details:
+// https://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough
+//
+// When used with unsupported compilers, the ABSL_FALLTHROUGH_INTENDED macro has
+// no effect on diagnostics. In any case this macro has no effect on runtime
+// behavior and performance of code.
+
+#ifdef ABSL_FALLTHROUGH_INTENDED
+#error "ABSL_FALLTHROUGH_INTENDED should not be defined."
+#elif ABSL_HAVE_CPP_ATTRIBUTE(fallthrough)
+#define ABSL_FALLTHROUGH_INTENDED [[fallthrough]]
+#elif ABSL_HAVE_CPP_ATTRIBUTE(clang::fallthrough)
+#define ABSL_FALLTHROUGH_INTENDED [[clang::fallthrough]]
+#elif ABSL_HAVE_CPP_ATTRIBUTE(gnu::fallthrough)
+#define ABSL_FALLTHROUGH_INTENDED [[gnu::fallthrough]]
+#else
+#define ABSL_FALLTHROUGH_INTENDED \
+  do {                            \
+  } while (0)
+#endif
+
+// ABSL_DEPRECATED()
+//
+// Marks a deprecated class, struct, enum, function, method and variable
+// declarations. The macro argument is used as a custom diagnostic message (e.g.
+// suggestion of a better alternative).
+//
+// For code or headers that are assured to only build with C++14 and up, prefer
+// just using the standard `[[deprecated("message")]]` directly over this macro.
+//
+// Examples:
+//
+//   class ABSL_DEPRECATED("Use Bar instead") Foo {...};
+//
+//   ABSL_DEPRECATED("Use Baz() instead") void Bar() {...}
+//
+//   template <typename T>
+//   ABSL_DEPRECATED("Use DoThat() instead")
+//   void DoThis();
+//
+//   enum FooEnum {
+//     kBar ABSL_DEPRECATED("Use kBaz instead"),
+//   };
+//
+// Every usage of a deprecated entity will trigger a warning when compiled with
+// GCC/Clang's `-Wdeprecated-declarations` option. Google's production toolchain
+// turns this warning off by default, instead relying on clang-tidy to report
+// new uses of deprecated code.
+#if ABSL_HAVE_ATTRIBUTE(deprecated)
+#define ABSL_DEPRECATED(message) __attribute__((deprecated(message)))
+#else
+#define ABSL_DEPRECATED(message)
+#endif
+
+// When deprecating Abseil code, it is sometimes necessary to turn off the
+// warning within Abseil, until the deprecated code is actually removed. The
+// deprecated code can be surrounded with these directives to achieve that
+// result.
+//
+// class ABSL_DEPRECATED("Use Bar instead") Foo;
+//
+// ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
+// Baz ComputeBazFromFoo(Foo f);
+// ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
+#if defined(__GNUC__) || defined(__clang__)
+// Clang also supports these GCC pragmas.
+#define ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING \
+  _Pragma("GCC diagnostic push")             \
+  _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
+#define ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING \
+  _Pragma("GCC diagnostic pop")
+#elif defined(_MSC_VER)
+#define ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING \
+  _Pragma("warning(push)") _Pragma("warning(disable: 4996)")
+#define ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING \
+  _Pragma("warning(pop)")
+#else
+#define ABSL_INTERNAL_DISABLE_DEPRECATED_DECLARATION_WARNING
+#define ABSL_INTERNAL_RESTORE_DEPRECATED_DECLARATION_WARNING
+#endif  // defined(__GNUC__) || defined(__clang__)
+
+// ABSL_CONST_INIT
+//
+// A variable declaration annotated with the `ABSL_CONST_INIT` attribute will
+// not compile (on supported platforms) unless the variable has a constant
+// initializer. This is useful for variables with static and thread storage
+// duration, because it guarantees that they will not suffer from the so-called
+// "static init order fiasco".
+//
+// This attribute must be placed on the initializing declaration of the
+// variable. Some compilers will give a -Wmissing-constinit warning when this
+// attribute is placed on some other declaration but missing from the
+// initializing declaration.
+//
+// In some cases (notably with thread_local variables), `ABSL_CONST_INIT` can
+// also be used in a non-initializing declaration to tell the compiler that a
+// variable is already initialized, reducing overhead that would otherwise be
+// incurred by a hidden guard variable. Thus annotating all declarations with
+// this attribute is recommended to potentially enhance optimization.
+//
+// Example:
+//
+//   class MyClass {
+//    public:
+//     ABSL_CONST_INIT static MyType my_var;
+//   };
+//
+//   ABSL_CONST_INIT MyType MyClass::my_var = MakeMyType(...);
+//
+// For code or headers that are assured to only build with C++20 and up, prefer
+// just using the standard `constinit` keyword directly over this macro.
+//
+// Note that this attribute is redundant if the variable is declared constexpr.
+#if defined(__cpp_constinit) && __cpp_constinit >= 201907L
+#define ABSL_CONST_INIT constinit
+#elif ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization)
+#define ABSL_CONST_INIT [[clang::require_constant_initialization]]
+#else
+#define ABSL_CONST_INIT
+#endif
+
+// ABSL_ATTRIBUTE_PURE_FUNCTION
+//
+// ABSL_ATTRIBUTE_PURE_FUNCTION is used to annotate declarations of "pure"
+// functions. A function is pure if its return value is only a function of its
+// arguments. The pure attribute prohibits a function from modifying the state
+// of the program that is observable by means other than inspecting the
+// function's return value. Declaring such functions with the pure attribute
+// allows the compiler to avoid emitting some calls in repeated invocations of
+// the function with the same argument values.
+//
+// Example:
+//
+//  ABSL_ATTRIBUTE_PURE_FUNCTION std::string FormatTime(Time t);
+#if ABSL_HAVE_CPP_ATTRIBUTE(gnu::pure)
+#define ABSL_ATTRIBUTE_PURE_FUNCTION [[gnu::pure]]
+#elif ABSL_HAVE_ATTRIBUTE(pure)
+#define ABSL_ATTRIBUTE_PURE_FUNCTION __attribute__((pure))
+#else
+// If the attribute isn't defined, we fall back to ABSL_MUST_USE_RESULT, since
+// pure functions are useless if their return values are ignored.
+#define ABSL_ATTRIBUTE_PURE_FUNCTION ABSL_MUST_USE_RESULT
+#endif
+
+// ABSL_ATTRIBUTE_CONST_FUNCTION
+//
+// ABSL_ATTRIBUTE_CONST_FUNCTION is used to annotate declarations of "const"
+// functions. A const function is similar to a pure function, with one
+// exception: pure functions may return a value that depends on a non-volatile
+// object that isn't provided as a function argument, while a const function
+// is guaranteed to return the same result given the same arguments.
+//
+// Example:
+//
+//  ABSL_ATTRIBUTE_CONST_FUNCTION int64_t ToInt64Milliseconds(Duration d);
+#if defined(_MSC_VER) && !defined(__clang__)
+// Put the MSVC case first since MSVC seems to parse const as a C++ keyword.
+#define ABSL_ATTRIBUTE_CONST_FUNCTION ABSL_ATTRIBUTE_PURE_FUNCTION
+#elif ABSL_HAVE_CPP_ATTRIBUTE(gnu::const)
+#define ABSL_ATTRIBUTE_CONST_FUNCTION [[gnu::const]]
+#elif ABSL_HAVE_ATTRIBUTE(const)
+#define ABSL_ATTRIBUTE_CONST_FUNCTION __attribute__((const))
+#else
+// Since const functions are a more restrictive form of pure functions, we fall
+// back to ABSL_ATTRIBUTE_PURE_FUNCTION if the const attribute is not handled.
+#define ABSL_ATTRIBUTE_CONST_FUNCTION ABSL_ATTRIBUTE_PURE_FUNCTION
+#endif
+
+// ABSL_ATTRIBUTE_LIFETIME_BOUND indicates that a resource owned by a function
+// parameter or implicit object parameter is retained by the return value of the
+// annotated function (or, for a parameter of a constructor, in the value of the
+// constructed object). This attribute causes warnings to be produced if a
+// temporary object does not live long enough.
+//
+// When applied to a reference parameter, the referenced object is assumed to be
+// retained by the return value of the function. When applied to a non-reference
+// parameter (for example, a pointer or a class type), all temporaries
+// referenced by the parameter are assumed to be retained by the return value of
+// the function.
+//
+// See also the upstream documentation:
+// https://clang.llvm.org/docs/AttributeReference.html#lifetimebound
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::lifetimebound)
+#define ABSL_ATTRIBUTE_LIFETIME_BOUND [[clang::lifetimebound]]
+#elif ABSL_HAVE_ATTRIBUTE(lifetimebound)
+#define ABSL_ATTRIBUTE_LIFETIME_BOUND __attribute__((lifetimebound))
+#else
+#define ABSL_ATTRIBUTE_LIFETIME_BOUND
+#endif
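+
+// Usage sketch (hypothetical class): the annotation lets the compiler warn
+// when the returned reference outlives the Holder it points into, e.g.
+// `const std::string& n = Holder().name();`.
+//
+//   class Holder {
+//    public:
+//     const std::string& name() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
+//       return name_;
+//     }
+//
+//    private:
+//     std::string name_;
+//   };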
+
+// ABSL_INTERNAL_ATTRIBUTE_VIEW indicates that a type acts like a view, i.e. a
+// raw (non-owning) pointer. This enables diagnostics similar to those enabled by
+// ABSL_ATTRIBUTE_LIFETIME_BOUND.
+//
+// See the following links for details:
+// https://reviews.llvm.org/D64448
+// https://lists.llvm.org/pipermail/cfe-dev/2018-November/060355.html
+#if ABSL_HAVE_CPP_ATTRIBUTE(gsl::Pointer)
+#define ABSL_INTERNAL_ATTRIBUTE_VIEW [[gsl::Pointer]]
+#else
+#define ABSL_INTERNAL_ATTRIBUTE_VIEW
+#endif
+
+// ABSL_INTERNAL_ATTRIBUTE_OWNER indicates that a type acts like a smart
+// (owning) pointer. This enables diagnostics similar to those enabled by
+// ABSL_ATTRIBUTE_LIFETIME_BOUND.
+//
+// See the following links for details:
+// https://reviews.llvm.org/D64448
+// https://lists.llvm.org/pipermail/cfe-dev/2018-November/060355.html
+#if ABSL_HAVE_CPP_ATTRIBUTE(gsl::Owner)
+#define ABSL_INTERNAL_ATTRIBUTE_OWNER [[gsl::Owner]]
+#else
+#define ABSL_INTERNAL_ATTRIBUTE_OWNER
+#endif
+
+// ABSL_ATTRIBUTE_TRIVIAL_ABI
+// Indicates that a type is "trivially relocatable" -- meaning it can be
+// relocated without invoking the constructor/destructor, using a form of move
+// elision.
+//
+// From a memory safety point of view, putting aside destructor ordering, it's
+// safe to apply ABSL_ATTRIBUTE_TRIVIAL_ABI if an object's location
+// can change over the course of its lifetime: if a constructor can be run in
+// one place, then the object magically teleports to another place where some
+// methods are run, and then the object teleports to yet another place where it
+// is destroyed. This is notably not true for self-referential types, where the
+// move-constructor must keep the self-reference up to date. If the type changed
+// location without invoking the move constructor, it would have a dangling
+// self-reference.
+//
+// The use of this teleporting machinery means that the number of paired
+// move/destroy operations can change, and so it is a bad idea to apply this to
+// a type meant to count the number of moves.
+//
+// Warning: applying this can, rarely, break callers. Objects passed by value
+// will be destroyed at the end of the call, instead of the end of the
+// full-expression containing the call. In addition, it changes the ABI
+// of functions accepting this type by value (e.g. to pass in registers).
+//
+// See also the upstream documentation:
+// https://clang.llvm.org/docs/AttributeReference.html#trivial-abi
+//
+// b/321691395 - This is currently disabled in open-source builds since
+// compiler support differs. If system libraries compiled with GCC are mixed
+// with libraries compiled with Clang, types will have different ideas about
+// their ABI, leading to hard to debug crashes.
+#define ABSL_ATTRIBUTE_TRIVIAL_ABI
+
+// ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS
+//
+// Indicates a data member can be optimized to occupy no space (if it is empty)
+// and/or its tail padding can be used for other members.
+//
+// For code that is assured to only build with C++20 or later, prefer using
+// the standard attribute `[[no_unique_address]]` directly instead of this
+// macro.
+//
+// https://devblogs.microsoft.com/cppblog/msvc-cpp20-and-the-std-cpp20-switch/#c20-no_unique_address
+// Current versions of MSVC have disabled `[[no_unique_address]]` since it
+// breaks ABI compatibility, but offers `[[msvc::no_unique_address]]` for
+// situations when it can be assured that it is desired. Since Abseil does not
+// claim ABI compatibility in mixed builds, we can offer it unconditionally.
+#if defined(_MSC_VER) && _MSC_VER >= 1929
+#define ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS [[msvc::no_unique_address]]
+#elif ABSL_HAVE_CPP_ATTRIBUTE(no_unique_address)
+#define ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS [[no_unique_address]]
+#else
+#define ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS
+#endif
+
+// ABSL_ATTRIBUTE_UNINITIALIZED
+//
+// GCC and Clang support a flag `-ftrivial-auto-var-init=<option>` (<option>
+// can be "zero" or "pattern") that can be used to initialize automatic stack
+// variables. Variables with this attribute will be left uninitialized,
+// overriding the compiler flag.
+//
+// See https://clang.llvm.org/docs/AttributeReference.html#uninitialized
+// and https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-uninitialized-variable-attribute
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::uninitialized)
+#define ABSL_ATTRIBUTE_UNINITIALIZED [[clang::uninitialized]]
+#elif ABSL_HAVE_CPP_ATTRIBUTE(gnu::uninitialized)
+#define ABSL_ATTRIBUTE_UNINITIALIZED [[gnu::uninitialized]]
+#elif ABSL_HAVE_ATTRIBUTE(uninitialized)
+#define ABSL_ATTRIBUTE_UNINITIALIZED __attribute__((uninitialized))
+#else
+#define ABSL_ATTRIBUTE_UNINITIALIZED
+#endif
+
+// ABSL_ATTRIBUTE_WARN_UNUSED
+//
+// Compilers routinely warn about trivial variables that are unused.  For
+// non-trivial types, this warning is suppressed since the
+// constructor/destructor may be intentional and load-bearing, for example, with
+// a RAII scoped lock.
+//
+// For example:
+//
+// class ABSL_ATTRIBUTE_WARN_UNUSED MyType {
+//  public:
+//   MyType();
+//   ~MyType();
+// };
+//
+// void foo() {
+//   // Warns with ABSL_ATTRIBUTE_WARN_UNUSED attribute present.
+//   MyType unused;
+// }
+//
+// See https://clang.llvm.org/docs/AttributeReference.html#warn-unused and
+// https://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Attributes.html#index-warn_005funused-type-attribute
+#if ABSL_HAVE_CPP_ATTRIBUTE(gnu::warn_unused)
+#define ABSL_ATTRIBUTE_WARN_UNUSED [[gnu::warn_unused]]
+#else
+#define ABSL_ATTRIBUTE_WARN_UNUSED
+#endif
+
+#endif  // ABSL_BASE_ATTRIBUTES_H_
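
A minimal usage sketch for the attribute macros documented above (illustrative only, not part of the vendored sources; `Empty`, `Wrapper`, `FillBuffer`, and `Example` are invented names):

    #include <cstddef>

    #include "absl/base/attributes.h"

    struct Empty {};  // an empty tag/policy type

    struct Wrapper {
      // With [[no_unique_address]] (or the MSVC spelling), the empty member may
      // share storage with `value`, so sizeof(Wrapper) need not grow for it.
      ABSL_ATTRIBUTE_NO_UNIQUE_ADDRESS Empty policy;
      int value = 0;
    };

    void FillBuffer(char* out, std::size_t n);  // assumed to write all n bytes

    void Example() {
      // Opts the buffer out of -ftrivial-auto-var-init zero/pattern filling,
      // since FillBuffer() overwrites every byte anyway.
      ABSL_ATTRIBUTE_UNINITIALIZED char buf[4096];
      FillBuffer(buf, sizeof(buf));
    }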

+ 109 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/bit_cast_test.cc

@@ -0,0 +1,109 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Unit test for bit_cast template.
+
+#include <cstdint>
+#include <cstring>
+
+#include "gtest/gtest.h"
+#include "absl/base/casts.h"
+#include "absl/base/macros.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace {
+
+template <int N>
+struct marshall { char buf[N]; };
+
+template <typename T>
+void TestMarshall(const T values[], int num_values) {
+  for (int i = 0; i < num_values; ++i) {
+    T t0 = values[i];
+    marshall<sizeof(T)> m0 = absl::bit_cast<marshall<sizeof(T)> >(t0);
+    T t1 = absl::bit_cast<T>(m0);
+    marshall<sizeof(T)> m1 = absl::bit_cast<marshall<sizeof(T)> >(t1);
+    ASSERT_EQ(0, memcmp(&t0, &t1, sizeof(T)));
+    ASSERT_EQ(0, memcmp(&m0, &m1, sizeof(T)));
+  }
+}
+
+// Convert back and forth to an integral type.  The C++ standard does
+// not guarantee this will work, but we test that this works on all the
+// platforms we support.
+//
+// Likewise, we below make assumptions about sizeof(float) and
+// sizeof(double) which the standard does not guarantee, but which hold on the
+// platforms we support.
+
+template <typename T, typename I>
+void TestIntegral(const T values[], int num_values) {
+  for (int i = 0; i < num_values; ++i) {
+    T t0 = values[i];
+    I i0 = absl::bit_cast<I>(t0);
+    T t1 = absl::bit_cast<T>(i0);
+    I i1 = absl::bit_cast<I>(t1);
+    ASSERT_EQ(0, memcmp(&t0, &t1, sizeof(T)));
+    ASSERT_EQ(i0, i1);
+  }
+}
+
+TEST(BitCast, Bool) {
+  static const bool bool_list[] = { false, true };
+  TestMarshall<bool>(bool_list, ABSL_ARRAYSIZE(bool_list));
+}
+
+TEST(BitCast, Int32) {
+  static const int32_t int_list[] =
+    { 0, 1, 100, 2147483647, -1, -100, -2147483647, -2147483647-1 };
+  TestMarshall<int32_t>(int_list, ABSL_ARRAYSIZE(int_list));
+}
+
+TEST(BitCast, Int64) {
+  static const int64_t int64_list[] =
+    { 0, 1, 1LL << 40, -1, -(1LL<<40) };
+  TestMarshall<int64_t>(int64_list, ABSL_ARRAYSIZE(int64_list));
+}
+
+TEST(BitCast, Uint64) {
+  static const uint64_t uint64_list[] =
+    { 0, 1, 1LLU << 40, 1LLU << 63 };
+  TestMarshall<uint64_t>(uint64_list, ABSL_ARRAYSIZE(uint64_list));
+}
+
+TEST(BitCast, Float) {
+  static const float float_list[] =
+    { 0.0f, 1.0f, -1.0f, 10.0f, -10.0f,
+      1e10f, 1e20f, 1e-10f, 1e-20f,
+      2.71828f, 3.14159f };
+  TestMarshall<float>(float_list, ABSL_ARRAYSIZE(float_list));
+  TestIntegral<float, int>(float_list, ABSL_ARRAYSIZE(float_list));
+  TestIntegral<float, unsigned>(float_list, ABSL_ARRAYSIZE(float_list));
+}
+
+TEST(BitCast, Double) {
+  static const double double_list[] =
+    { 0.0, 1.0, -1.0, 10.0, -10.0,
+      1e10, 1e100, 1e-10, 1e-100,
+      2.718281828459045,
+      3.141592653589793238462643383279502884197169399375105820974944 };
+  TestMarshall<double>(double_list, ABSL_ARRAYSIZE(double_list));
+  TestIntegral<double, int64_t>(double_list, ABSL_ARRAYSIZE(double_list));
+  TestIntegral<double, uint64_t>(double_list, ABSL_ARRAYSIZE(double_list));
+}
+
+}  // namespace
+ABSL_NAMESPACE_END
+}  // namespace absl

+ 225 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/call_once.h

@@ -0,0 +1,225 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: call_once.h
+// -----------------------------------------------------------------------------
+//
+// This header file provides an Abseil version of `std::call_once` for invoking
+// a given function at most once, across all threads. This Abseil version is
+// faster than the C++11 version and incorporates the C++17 argument-passing
+// fix, so that (for example) non-const references may be passed to the invoked
+// function.
+
+#ifndef ABSL_BASE_CALL_ONCE_H_
+#define ABSL_BASE_CALL_ONCE_H_
+
+#include <algorithm>
+#include <atomic>
+#include <cstdint>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/internal/invoke.h"
+#include "absl/base/internal/low_level_scheduling.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/internal/spinlock_wait.h"
+#include "absl/base/macros.h"
+#include "absl/base/nullability.h"
+#include "absl/base/optimization.h"
+#include "absl/base/port.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+class once_flag;
+
+namespace base_internal {
+absl::Nonnull<std::atomic<uint32_t>*> ControlWord(
+    absl::Nonnull<absl::once_flag*> flag);
+}  // namespace base_internal
+
+// call_once()
+//
+// For all invocations using a given `once_flag`, invokes a given `fn` exactly
+// once across all threads. The first call to `call_once()` with a particular
+// `once_flag` argument (that does not throw an exception) will run the
+// specified function with the provided `args`; other calls with the same
+// `once_flag` argument will not run the function, but will wait
+// for the provided function to finish running (if it is still running).
+//
+// This mechanism provides a safe, simple, and fast mechanism for one-time
+// initialization in a multi-threaded process.
+//
+// Example:
+//
+// class MyInitClass {
+//  public:
+//  ...
+//  mutable absl::once_flag once_;
+//
+//  MyInitClass* init() const {
+//    absl::call_once(once_, &MyInitClass::Init, this);
+//    return ptr_;
+//  }
+//
+template <typename Callable, typename... Args>
+void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args);
+
+// once_flag
+//
+// Objects of this type are used to distinguish calls to `call_once()` and
+// ensure the provided function is only invoked once across all threads. This
+// type is not copyable or movable. However, it has a `constexpr`
+// constructor, and is safe to use as a namespace-scoped global variable.
+class once_flag {
+ public:
+  constexpr once_flag() : control_(0) {}
+  once_flag(const once_flag&) = delete;
+  once_flag& operator=(const once_flag&) = delete;
+
+ private:
+  friend absl::Nonnull<std::atomic<uint32_t>*> base_internal::ControlWord(
+      absl::Nonnull<once_flag*> flag);
+  std::atomic<uint32_t> control_;
+};
+
+//------------------------------------------------------------------------------
+// End of public interfaces.
+// Implementation details follow.
+//------------------------------------------------------------------------------
+
+namespace base_internal {
+
+// Like call_once, but uses KERNEL_ONLY scheduling. Intended to be used to
+// initialize entities used by the scheduler implementation.
+template <typename Callable, typename... Args>
+void LowLevelCallOnce(absl::Nonnull<absl::once_flag*> flag, Callable&& fn,
+                      Args&&... args);
+
+// Disables scheduling while on stack when scheduling mode is non-cooperative.
+// No effect for cooperative scheduling modes.
+class SchedulingHelper {
+ public:
+  explicit SchedulingHelper(base_internal::SchedulingMode mode) : mode_(mode) {
+    if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) {
+      guard_result_ = base_internal::SchedulingGuard::DisableRescheduling();
+    }
+  }
+
+  ~SchedulingHelper() {
+    if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) {
+      base_internal::SchedulingGuard::EnableRescheduling(guard_result_);
+    }
+  }
+
+ private:
+  base_internal::SchedulingMode mode_;
+  bool guard_result_ = false;
+};
+
+// Bit patterns for call_once state machine values.  Internal implementation
+// detail, not for use by clients.
+//
+// The bit patterns are arbitrarily chosen from unlikely values, to aid in
+// debugging.  However, kOnceInit must be 0, so that a zero-initialized
+// once_flag will be valid for immediate use.
+enum {
+  kOnceInit = 0,
+  kOnceRunning = 0x65C2937B,
+  kOnceWaiter = 0x05A308D2,
+  // A very small constant is chosen for kOnceDone so that it fits in a single
+  // compare-with-immediate instruction on most common ISAs.  This is verified
+  // for x86, POWER, and ARM.
+  kOnceDone = 221,    // Random Number
+};
+
+template <typename Callable, typename... Args>
+ABSL_ATTRIBUTE_NOINLINE void CallOnceImpl(
+    absl::Nonnull<std::atomic<uint32_t>*> control,
+    base_internal::SchedulingMode scheduling_mode, Callable&& fn,
+    Args&&... args) {
+#ifndef NDEBUG
+  {
+    uint32_t old_control = control->load(std::memory_order_relaxed);
+    if (old_control != kOnceInit &&
+        old_control != kOnceRunning &&
+        old_control != kOnceWaiter &&
+        old_control != kOnceDone) {
+      ABSL_RAW_LOG(FATAL, "Unexpected value for control word: 0x%lx",
+                   static_cast<unsigned long>(old_control));  // NOLINT
+    }
+  }
+#endif  // NDEBUG
+  static const base_internal::SpinLockWaitTransition trans[] = {
+      {kOnceInit, kOnceRunning, true},
+      {kOnceRunning, kOnceWaiter, false},
+      {kOnceDone, kOnceDone, true}};
+
+  // Must do this before potentially modifying control word's state.
+  base_internal::SchedulingHelper maybe_disable_scheduling(scheduling_mode);
+  // Short circuit the simplest case to avoid procedure call overhead.
+  // The base_internal::SpinLockWait() call returns either kOnceInit or
+  // kOnceDone. If it returns kOnceDone, it must have loaded the control word
+  // with std::memory_order_acquire and seen a value of kOnceDone.
+  uint32_t old_control = kOnceInit;
+  if (control->compare_exchange_strong(old_control, kOnceRunning,
+                                       std::memory_order_relaxed) ||
+      base_internal::SpinLockWait(control, ABSL_ARRAYSIZE(trans), trans,
+                                  scheduling_mode) == kOnceInit) {
+    base_internal::invoke(std::forward<Callable>(fn),
+                          std::forward<Args>(args)...);
+    old_control =
+        control->exchange(base_internal::kOnceDone, std::memory_order_release);
+    if (old_control == base_internal::kOnceWaiter) {
+      base_internal::SpinLockWake(control, true);
+    }
+  }  // else *control is already kOnceDone
+}
+
+inline absl::Nonnull<std::atomic<uint32_t>*> ControlWord(
+    absl::Nonnull<once_flag*> flag) {
+  return &flag->control_;
+}
+
+template <typename Callable, typename... Args>
+void LowLevelCallOnce(absl::Nonnull<absl::once_flag*> flag, Callable&& fn,
+                      Args&&... args) {
+  std::atomic<uint32_t>* once = base_internal::ControlWord(flag);
+  uint32_t s = once->load(std::memory_order_acquire);
+  if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) {
+    base_internal::CallOnceImpl(once, base_internal::SCHEDULE_KERNEL_ONLY,
+                                std::forward<Callable>(fn),
+                                std::forward<Args>(args)...);
+  }
+}
+
+}  // namespace base_internal
+
+template <typename Callable, typename... Args>
+void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args) {
+  std::atomic<uint32_t>* once = base_internal::ControlWord(&flag);
+  uint32_t s = once->load(std::memory_order_acquire);
+  if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) {
+    base_internal::CallOnceImpl(
+        once, base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL,
+        std::forward<Callable>(fn), std::forward<Args>(args)...);
+  }
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_CALL_ONCE_H_
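
A short usage sketch for absl::call_once (illustrative only; `Logger` and `GlobalLogger` are invented names, not part of the vendored sources):

    #include <string>
    #include <utility>

    #include "absl/base/call_once.h"

    class Logger {
     public:
      explicit Logger(std::string path) : path_(std::move(path)) {}

     private:
      std::string path_;
    };

    Logger* GlobalLogger() {
      static absl::once_flag once;      // constexpr-constructible, so safe as a
      static Logger* logger = nullptr;  // namespace-scoped or local static
      // The first caller constructs the Logger; any concurrent callers block
      // until construction finishes, after which all of them see the pointer.
      absl::call_once(once, [] { logger = new Logger("/tmp/app.log"); });
      return logger;
    }

In this sketch a function-local static Logger would work just as well; call_once earns its keep when the once_flag and the data it guards live apart, for example as class members initialized lazily from several methods.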

+ 107 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/call_once_test.cc

@@ -0,0 +1,107 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/call_once.h"
+
+#include <thread>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/const_init.h"
+#include "absl/base/thread_annotations.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace {
+
+absl::once_flag once;
+
+ABSL_CONST_INIT Mutex counters_mu(absl::kConstInit);
+
+int running_thread_count ABSL_GUARDED_BY(counters_mu) = 0;
+int call_once_invoke_count ABSL_GUARDED_BY(counters_mu) = 0;
+int call_once_finished_count ABSL_GUARDED_BY(counters_mu) = 0;
+int call_once_return_count ABSL_GUARDED_BY(counters_mu) = 0;
+bool done_blocking ABSL_GUARDED_BY(counters_mu) = false;
+
+// Function to be called from absl::call_once.  Waits for a notification.
+void WaitAndIncrement() {
+  counters_mu.Lock();
+  ++call_once_invoke_count;
+  counters_mu.Unlock();
+
+  counters_mu.LockWhen(Condition(&done_blocking));
+  ++call_once_finished_count;
+  counters_mu.Unlock();
+}
+
+void ThreadBody() {
+  counters_mu.Lock();
+  ++running_thread_count;
+  counters_mu.Unlock();
+
+  absl::call_once(once, WaitAndIncrement);
+
+  counters_mu.Lock();
+  ++call_once_return_count;
+  counters_mu.Unlock();
+}
+
+// Returns true if all threads are set up for the test.
+bool ThreadsAreSetup(void*) ABSL_EXCLUSIVE_LOCKS_REQUIRED(counters_mu) {
+  // All ten threads must be running, and WaitAndIncrement should be blocked.
+  return running_thread_count == 10 && call_once_invoke_count == 1;
+}
+
+TEST(CallOnceTest, ExecutionCount) {
+  std::vector<std::thread> threads;
+
+  // Start 10 threads all calling call_once on the same once_flag.
+  for (int i = 0; i < 10; ++i) {
+    threads.emplace_back(ThreadBody);
+  }
+
+
+  // Wait until all ten threads have started, and WaitAndIncrement has been
+  // invoked.
+  counters_mu.LockWhen(Condition(ThreadsAreSetup, nullptr));
+
+  // WaitAndIncrement should have been invoked by exactly one call_once()
+  // instance.  That thread should be blocking on a notification, and all other
+  // call_once instances should be blocking as well.
+  EXPECT_EQ(call_once_invoke_count, 1);
+  EXPECT_EQ(call_once_finished_count, 0);
+  EXPECT_EQ(call_once_return_count, 0);
+
+  // Allow WaitAndIncrement to finish executing.  Once it does, the other
+  // call_once waiters will be unblocked.
+  done_blocking = true;
+  counters_mu.Unlock();
+
+  for (std::thread& thread : threads) {
+    thread.join();
+  }
+
+  counters_mu.Lock();
+  EXPECT_EQ(call_once_invoke_count, 1);
+  EXPECT_EQ(call_once_finished_count, 1);
+  EXPECT_EQ(call_once_return_count, 10);
+  counters_mu.Unlock();
+}
+
+}  // namespace
+ABSL_NAMESPACE_END
+}  // namespace absl

+ 180 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/casts.h

@@ -0,0 +1,180 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: casts.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines casting templates to fit use cases not covered by
+// the standard casts provided in the C++ standard. As with all cast operations,
+// use these with caution and only if alternatives do not exist.
+
+#ifndef ABSL_BASE_CASTS_H_
+#define ABSL_BASE_CASTS_H_
+
+#include <cstring>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+#include <bit>  // For std::bit_cast.
+#endif  // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
+#include "absl/base/internal/identity.h"
+#include "absl/base/macros.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// implicit_cast()
+//
+// Performs an implicit conversion between types following the language
+// rules for implicit conversion; if an implicit conversion is otherwise
+// allowed by the language in the given context, this function performs such an
+// implicit conversion.
+//
+// Example:
+//
+//   // If the context allows implicit conversion:
+//   From from;
+//   To to = from;
+//
+//   // Such code can be replaced by:
+//   implicit_cast<To>(from);
+//
+// An `implicit_cast()` may also be used to annotate numeric type conversions
+// that, although safe, may produce compiler warnings (such as `long` to `int`).
+// Additionally, an `implicit_cast()` is also useful within return statements to
+// indicate a specific implicit conversion is being undertaken.
+//
+// Example:
+//
+//   return implicit_cast<double>(size_in_bytes) / capacity_;
+//
+// Annotating code with `implicit_cast()` allows you to explicitly select
+// particular overloads and template instantiations, while providing a safer
+// cast than `reinterpret_cast()` or `static_cast()`.
+//
+// Additionally, an `implicit_cast()` can be used to allow upcasting within a
+// type hierarchy where incorrect use of `static_cast()` could accidentally
+// allow downcasting.
+//
+// Finally, an `implicit_cast()` can be used to perform implicit conversions
+// from unrelated types that otherwise couldn't be implicitly cast directly;
+// C++ will normally only implicitly cast "one step" in such conversions.
+//
+// That is, if C is a type which can be implicitly converted to B, with B being
+// a type that can be implicitly converted to A, an `implicit_cast()` can be
+// used to convert C to B (which the compiler can then implicitly convert to A
+// using language rules).
+//
+// Example:
+//
+//   // Assume an object C is convertible to B, which is implicitly convertible
+//   // to A
+//   A a = implicit_cast<B>(C);
+//
+// Such implicit cast chaining may be useful within template logic.
+template <typename To>
+constexpr To implicit_cast(typename absl::internal::type_identity_t<To> to) {
+  return to;
+}
+
+// bit_cast()
+//
+// Creates a value of the new type `Dest` whose representation is the same as
+// that of the argument, which is of (deduced) type `Source` (a "bitwise cast";
+// every bit in the value representation of the result is equal to the
+// corresponding bit in the object representation of the source). Source and
+// destination types must be of the same size, and both types must be trivially
+// copyable.
+//
+// As with most casts, use with caution. A `bit_cast()` might be needed when you
+// need to treat a value as the value of some other type, for example, to access
+// the individual bits of an object which are not normally accessible through
+// the object's type, such as for working with the binary representation of a
+// floating point value:
+//
+//   float f = 3.14159265358979;
+//   int i = bit_cast<int>(f);
+//   // i = 0x40490fdb
+//
+// Reinterpreting and accessing a value directly as a different type (as shown
+// below) usually results in undefined behavior.
+//
+// Example:
+//
+//   // WRONG
+//   float f = 3.14159265358979;
+//   int i = reinterpret_cast<int&>(f);    // Wrong
+//   int j = *reinterpret_cast<int*>(&f);  // Equally wrong
+//   int k = *bit_cast<int*>(&f);          // Equally wrong
+//
+// Reinterpret-casting results in undefined behavior according to the ISO C++
+// specification, section [basic.lval]. Roughly, this section says: if an object
+// in memory has one type, and a program accesses it with a different type, the
+// result is undefined behavior for most choices of "different type".
+//
+// Using bit_cast on a pointer and then dereferencing it is no better than using
+// reinterpret_cast. You should only use bit_cast on the value itself.
+//
+// Such casting results in type punning: holding an object in memory of one type
+// and reading its bits back using a different type. A `bit_cast()` avoids this
+// issue by copying the object representation to a new value, which avoids
+// introducing this undefined behavior (since the original value is never
+// accessed in the wrong way).
+//
+// The requirements of `absl::bit_cast` are more strict than those of
+// `std::bit_cast` unless compiler support is available. Specifically, without
+// compiler support, this implementation also requires `Dest` to be
+// default-constructible. In C++20, `absl::bit_cast` is replaced by
+// `std::bit_cast`.
+#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
+using std::bit_cast;
+
+#else  // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
+template <
+    typename Dest, typename Source,
+    typename std::enable_if<sizeof(Dest) == sizeof(Source) &&
+                                std::is_trivially_copyable<Source>::value &&
+                                std::is_trivially_copyable<Dest>::value
+#if !ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+                                && std::is_default_constructible<Dest>::value
+#endif  // !ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+                            ,
+                            int>::type = 0>
+#if ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+inline constexpr Dest bit_cast(const Source& source) {
+  return __builtin_bit_cast(Dest, source);
+}
+#else  // ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+inline Dest bit_cast(const Source& source) {
+  Dest dest;
+  memcpy(static_cast<void*>(std::addressof(dest)),
+         static_cast<const void*>(std::addressof(source)), sizeof(dest));
+  return dest;
+}
+#endif  // ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+
+#endif  // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_CASTS_H_
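
A short usage sketch for the two casts above (illustrative only; `Demo` is an invented name):

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    #include "absl/base/casts.h"

    void Demo() {
      // implicit_cast: make an otherwise-silent conversion visible and
      // intentional (here a long -> int narrowing known to be in range).
      long big = 42;
      int small = absl::implicit_cast<int>(big);

      // bit_cast: read the bit pattern of a float without undefined behavior.
      float f = 3.14159265f;
      std::uint32_t bits = absl::bit_cast<std::uint32_t>(f);
      std::printf("%d, %f = 0x%08" PRIx32 "\n", small, f, bits);
    }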

+ 964 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/config.h

@@ -0,0 +1,964 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: config.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a set of macros for checking the presence of
+// important compiler and platform features. Such macros can be used to
+// produce portable code by parameterizing compilation based on the presence or
+// lack of a given feature.
+//
+// We define a "feature" as some interface we wish to program to: for example,
+// a library function or system call. A value of `1` indicates support for
+// that feature; any other value indicates the feature support is undefined.
+//
+// Example:
+//
+// Suppose a programmer wants to write a program that uses the 'mmap()' system
+// call. The Abseil macro for that feature (`ABSL_HAVE_MMAP`) allows you to
+// selectively include the `mmap.h` header and bracket code using that feature
+// in the macro:
+//
+//   #include "absl/base/config.h"
+//
+//   #ifdef ABSL_HAVE_MMAP
+//   #include "sys/mman.h"
+//   #endif  //ABSL_HAVE_MMAP
+//
+//   ...
+//   #ifdef ABSL_HAVE_MMAP
+//   void *ptr = mmap(...);
+//   ...
+//   #endif  // ABSL_HAVE_MMAP
+
+#ifndef ABSL_BASE_CONFIG_H_
+#define ABSL_BASE_CONFIG_H_
+
+// Included for the __GLIBC__ macro (or similar macros on other systems).
+#include <limits.h>
+
+#ifdef __cplusplus
+// Included for __GLIBCXX__, _LIBCPP_VERSION
+#include <cstddef>
+#endif  // __cplusplus
+
+// ABSL_INTERNAL_CPLUSPLUS_LANG
+//
+// MSVC does not set the value of __cplusplus correctly, but instead uses
+// _MSVC_LANG as a stand-in.
+// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros
+//
+// However, there are reports that MSVC even sets _MSVC_LANG incorrectly at
+// times, for example:
+// https://github.com/microsoft/vscode-cpptools/issues/1770
+// https://reviews.llvm.org/D70996
+//
+// For this reason, this symbol is considered INTERNAL and code outside of
+// Abseil must not use it.
+#if defined(_MSVC_LANG)
+#define ABSL_INTERNAL_CPLUSPLUS_LANG _MSVC_LANG
+#elif defined(__cplusplus)
+#define ABSL_INTERNAL_CPLUSPLUS_LANG __cplusplus
+#endif
+
+#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+    ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L
+// Include library feature test macros.
+#include <version>
+#endif
+
+#if defined(__APPLE__)
+// Included for TARGET_OS_IPHONE, __IPHONE_OS_VERSION_MIN_REQUIRED,
+// __IPHONE_8_0.
+#include <Availability.h>
+#include <TargetConditionals.h>
+#endif
+
+#include "absl/base/options.h"
+#include "absl/base/policy_checks.h"
+
+// Abseil long-term support (LTS) releases will define
+// `ABSL_LTS_RELEASE_VERSION` to the integer representing the date string of the
+// LTS release version, and will define `ABSL_LTS_RELEASE_PATCH_LEVEL` to the
+// integer representing the patch-level for that release.
+//
+// For example, for LTS release version "20300401.2", this would give us
+// ABSL_LTS_RELEASE_VERSION == 20300401 && ABSL_LTS_RELEASE_PATCH_LEVEL == 2
+//
+// These symbols will not be defined in non-LTS code.
+//
+// Abseil recommends that clients live-at-head. Therefore, if you are using
+// these symbols to assert a minimum version requirement, we recommend you do it
+// as
+//
+// #if defined(ABSL_LTS_RELEASE_VERSION) && ABSL_LTS_RELEASE_VERSION < 20300401
+// #error Project foo requires Abseil LTS version >= 20300401
+// #endif
+//
+// The `defined(ABSL_LTS_RELEASE_VERSION)` part of the check excludes
+// live-at-head clients from the minimum version assertion.
+//
+// See https://abseil.io/about/releases for more information on Abseil release
+// management.
+//
+// LTS releases can be obtained from
+// https://github.com/abseil/abseil-cpp/releases.
+#undef ABSL_LTS_RELEASE_VERSION
+#undef ABSL_LTS_RELEASE_PATCH_LEVEL
+
+// Helper macro to convert a CPP variable to a string literal.
+#define ABSL_INTERNAL_DO_TOKEN_STR(x) #x
+#define ABSL_INTERNAL_TOKEN_STR(x) ABSL_INTERNAL_DO_TOKEN_STR(x)
+
+// -----------------------------------------------------------------------------
+// Abseil namespace annotations
+// -----------------------------------------------------------------------------
+
+// ABSL_NAMESPACE_BEGIN/ABSL_NAMESPACE_END
+//
+// An annotation placed at the beginning/end of each `namespace absl` scope.
+// This is used to inject an inline namespace.
+//
+// The proper way to write Abseil code in the `absl` namespace is:
+//
+// namespace absl {
+// ABSL_NAMESPACE_BEGIN
+//
+// void Foo();  // absl::Foo().
+//
+// ABSL_NAMESPACE_END
+// }  // namespace absl
+//
+// Users of Abseil should not use these macros, because users of Abseil should
+// not write `namespace absl {` in their own code for any reason.  (Abseil does
+// not support forward declarations of its own types, nor does it support
+// user-provided specialization of Abseil templates.  Code that violates these
+// rules may be broken without warning.)
+#if !defined(ABSL_OPTION_USE_INLINE_NAMESPACE) || \
+    !defined(ABSL_OPTION_INLINE_NAMESPACE_NAME)
+#error options.h is misconfigured.
+#endif
+
+// Check that ABSL_OPTION_INLINE_NAMESPACE_NAME is neither "head" nor ""
+#if defined(__cplusplus) && ABSL_OPTION_USE_INLINE_NAMESPACE == 1
+
+#define ABSL_INTERNAL_INLINE_NAMESPACE_STR \
+  ABSL_INTERNAL_TOKEN_STR(ABSL_OPTION_INLINE_NAMESPACE_NAME)
+
+static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != '\0',
+              "options.h misconfigured: ABSL_OPTION_INLINE_NAMESPACE_NAME must "
+              "not be empty.");
+static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||
+                  ABSL_INTERNAL_INLINE_NAMESPACE_STR[1] != 'e' ||
+                  ABSL_INTERNAL_INLINE_NAMESPACE_STR[2] != 'a' ||
+                  ABSL_INTERNAL_INLINE_NAMESPACE_STR[3] != 'd' ||
+                  ABSL_INTERNAL_INLINE_NAMESPACE_STR[4] != '\0',
+              "options.h misconfigured: ABSL_OPTION_INLINE_NAMESPACE_NAME must "
+              "be changed to a new, unique identifier name.");
+
+#endif
+
+#if ABSL_OPTION_USE_INLINE_NAMESPACE == 0
+#define ABSL_NAMESPACE_BEGIN
+#define ABSL_NAMESPACE_END
+#define ABSL_INTERNAL_C_SYMBOL(x) x
+#elif ABSL_OPTION_USE_INLINE_NAMESPACE == 1
+#define ABSL_NAMESPACE_BEGIN \
+  inline namespace ABSL_OPTION_INLINE_NAMESPACE_NAME {
+#define ABSL_NAMESPACE_END }
+#define ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v) x##_##v
+#define ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, v) \
+  ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v)
+#define ABSL_INTERNAL_C_SYMBOL(x) \
+  ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, ABSL_OPTION_INLINE_NAMESPACE_NAME)
+#else
+#error options.h is misconfigured.
+#endif
+
+// -----------------------------------------------------------------------------
+// Compiler Feature Checks
+// -----------------------------------------------------------------------------
+
+// ABSL_HAVE_BUILTIN()
+//
+// Checks whether the compiler supports a Clang Feature Checking Macro, and if
+// so, checks whether it supports the provided builtin function "x" where x
+// is one of the functions noted in
+// https://clang.llvm.org/docs/LanguageExtensions.html
+//
+// Note: Use this macro to avoid an extra level of #ifdef __has_builtin check.
+// http://releases.llvm.org/3.3/tools/clang/docs/LanguageExtensions.html
+#ifdef __has_builtin
+#define ABSL_HAVE_BUILTIN(x) __has_builtin(x)
+#else
+#define ABSL_HAVE_BUILTIN(x) 0
+#endif
+
+#ifdef __has_feature
+#define ABSL_HAVE_FEATURE(f) __has_feature(f)
+#else
+#define ABSL_HAVE_FEATURE(f) 0
+#endif
+
+// Portable check for GCC minimum version:
+// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html
+#if defined(__GNUC__) && defined(__GNUC_MINOR__)
+#define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) \
+  (__GNUC__ > (x) || __GNUC__ == (x) && __GNUC_MINOR__ >= (y))
+#else
+#define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) 0
+#endif
+
+#if defined(__clang__) && defined(__clang_major__) && defined(__clang_minor__)
+#define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) \
+  (__clang_major__ > (x) || __clang_major__ == (x) && __clang_minor__ >= (y))
+#else
+#define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) 0
+#endif
+
+// ABSL_HAVE_TLS is defined to 1 when __thread should be supported.
+// We assume __thread is supported on Linux when compiled with Clang or
+// compiled against libstdc++ with _GLIBCXX_HAVE_TLS defined.
+#ifdef ABSL_HAVE_TLS
+#error ABSL_HAVE_TLS cannot be directly set
+#elif (defined(__linux__)) && (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS))
+#define ABSL_HAVE_TLS 1
+#endif
+
+// ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
+//
+// Checks whether `std::is_trivially_destructible<T>` is supported.
+#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
+#error ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE cannot be directly set
+#else
+#define ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE 1
+#endif
+
+// ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE
+//
+// Checks whether `std::is_trivially_default_constructible<T>` and
+// `std::is_trivially_copy_constructible<T>` are supported.
+#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE
+#error ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE cannot be directly set
+#else
+#define ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE 1
+#endif
+
+// ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
+//
+// Checks whether `std::is_trivially_copy_assignable<T>` is supported.
+#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE
+#error ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE cannot be directly set
+#else
+#define ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE 1
+#endif
+
+// ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE
+//
+// Checks whether `std::is_trivially_copyable<T>` is supported.
+#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE
+#error ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE cannot be directly set
+#else
+#define ABSL_HAVE_STD_IS_TRIVIALLY_COPYABLE 1
+#endif
+
+
+// ABSL_HAVE_THREAD_LOCAL
+//
+// DEPRECATED - `thread_local` is available on all supported platforms.
+// Checks whether C++11's `thread_local` storage duration specifier is
+// supported.
+#ifdef ABSL_HAVE_THREAD_LOCAL
+#error ABSL_HAVE_THREAD_LOCAL cannot be directly set
+#else
+#define ABSL_HAVE_THREAD_LOCAL 1
+#endif
+
+// ABSL_HAVE_INTRINSIC_INT128
+//
+// Checks whether the __int128 compiler extension for a 128-bit integral type is
+// supported.
+//
+// Note: __SIZEOF_INT128__ is defined by Clang and GCC when __int128 is
+// supported, but we avoid using it in certain cases:
+// * On Clang:
+//   * Building using Clang for Windows, where the Clang runtime library has
+//     128-bit support only on LP64 architectures, but Windows is LLP64.
+// * On Nvidia's nvcc:
+//   * nvcc also defines __GNUC__ and __SIZEOF_INT128__, but not all versions
+//     actually support __int128.
+#ifdef ABSL_HAVE_INTRINSIC_INT128
+#error ABSL_HAVE_INTRINSIC_INT128 cannot be directly set
+#elif defined(__SIZEOF_INT128__)
+#if (defined(__clang__) && !defined(_WIN32)) ||           \
+    (defined(__CUDACC__) && __CUDACC_VER_MAJOR__ >= 9) || \
+    (defined(__GNUC__) && !defined(__clang__) && !defined(__CUDACC__))
+#define ABSL_HAVE_INTRINSIC_INT128 1
+#elif defined(__CUDACC__)
+// __CUDACC_VER__ is a full version number before CUDA 9, and is defined to a
+// string explaining that it has been removed starting with CUDA 9. We use
+// nested #ifs because there is no short-circuiting in the preprocessor.
+// NOTE: `__CUDACC__` could be undefined while `__CUDACC_VER__` is defined.
+#if __CUDACC_VER__ >= 70000
+#define ABSL_HAVE_INTRINSIC_INT128 1
+#endif  // __CUDACC_VER__ >= 70000
+#endif  // defined(__CUDACC__)
+#endif  // ABSL_HAVE_INTRINSIC_INT128
+
+// ABSL_HAVE_EXCEPTIONS
+//
+// Checks whether the compiler both supports and enables exceptions. Many
+// compilers support a "no exceptions" mode that disables exceptions.
+//
+// Generally, when ABSL_HAVE_EXCEPTIONS is not defined:
+//
+// * Code using `throw` and `try` may not compile.
+// * The `noexcept` specifier will still compile and behave as normal.
+// * The `noexcept` operator may still return `false`.
+//
+// For further details, consult the compiler's documentation.
+#ifdef ABSL_HAVE_EXCEPTIONS
+#error ABSL_HAVE_EXCEPTIONS cannot be directly set.
+#elif ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(3, 6)
+// Clang >= 3.6
+#if ABSL_HAVE_FEATURE(cxx_exceptions)
+#define ABSL_HAVE_EXCEPTIONS 1
+#endif  // ABSL_HAVE_FEATURE(cxx_exceptions)
+#elif defined(__clang__)
+// Clang < 3.6
+// http://releases.llvm.org/3.6.0/tools/clang/docs/ReleaseNotes.html#the-exceptions-macro
+#if defined(__EXCEPTIONS) && ABSL_HAVE_FEATURE(cxx_exceptions)
+#define ABSL_HAVE_EXCEPTIONS 1
+#endif  // defined(__EXCEPTIONS) && ABSL_HAVE_FEATURE(cxx_exceptions)
+// Handle remaining special cases and default to exceptions being supported.
+#elif !(defined(__GNUC__) && !defined(__cpp_exceptions)) && \
+    !(defined(_MSC_VER) && !defined(_CPPUNWIND))
+#define ABSL_HAVE_EXCEPTIONS 1
+#endif
+
+// -----------------------------------------------------------------------------
+// Platform Feature Checks
+// -----------------------------------------------------------------------------
+
+// Currently supported operating systems and associated preprocessor
+// symbols:
+//
+//   Linux and Linux-derived           __linux__
+//   Android                           __ANDROID__ (implies __linux__)
+//   Linux (non-Android)               __linux__ && !__ANDROID__
+//   Darwin (macOS and iOS)            __APPLE__
+//   Akaros (http://akaros.org)        __ros__
+//   Windows                           _WIN32
+//   NaCL                              __native_client__
+//   AsmJS                             __asmjs__
+//   WebAssembly (Emscripten)          __EMSCRIPTEN__
+//   Fuchsia                           __Fuchsia__
+//
+// Note that since Android defines both __ANDROID__ and __linux__, one
+// may probe for either Linux or Android by simply testing for __linux__.
+
+// ABSL_HAVE_MMAP
+//
+// Checks whether the platform has an mmap(2) implementation as defined in
+// POSIX.1-2001.
+#ifdef ABSL_HAVE_MMAP
+#error ABSL_HAVE_MMAP cannot be directly set
+#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) ||    \
+    defined(_AIX) || defined(__ros__) || defined(__native_client__) ||       \
+    defined(__asmjs__) || defined(__EMSCRIPTEN__) || defined(__Fuchsia__) || \
+    defined(__sun) || defined(__myriad2__) || defined(__HAIKU__) ||          \
+    defined(__OpenBSD__) || defined(__NetBSD__) || defined(__QNX__) ||       \
+    defined(__VXWORKS__) || defined(__hexagon__)
+#define ABSL_HAVE_MMAP 1
+#endif
+
+// ABSL_HAVE_PTHREAD_GETSCHEDPARAM
+//
+// Checks whether the platform implements the pthread_(get|set)schedparam(3)
+// functions as defined in POSIX.1-2001.
+#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM
+#error ABSL_HAVE_PTHREAD_GETSCHEDPARAM cannot be directly set
+#elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
+    defined(_AIX) || defined(__ros__) || defined(__OpenBSD__) ||          \
+    defined(__NetBSD__) || defined(__VXWORKS__)
+#define ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1
+#endif
+
+// ABSL_HAVE_SCHED_GETCPU
+//
+// Checks whether sched_getcpu is available.
+#ifdef ABSL_HAVE_SCHED_GETCPU
+#error ABSL_HAVE_SCHED_GETCPU cannot be directly set
+#elif defined(__linux__)
+#define ABSL_HAVE_SCHED_GETCPU 1
+#endif
+
+// ABSL_HAVE_SCHED_YIELD
+//
+// Checks whether the platform implements sched_yield(2) as defined in
+// POSIX.1-2001.
+#ifdef ABSL_HAVE_SCHED_YIELD
+#error ABSL_HAVE_SCHED_YIELD cannot be directly set
+#elif defined(__linux__) || defined(__ros__) || defined(__native_client__) || \
+    defined(__VXWORKS__)
+#define ABSL_HAVE_SCHED_YIELD 1
+#endif
+
+// ABSL_HAVE_SEMAPHORE_H
+//
+// Checks whether the platform supports the <semaphore.h> header and sem_init(3)
+// family of functions as standardized in POSIX.1-2001.
+//
+// Note: While Apple provides <semaphore.h> for both iOS and macOS, it is
+// explicitly deprecated and will cause build failures if enabled for those
+// platforms.  We side-step the issue by not defining it here for Apple
+// platforms.
+#ifdef ABSL_HAVE_SEMAPHORE_H
+#error ABSL_HAVE_SEMAPHORE_H cannot be directly set
+#elif defined(__linux__) || defined(__ros__) || defined(__VXWORKS__)
+#define ABSL_HAVE_SEMAPHORE_H 1
+#endif
+
+// ABSL_HAVE_ALARM
+//
+// Checks whether the platform supports the <signal.h> header and alarm(2)
+// function as standardized in POSIX.1-2001.
+#ifdef ABSL_HAVE_ALARM
+#error ABSL_HAVE_ALARM cannot be directly set
+#elif defined(__GOOGLE_GRTE_VERSION__)
+// feature tests for Google's GRTE
+#define ABSL_HAVE_ALARM 1
+#elif defined(__GLIBC__)
+// feature test for glibc
+#define ABSL_HAVE_ALARM 1
+#elif defined(_MSC_VER)
+// feature tests for Microsoft's library
+#elif defined(__MINGW32__)
+// mingw32 doesn't provide alarm(2):
+// https://osdn.net/projects/mingw/scm/git/mingw-org-wsl/blobs/5.2-trunk/mingwrt/include/unistd.h
+// mingw-w64 provides a no-op implementation:
+// https://sourceforge.net/p/mingw-w64/mingw-w64/ci/master/tree/mingw-w64-crt/misc/alarm.c
+#elif defined(__EMSCRIPTEN__)
+// emscripten doesn't support signals
+#elif defined(__wasi__)
+// WASI doesn't support signals
+#elif defined(__Fuchsia__)
+// Signals don't exist on fuchsia.
+#elif defined(__native_client__)
+// Native Client does not support signals.
+#elif defined(__hexagon__)
+// Signals don't exist on hexagon/QuRT
+#else
+// other standard libraries
+#define ABSL_HAVE_ALARM 1
+#endif
+
+// ABSL_IS_LITTLE_ENDIAN
+// ABSL_IS_BIG_ENDIAN
+//
+// Checks the endianness of the platform.
+//
+// Notes: uses the built in endian macros provided by GCC (since 4.6) and
+// Clang (since 3.2); see
+// https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html.
+// Otherwise, if _WIN32, assume little endian. Otherwise, bail with an error.
+#if defined(ABSL_IS_BIG_ENDIAN)
+#error "ABSL_IS_BIG_ENDIAN cannot be directly set."
+#endif
+#if defined(ABSL_IS_LITTLE_ENDIAN)
+#error "ABSL_IS_LITTLE_ENDIAN cannot be directly set."
+#endif
+
+#if (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \
+     __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+#define ABSL_IS_LITTLE_ENDIAN 1
+#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
+    __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define ABSL_IS_BIG_ENDIAN 1
+#elif defined(_WIN32)
+#define ABSL_IS_LITTLE_ENDIAN 1
+#else
+#error "absl endian detection needs to be set up for your compiler"
+#endif
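
A sketch of code keyed on these endianness macros (illustrative only; `LoadLittleEndian32` is an invented name, not part of the vendored sources):

    #include <cstdint>
    #include <cstring>

    #include "absl/base/config.h"

    std::uint32_t LoadLittleEndian32(const unsigned char* p) {
    #if defined(ABSL_IS_LITTLE_ENDIAN)
      std::uint32_t v;
      std::memcpy(&v, p, sizeof(v));  // host order already matches the wire
      return v;
    #else
      // Big-endian host: assemble the value byte by byte.
      return static_cast<std::uint32_t>(p[0]) |
             (static_cast<std::uint32_t>(p[1]) << 8) |
             (static_cast<std::uint32_t>(p[2]) << 16) |
             (static_cast<std::uint32_t>(p[3]) << 24);
    #endif
    }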
+
+// macOS < 10.13 and iOS < 12 don't support <any>, <optional>, or <variant>
+// because the libc++ shared library shipped on the system doesn't have the
+// requisite exported symbols.  See
+// https://github.com/abseil/abseil-cpp/issues/207 and
+// https://developer.apple.com/documentation/xcode_release_notes/xcode_10_release_notes
+//
+// libc++ spells out the availability requirements in the file
+// llvm-project/libcxx/include/__config via the #define
+// _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS. The set of versions has been
+// modified a few times, via
+// https://github.com/llvm/llvm-project/commit/7fb40e1569dd66292b647f4501b85517e9247953
+// and
+// https://github.com/llvm/llvm-project/commit/0bc451e7e137c4ccadcd3377250874f641ca514a
+// The second has the actually correct versions and is thus what we copy here.
+#if defined(__APPLE__) &&                                         \
+    ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) &&   \
+      __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101300) ||  \
+     (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) &&  \
+      __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \
+     (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) &&   \
+      __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000) ||   \
+     (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) &&      \
+      __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 120000))
+#define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 1
+#else
+#define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 0
+#endif
+
+// ABSL_HAVE_STD_ANY
+//
+// Checks whether C++17 std::any is available.
+#ifdef ABSL_HAVE_STD_ANY
+#error "ABSL_HAVE_STD_ANY cannot be directly set."
+#elif defined(__cpp_lib_any) && __cpp_lib_any >= 201606L
+#define ABSL_HAVE_STD_ANY 1
+#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+    ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \
+    !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#define ABSL_HAVE_STD_ANY 1
+#endif
+
+// ABSL_HAVE_STD_OPTIONAL
+//
+// Checks whether C++17 std::optional is available.
+#ifdef ABSL_HAVE_STD_OPTIONAL
+#error "ABSL_HAVE_STD_OPTIONAL cannot be directly set."
+#elif defined(__cpp_lib_optional) && __cpp_lib_optional >= 202106L
+#define ABSL_HAVE_STD_OPTIONAL 1
+#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+    ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \
+    !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#define ABSL_HAVE_STD_OPTIONAL 1
+#endif
+
+// ABSL_HAVE_STD_VARIANT
+//
+// Checks whether C++17 std::variant is available.
+#ifdef ABSL_HAVE_STD_VARIANT
+#error "ABSL_HAVE_STD_VARIANT cannot be directly set."
+#elif defined(__cpp_lib_variant) && __cpp_lib_variant >= 201606L
+#define ABSL_HAVE_STD_VARIANT 1
+#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+    ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L && \
+    !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE
+#define ABSL_HAVE_STD_VARIANT 1
+#endif
+
+// ABSL_HAVE_STD_STRING_VIEW
+//
+// Checks whether C++17 std::string_view is available.
+#ifdef ABSL_HAVE_STD_STRING_VIEW
+#error "ABSL_HAVE_STD_STRING_VIEW cannot be directly set."
+#elif defined(__cpp_lib_string_view) && __cpp_lib_string_view >= 201606L
+#define ABSL_HAVE_STD_STRING_VIEW 1
+#elif defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+    ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+#define ABSL_HAVE_STD_STRING_VIEW 1
+#endif
+
+// ABSL_HAVE_STD_ORDERING
+//
+// Checks whether C++20 std::{partial,weak,strong}_ordering are available.
+//
+// __cpp_lib_three_way_comparison is missing on libc++
+// (https://github.com/llvm/llvm-project/issues/73953) so treat it as defined
+// when building in C++20 mode.
+#ifdef ABSL_HAVE_STD_ORDERING
+#error "ABSL_HAVE_STD_ORDERING cannot be directly set."
+#elif (defined(__cpp_lib_three_way_comparison) &&    \
+       __cpp_lib_three_way_comparison >= 201907L) || \
+    (defined(ABSL_INTERNAL_CPLUSPLUS_LANG) &&        \
+     ABSL_INTERNAL_CPLUSPLUS_LANG >= 202002L)
+#define ABSL_HAVE_STD_ORDERING 1
+#endif
+
+// ABSL_USES_STD_ANY
+//
+// Indicates whether absl::any is an alias for std::any.
+#if !defined(ABSL_OPTION_USE_STD_ANY)
+#error options.h is misconfigured.
+#elif ABSL_OPTION_USE_STD_ANY == 0 || \
+    (ABSL_OPTION_USE_STD_ANY == 2 && !defined(ABSL_HAVE_STD_ANY))
+#undef ABSL_USES_STD_ANY
+#elif ABSL_OPTION_USE_STD_ANY == 1 || \
+    (ABSL_OPTION_USE_STD_ANY == 2 && defined(ABSL_HAVE_STD_ANY))
+#define ABSL_USES_STD_ANY 1
+#else
+#error options.h is misconfigured.
+#endif
+
+// ABSL_USES_STD_OPTIONAL
+//
+// Indicates whether absl::optional is an alias for std::optional.
+#if !defined(ABSL_OPTION_USE_STD_OPTIONAL)
+#error options.h is misconfigured.
+#elif ABSL_OPTION_USE_STD_OPTIONAL == 0 || \
+    (ABSL_OPTION_USE_STD_OPTIONAL == 2 && !defined(ABSL_HAVE_STD_OPTIONAL))
+#undef ABSL_USES_STD_OPTIONAL
+#elif ABSL_OPTION_USE_STD_OPTIONAL == 1 || \
+    (ABSL_OPTION_USE_STD_OPTIONAL == 2 && defined(ABSL_HAVE_STD_OPTIONAL))
+#define ABSL_USES_STD_OPTIONAL 1
+#else
+#error options.h is misconfigured.
+#endif
+
+// ABSL_USES_STD_VARIANT
+//
+// Indicates whether absl::variant is an alias for std::variant.
+#if !defined(ABSL_OPTION_USE_STD_VARIANT)
+#error options.h is misconfigured.
+#elif ABSL_OPTION_USE_STD_VARIANT == 0 || \
+    (ABSL_OPTION_USE_STD_VARIANT == 2 && !defined(ABSL_HAVE_STD_VARIANT))
+#undef ABSL_USES_STD_VARIANT
+#elif ABSL_OPTION_USE_STD_VARIANT == 1 || \
+    (ABSL_OPTION_USE_STD_VARIANT == 2 && defined(ABSL_HAVE_STD_VARIANT))
+#define ABSL_USES_STD_VARIANT 1
+#else
+#error options.h is misconfigured.
+#endif
+
+// ABSL_USES_STD_STRING_VIEW
+//
+// Indicates whether absl::string_view is an alias for std::string_view.
+#if !defined(ABSL_OPTION_USE_STD_STRING_VIEW)
+#error options.h is misconfigured.
+#elif ABSL_OPTION_USE_STD_STRING_VIEW == 0 || \
+    (ABSL_OPTION_USE_STD_STRING_VIEW == 2 &&  \
+     !defined(ABSL_HAVE_STD_STRING_VIEW))
+#undef ABSL_USES_STD_STRING_VIEW
+#elif ABSL_OPTION_USE_STD_STRING_VIEW == 1 || \
+    (ABSL_OPTION_USE_STD_STRING_VIEW == 2 &&  \
+     defined(ABSL_HAVE_STD_STRING_VIEW))
+#define ABSL_USES_STD_STRING_VIEW 1
+#else
+#error options.h is misconfigured.
+#endif
+
+// ABSL_USES_STD_ORDERING
+//
+// Indicates whether absl::{partial,weak,strong}_ordering are aliases for the
+// std:: ordering types.
+#if !defined(ABSL_OPTION_USE_STD_ORDERING)
+#error options.h is misconfigured.
+#elif ABSL_OPTION_USE_STD_ORDERING == 0 || \
+    (ABSL_OPTION_USE_STD_ORDERING == 2 && !defined(ABSL_HAVE_STD_ORDERING))
+#undef ABSL_USES_STD_ORDERING
+#elif ABSL_OPTION_USE_STD_ORDERING == 1 || \
+    (ABSL_OPTION_USE_STD_ORDERING == 2 && defined(ABSL_HAVE_STD_ORDERING))
+#define ABSL_USES_STD_ORDERING 1
+#else
+#error options.h is misconfigured.
+#endif
+
+// In debug mode, MSVC 2017's std::variant throws a EXCEPTION_ACCESS_VIOLATION
+// SEH exception from emplace for variant<SomeStruct> when constructing the
+// struct can throw. This defeats some of variant_test and
+// variant_exception_safety_test.
+#if defined(_MSC_VER) && _MSC_VER >= 1700 && defined(_DEBUG)
+#define ABSL_INTERNAL_MSVC_2017_DBG_MODE
+#endif
+
+// ABSL_INTERNAL_MANGLED_NS
+// ABSL_INTERNAL_MANGLED_BACKREFERENCE
+//
+// Internal macros for building up mangled names in our internal fork of CCTZ.
+// This implementation detail is only needed and provided for the MSVC build.
+//
+// These macros both expand to string literals.  ABSL_INTERNAL_MANGLED_NS is
+// the mangled spelling of the `absl` namespace, and
+// ABSL_INTERNAL_MANGLED_BACKREFERENCE is a back-reference integer representing
+// the proper count to skip past the CCTZ fork namespace names.  (This number
+// is one larger when there is an inline namespace name to skip.)
+#if defined(_MSC_VER)
+#if ABSL_OPTION_USE_INLINE_NAMESPACE == 0
+#define ABSL_INTERNAL_MANGLED_NS "absl"
+#define ABSL_INTERNAL_MANGLED_BACKREFERENCE "5"
+#else
+#define ABSL_INTERNAL_MANGLED_NS \
+  ABSL_INTERNAL_TOKEN_STR(ABSL_OPTION_INLINE_NAMESPACE_NAME) "@absl"
+#define ABSL_INTERNAL_MANGLED_BACKREFERENCE "6"
+#endif
+#endif
+
+// ABSL_DLL
+//
+// When building Abseil as a DLL, this macro expands to `__declspec(dllexport)`
+// so we can annotate symbols appropriately as being exported. When used in
+// headers consuming a DLL, this macro expands to `__declspec(dllimport)` so
+// that consumers know the symbol is defined inside the DLL. In all other cases,
+// the macro expands to nothing.
+#if defined(_MSC_VER)
+#if defined(ABSL_BUILD_DLL)
+#define ABSL_DLL __declspec(dllexport)
+#elif defined(ABSL_CONSUME_DLL)
+#define ABSL_DLL __declspec(dllimport)
+#else
+#define ABSL_DLL
+#endif
+#else
+#define ABSL_DLL
+#endif  // defined(_MSC_VER)
+
+#if defined(_MSC_VER)
+#if defined(ABSL_BUILD_TEST_DLL)
+#define ABSL_TEST_DLL __declspec(dllexport)
+#elif defined(ABSL_CONSUME_TEST_DLL)
+#define ABSL_TEST_DLL __declspec(dllimport)
+#else
+#define ABSL_TEST_DLL
+#endif
+#else
+#define ABSL_TEST_DLL
+#endif  // defined(_MSC_VER)
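
A sketch of how ABSL_DLL is attached to a symbol that must cross the DLL boundary (illustrative only; the namespace and variable are invented names):

    #include "absl/base/config.h"

    namespace example {
    // Exported when built with ABSL_BUILD_DLL, imported when built with
    // ABSL_CONSUME_DLL, and a plain extern declaration everywhere else.
    ABSL_DLL extern int global_counter;
    }  // namespace example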
+
+// ABSL_HAVE_MEMORY_SANITIZER
+//
+// MemorySanitizer (MSan) is a detector of uninitialized reads. It consists of
+// a compiler instrumentation module and a run-time library.
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
+#error "ABSL_HAVE_MEMORY_SANITIZER cannot be directly set."
+#elif !defined(__native_client__) && ABSL_HAVE_FEATURE(memory_sanitizer)
+#define ABSL_HAVE_MEMORY_SANITIZER 1
+#endif
+
+// ABSL_HAVE_THREAD_SANITIZER
+//
+// ThreadSanitizer (TSan) is a fast data race detector.
+#ifdef ABSL_HAVE_THREAD_SANITIZER
+#error "ABSL_HAVE_THREAD_SANITIZER cannot be directly set."
+#elif defined(__SANITIZE_THREAD__)
+#define ABSL_HAVE_THREAD_SANITIZER 1
+#elif ABSL_HAVE_FEATURE(thread_sanitizer)
+#define ABSL_HAVE_THREAD_SANITIZER 1
+#endif
+
+// ABSL_HAVE_ADDRESS_SANITIZER
+//
+// AddressSanitizer (ASan) is a fast memory error detector.
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+#error "ABSL_HAVE_ADDRESS_SANITIZER cannot be directly set."
+#elif defined(__SANITIZE_ADDRESS__)
+#define ABSL_HAVE_ADDRESS_SANITIZER 1
+#elif ABSL_HAVE_FEATURE(address_sanitizer)
+#define ABSL_HAVE_ADDRESS_SANITIZER 1
+#endif
+
+// ABSL_HAVE_HWADDRESS_SANITIZER
+//
+// Hardware-Assisted AddressSanitizer (or HWASAN) is a memory error detector
+// that is even faster than ASan; it can use CPU features like ARM TBI,
+// Intel LAM, or AMD UAI.
+#ifdef ABSL_HAVE_HWADDRESS_SANITIZER
+#error "ABSL_HAVE_HWADDRESS_SANITIZER cannot be directly set."
+#elif defined(__SANITIZE_HWADDRESS__)
+#define ABSL_HAVE_HWADDRESS_SANITIZER 1
+#elif ABSL_HAVE_FEATURE(hwaddress_sanitizer)
+#define ABSL_HAVE_HWADDRESS_SANITIZER 1
+#endif
+
+// ABSL_HAVE_DATAFLOW_SANITIZER
+//
+// Dataflow Sanitizer (or DFSAN) is a generalised dynamic data flow analysis.
+#ifdef ABSL_HAVE_DATAFLOW_SANITIZER
+#error "ABSL_HAVE_DATAFLOW_SANITIZER cannot be directly set."
+#elif defined(DATAFLOW_SANITIZER)
+// GCC provides no method for detecting the presence of the standalone
+// DataFlowSanitizer (-fsanitize=dataflow), so GCC users of -fsanitize=dataflow
+// should also use -DDATAFLOW_SANITIZER.
+#define ABSL_HAVE_DATAFLOW_SANITIZER 1
+#elif ABSL_HAVE_FEATURE(dataflow_sanitizer)
+#define ABSL_HAVE_DATAFLOW_SANITIZER 1
+#endif
+
+// ABSL_HAVE_LEAK_SANITIZER
+//
+// LeakSanitizer (or lsan) is a detector of memory leaks.
+// https://clang.llvm.org/docs/LeakSanitizer.html
+// https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer
+//
+// The macro ABSL_HAVE_LEAK_SANITIZER can be used to detect at compile-time
+// whether the LeakSanitizer is potentially available. However, just because the
+// LeakSanitizer is available does not mean it is active. Use the
+// always-available run-time interface in //absl/debugging/leak_check.h for
+// interacting with LeakSanitizer.
+#ifdef ABSL_HAVE_LEAK_SANITIZER
+#error "ABSL_HAVE_LEAK_SANITIZER cannot be directly set."
+#elif defined(LEAK_SANITIZER)
+// GCC provides no method for detecting the presence of the standalone
+// LeakSanitizer (-fsanitize=leak), so GCC users of -fsanitize=leak should also
+// use -DLEAK_SANITIZER.
+#define ABSL_HAVE_LEAK_SANITIZER 1
+// Clang standalone LeakSanitizer (-fsanitize=leak)
+#elif ABSL_HAVE_FEATURE(leak_sanitizer)
+#define ABSL_HAVE_LEAK_SANITIZER 1
+#elif defined(ABSL_HAVE_ADDRESS_SANITIZER)
+// GCC or Clang using the LeakSanitizer integrated into AddressSanitizer.
+#define ABSL_HAVE_LEAK_SANITIZER 1
+#endif
+
+// ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
+//
+// Class template argument deduction is a language feature added in C++17.
+#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
+#error "ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION cannot be directly set."
+#elif defined(__cpp_deduction_guides)
+#define ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION 1
+#endif
+
+// ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+//
+// Prior to C++17, static constexpr variables defined in classes required a
+// separate definition outside of the class body, for example:
+//
+// class Foo {
+//   static constexpr int kBar = 0;
+// };
+// constexpr int Foo::kBar;
+//
+// In C++17, these variables defined in classes are considered inline variables,
+// and the extra declaration is redundant. Since some compilers warn on the
+// extra declarations, ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL can be used
+// to emit them only when they are still required:
+//
+// #ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+// constexpr int Foo::kBar;
+// #endif
+#if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \
+    ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L
+#define ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL 1
+#endif
+
+// `ABSL_INTERNAL_HAS_RTTI` determines whether abseil is being compiled with
+// RTTI support.
+#ifdef ABSL_INTERNAL_HAS_RTTI
+#error ABSL_INTERNAL_HAS_RTTI cannot be directly set
+#elif ABSL_HAVE_FEATURE(cxx_rtti)
+#define ABSL_INTERNAL_HAS_RTTI 1
+#elif defined(__GNUC__) && defined(__GXX_RTTI)
+#define ABSL_INTERNAL_HAS_RTTI 1
+#elif defined(_MSC_VER) && defined(_CPPRTTI)
+#define ABSL_INTERNAL_HAS_RTTI 1
+#elif !defined(__GNUC__) && !defined(_MSC_VER)
+// Unknown compiler, default to RTTI
+#define ABSL_INTERNAL_HAS_RTTI 1
+#endif
+
+// `ABSL_INTERNAL_HAS_CXA_DEMANGLE` determines whether `abi::__cxa_demangle` is
+// available.
+#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE
+#error ABSL_INTERNAL_HAS_CXA_DEMANGLE cannot be directly set
+#elif defined(OS_ANDROID) && (defined(__i386__) || defined(__x86_64__))
+#define ABSL_INTERNAL_HAS_CXA_DEMANGLE 0
+#elif defined(__GNUC__)
+#define ABSL_INTERNAL_HAS_CXA_DEMANGLE 1
+#elif defined(__clang__) && !defined(_MSC_VER)
+#define ABSL_INTERNAL_HAS_CXA_DEMANGLE 1
+#endif
+
+// ABSL_INTERNAL_HAVE_SSE is used for compile-time detection of SSE support.
+// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of
+// which architectures support the various x86 instruction sets.
+#ifdef ABSL_INTERNAL_HAVE_SSE
+#error ABSL_INTERNAL_HAVE_SSE cannot be directly set
+#elif defined(__SSE__)
+#define ABSL_INTERNAL_HAVE_SSE 1
+#elif (defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1)) && \
+    !defined(_M_ARM64EC)
+// MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 1
+// indicates that at least SSE was targeted with the /arch:SSE option.
+// All x86-64 processors support SSE, so support can be assumed.
+// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros
+#define ABSL_INTERNAL_HAVE_SSE 1
+#endif
+
+// ABSL_INTERNAL_HAVE_SSE2 is used for compile-time detection of SSE2 support.
+// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of
+// which architectures support the various x86 instruction sets.
+#ifdef ABSL_INTERNAL_HAVE_SSE2
+#error ABSL_INTERNAL_HAVE_SSE2 cannot be directly set
+#elif defined(__SSE2__)
+#define ABSL_INTERNAL_HAVE_SSE2 1
+#elif (defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 2)) && \
+    !defined(_M_ARM64EC)
+// MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 2
+// indicates that at least SSE2 was targeted with the /arch:SSE2 option.
+// All x86-64 processors support SSE2, so support can be assumed.
+// https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros
+#define ABSL_INTERNAL_HAVE_SSE2 1
+#endif
+
+// ABSL_INTERNAL_HAVE_SSSE3 is used for compile-time detection of SSSE3 support.
+// See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of
+// which architectures support the various x86 instruction sets.
+//
+// MSVC does not have a mode that targets SSSE3 at compile-time. To use SSSE3
+// with MSVC, either assume that the code will only ever run on CPUs that
+// support SSSE3, or use __cpuid() to detect support at runtime and fall back
+// to a non-SSSE3 implementation when SSSE3 is unsupported by the CPU.
+#ifdef ABSL_INTERNAL_HAVE_SSSE3
+#error ABSL_INTERNAL_HAVE_SSSE3 cannot be directly set
+#elif defined(__SSSE3__)
+#define ABSL_INTERNAL_HAVE_SSSE3 1
+#endif
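+
+// An illustrative sketch of the runtime-detection approach described above,
+// assuming MSVC's <intrin.h>; Ssse3Impl and PortableImpl are hypothetical:
+//
+//   int regs[4];
+//   __cpuid(regs, 1);
+//   const bool has_ssse3 = (regs[2] & (1 << 9)) != 0;  // CPUID.1:ECX bit 9
+//   return has_ssse3 ? Ssse3Impl(data) : PortableImpl(data);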
+
+// ABSL_INTERNAL_HAVE_ARM_NEON is used for compile-time detection of NEON (ARM
+// SIMD).
+//
+// If __CUDA_ARCH__ is defined, then we are compiling CUDA code in device mode.
+// In device mode, NEON intrinsics are not available, regardless of host
+// platform.
+// https://llvm.org/docs/CompileCudaWithLLVM.html#detecting-clang-vs-nvcc-from-code
+#ifdef ABSL_INTERNAL_HAVE_ARM_NEON
+#error ABSL_INTERNAL_HAVE_ARM_NEON cannot be directly set
+#elif defined(__ARM_NEON) && !defined(__CUDA_ARCH__)
+#define ABSL_INTERNAL_HAVE_ARM_NEON 1
+#endif
+
+// ABSL_HAVE_CONSTANT_EVALUATED is used for compile-time detection of
+// constant evaluation support through `absl::is_constant_evaluated`.
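+//
+// For example (an illustrative sketch only), a constexpr-friendly function can
+// take a compile-time-only branch when the feature is available:
+//
+//   #ifdef ABSL_HAVE_CONSTANT_EVALUATED
+//   if (absl::is_constant_evaluated()) { /* compile-time path */ }
+//   #endif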
+#ifdef ABSL_HAVE_CONSTANT_EVALUATED
+#error ABSL_HAVE_CONSTANT_EVALUATED cannot be directly set
+#endif
+#ifdef __cpp_lib_is_constant_evaluated
+#define ABSL_HAVE_CONSTANT_EVALUATED 1
+#elif ABSL_HAVE_BUILTIN(__builtin_is_constant_evaluated)
+#define ABSL_HAVE_CONSTANT_EVALUATED 1
+#endif
+
+// ABSL_INTERNAL_EMSCRIPTEN_VERSION combines Emscripten's three version macros
+// into an integer that can be compared against.
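+// For example, Emscripten 3.1.20 yields 3 * 1000000 + 1 * 1000 + 20 = 3001020,
+// so a check such as `#if ABSL_INTERNAL_EMSCRIPTEN_VERSION >= 3001020` selects
+// that release or newer (the version number here is only illustrative).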
+#ifdef ABSL_INTERNAL_EMSCRIPTEN_VERSION
+#error ABSL_INTERNAL_EMSCRIPTEN_VERSION cannot be directly set
+#endif
+#ifdef __EMSCRIPTEN__
+#include <emscripten/version.h>
+#ifdef __EMSCRIPTEN_major__
+#if __EMSCRIPTEN_minor__ >= 1000
+#error __EMSCRIPTEN_minor__ is too big to fit in ABSL_INTERNAL_EMSCRIPTEN_VERSION
+#endif
+#if __EMSCRIPTEN_tiny__ >= 1000
+#error __EMSCRIPTEN_tiny__ is too big to fit in ABSL_INTERNAL_EMSCRIPTEN_VERSION
+#endif
+#define ABSL_INTERNAL_EMSCRIPTEN_VERSION                              \
+  ((__EMSCRIPTEN_major__) * 1000000 + (__EMSCRIPTEN_minor__) * 1000 + \
+   (__EMSCRIPTEN_tiny__))
+#endif
+#endif
+
+#endif  // ABSL_BASE_CONFIG_H_

+ 60 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/config_test.cc

@@ -0,0 +1,60 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/config.h"
+
+#include <cstdint>
+
+#include "gtest/gtest.h"
+#include "absl/synchronization/internal/thread_pool.h"
+
+namespace {
+
+TEST(ConfigTest, Endianness) {
+  union {
+    uint32_t value;
+    uint8_t data[sizeof(uint32_t)];
+  } number;
+  number.data[0] = 0x00;
+  number.data[1] = 0x01;
+  number.data[2] = 0x02;
+  number.data[3] = 0x03;
+#if defined(ABSL_IS_LITTLE_ENDIAN) && defined(ABSL_IS_BIG_ENDIAN)
+#error Both ABSL_IS_LITTLE_ENDIAN and ABSL_IS_BIG_ENDIAN are defined
+#elif defined(ABSL_IS_LITTLE_ENDIAN)
+  EXPECT_EQ(UINT32_C(0x03020100), number.value);
+#elif defined(ABSL_IS_BIG_ENDIAN)
+  EXPECT_EQ(UINT32_C(0x00010203), number.value);
+#else
+#error Unknown endianness
+#endif
+}
+
+#if defined(ABSL_HAVE_THREAD_LOCAL)
+TEST(ConfigTest, ThreadLocal) {
+  static thread_local int mine_mine_mine = 16;
+  EXPECT_EQ(16, mine_mine_mine);
+  {
+    absl::synchronization_internal::ThreadPool pool(1);
+    pool.Schedule([&] {
+      EXPECT_EQ(16, mine_mine_mine);
+      mine_mine_mine = 32;
+      EXPECT_EQ(32, mine_mine_mine);
+    });
+  }
+  EXPECT_EQ(16, mine_mine_mine);
+}
+#endif
+
+}  // namespace

+ 76 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/const_init.h

@@ -0,0 +1,76 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// kConstInit
+// -----------------------------------------------------------------------------
+//
+// A constructor tag used to mark an object as safe for use as a global
+// variable, avoiding the usual lifetime issues that can affect globals.
+
+#ifndef ABSL_BASE_CONST_INIT_H_
+#define ABSL_BASE_CONST_INIT_H_
+
+#include "absl/base/config.h"
+
+// In general, objects with static storage duration (such as global variables)
+// can trigger tricky object lifetime situations.  Attempting to access them
+// from the constructors or destructors of other global objects can result in
+// undefined behavior, unless their constructors and destructors are designed
+// with this issue in mind.
+//
+// The normal way to deal with this issue in C++11 is to use constant
+// initialization and trivial destructors.
+//
+// Constant initialization is guaranteed to occur before any other code
+// executes.  Constructors that are declared 'constexpr' are eligible for
+// constant initialization.  You can annotate a variable declaration with the
+// ABSL_CONST_INIT macro to express this intent.  For compilers that support
+// it, this annotation will cause a compilation error for declarations that
+// aren't subject to constant initialization (perhaps because a runtime value
+// was passed as a constructor argument).
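+//
+// A minimal sketch (illustrative; `counter` is a hypothetical global):
+//
+//   ABSL_CONST_INIT std::atomic<int> counter(0);   // constant-initialized
+//
+// Annotating a variable whose initialization must run at runtime would instead
+// be rejected by compilers that enforce the annotation.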
+//
+// On program shutdown, lifetime issues can be avoided on global objects by
+// ensuring that they contain  trivial destructors.  A class has a trivial
+// destructor unless it has a user-defined destructor, a virtual method or base
+// class, or a data member or base class with a non-trivial destructor of its
+// own.  Objects with static storage duration and a trivial destructor are not
+// cleaned up on program shutdown, and are thus safe to access from other code
+// running during shutdown.
+//
+// For a few core Abseil classes, we make a best effort to allow for safe global
+// instances, even though these classes have non-trivial destructors.  These
+// objects can be created with the absl::kConstInit tag.  For example:
+//   ABSL_CONST_INIT absl::Mutex global_mutex(absl::kConstInit);
+//
+// The line above declares a global variable of type absl::Mutex which can be
+// accessed at any point during startup or shutdown.  global_mutex's destructor
+// will still run, but will not invalidate the object.  Note that C++ specifies
+// that accessing an object after its destructor has run results in undefined
+// behavior, but this pattern works on the toolchains we support.
+//
+// The absl::kConstInit tag should only be used to define objects with static
+// or thread_local storage duration.
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+enum ConstInitType {
+  kConstInit,
+};
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_CONST_INIT_H_

+ 480 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/dynamic_annotations.h

@@ -0,0 +1,480 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file defines dynamic annotations for use with dynamic analysis tools
+// such as Valgrind, PIN, etc.
+//
+// Dynamic annotation is a source code annotation that affects the generated
+// code (that is, the annotation is not a comment). Each such annotation is
+// attached to a particular instruction and/or to a particular object (address)
+// in the program.
+//
+// The annotations that should be used by users are macros in all upper-case
+// (e.g., ABSL_ANNOTATE_THREAD_NAME).
+//
+// Actual implementation of these macros may differ depending on the dynamic
+// analysis tool being used.
+//
+// This file supports the following configurations:
+// - Dynamic Annotations enabled (with static thread-safety warnings disabled).
+//   In this case, macros expand to functions implemented by Thread Sanitizer,
+//   when building with TSan. When not provided an external implementation,
+//   dynamic_annotations.cc provides no-op implementations.
+//
+// - Static Clang thread-safety warnings enabled.
+//   When building with a Clang compiler that supports thread-safety warnings,
+//   a subset of annotations can be statically-checked at compile-time. We
+//   expand these macros to static-inline functions that can be analyzed for
+//   thread-safety, but afterwards elided when building the final binary.
+//
+// - All annotations are disabled.
+//   If neither Dynamic Annotations nor Clang thread-safety warnings are
+//   enabled, then all annotation-macros expand to empty.
+
+#ifndef ABSL_BASE_DYNAMIC_ANNOTATIONS_H_
+#define ABSL_BASE_DYNAMIC_ANNOTATIONS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#ifdef __cplusplus
+#include "absl/base/macros.h"
+#endif
+
+#ifdef ABSL_HAVE_HWADDRESS_SANITIZER
+#include <sanitizer/hwasan_interface.h>
+#endif
+
+// TODO(rogeeff): Remove after the backward compatibility period.
+#include "absl/base/internal/dynamic_annotations.h"  // IWYU pragma: export
+
+// -------------------------------------------------------------------------
+// Decide which features are enabled.
+
+#ifdef ABSL_HAVE_THREAD_SANITIZER
+
+#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0
+#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED 1
+
+#else
+
+#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 0
+#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 0
+#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 0
+
+// Clang provides limited support for static thread-safety analysis through a
+// feature called Annotalysis. We configure macro definitions according to
+// whether Annotalysis support is available. When running in opt mode, GCC
+// issues a warning if these attributes are compiled, so they are only
+// included when compiling with Clang.
+
+#if defined(__clang__)
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 1
+#if !defined(SWIG)
+#define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 1
+#endif
+#else
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0
+#endif
+
+// Read/write annotations are enabled in Annotalysis mode; disabled otherwise.
+#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \
+  ABSL_INTERNAL_ANNOTALYSIS_ENABLED
+
+#endif  // ABSL_HAVE_THREAD_SANITIZER
+
+#ifdef __cplusplus
+#define ABSL_INTERNAL_BEGIN_EXTERN_C extern "C" {
+#define ABSL_INTERNAL_END_EXTERN_C }  // extern "C"
+#define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F
+#define ABSL_INTERNAL_STATIC_INLINE inline
+#else
+#define ABSL_INTERNAL_BEGIN_EXTERN_C  // empty
+#define ABSL_INTERNAL_END_EXTERN_C    // empty
+#define ABSL_INTERNAL_GLOBAL_SCOPED(F) F
+#define ABSL_INTERNAL_STATIC_INLINE static inline
+#endif
+
+// -------------------------------------------------------------------------
+// Define race annotations.
+
+#if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1
+// Some of the symbols used in this section (e.g. AnnotateBenignRaceSized) are
+// defined by the compiler-based sanitizer implementation, not by the Abseil
+// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL.
+
+// -------------------------------------------------------------
+// Annotations that suppress errors. It is usually better to express the
+// program's synchronization using the other annotations, but these can be used
+// when all else fails.
+
+// Report that we may have a benign race at `pointer`, with size
+// "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the
+// point where `pointer` has been allocated, preferably close to the point
+// where the race happens. See also ABSL_ANNOTATE_BENIGN_RACE_STATIC.
+#define ABSL_ANNOTATE_BENIGN_RACE(pointer, description) \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized)  \
+  (__FILE__, __LINE__, pointer, sizeof(*(pointer)), description)
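+
+// Illustrative usage (the variable and description are hypothetical):
+//
+//   static int g_hit_count = 0;  // racy by design; reads are best-effort
+//   ABSL_ANNOTATE_BENIGN_RACE(&g_hit_count, "best-effort stats counter");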
+
+// Same as ABSL_ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to
+// the memory range [`address`, `address`+`size`).
+#define ABSL_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized)              \
+  (__FILE__, __LINE__, address, size, description)
+
+// Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads.
+// This annotation could be useful if you want to skip expensive race analysis
+// during some period of program execution, e.g. during initialization.
+#define ABSL_ANNOTATE_ENABLE_RACE_DETECTION(enable)        \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \
+  (__FILE__, __LINE__, enable)
+
+// -------------------------------------------------------------
+// Annotations useful for debugging.
+
+// Report the current thread `name` to a race detector.
+#define ABSL_ANNOTATE_THREAD_NAME(name) \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName)(__FILE__, __LINE__, name)
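+
+// For example (the name is illustrative):
+//   ABSL_ANNOTATE_THREAD_NAME("worker-pool-thread");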
+
+// -------------------------------------------------------------
+// Annotations useful when implementing locks. They are not normally needed by
+// modules that merely use locks. The `lock` argument is a pointer to the lock
+// object.
+
+// Report that a lock has been created at address `lock`.
+#define ABSL_ANNOTATE_RWLOCK_CREATE(lock) \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock)
+
+// Report that a linker initialized lock has been created at address `lock`.
+#ifdef ABSL_HAVE_THREAD_SANITIZER
+#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock)          \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \
+  (__FILE__, __LINE__, lock)
+#else
+#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) \
+  ABSL_ANNOTATE_RWLOCK_CREATE(lock)
+#endif
+
+// Report that the lock at address `lock` is about to be destroyed.
+#define ABSL_ANNOTATE_RWLOCK_DESTROY(lock) \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock)
+
+// Report that the lock at address `lock` has been acquired.
+// `is_w`=1 for writer lock, `is_w`=0 for reader lock.
+#define ABSL_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)     \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \
+  (__FILE__, __LINE__, lock, is_w)
+
+// Report that the lock at address `lock` is about to be released.
+// `is_w`=1 for writer lock, `is_w`=0 for reader lock.
+#define ABSL_ANNOTATE_RWLOCK_RELEASED(lock, is_w)     \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \
+  (__FILE__, __LINE__, lock, is_w)
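+
+// A lock implementation would typically thread these annotations through its
+// lifecycle; an illustrative sketch (MyLock and its methods are hypothetical):
+//
+//   MyLock::MyLock() { ABSL_ANNOTATE_RWLOCK_CREATE(this); }
+//   void MyLock::WriterLock() {
+//     // ... actually acquire the lock ...
+//     ABSL_ANNOTATE_RWLOCK_ACQUIRED(this, 1);
+//   }
+//   void MyLock::WriterUnlock() {
+//     ABSL_ANNOTATE_RWLOCK_RELEASED(this, 1);
+//     // ... actually release the lock ...
+//   }
+//   MyLock::~MyLock() { ABSL_ANNOTATE_RWLOCK_DESTROY(this); }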
+
+// Apply ABSL_ANNOTATE_BENIGN_RACE_SIZED to a static variable `static_var`.
+#define ABSL_ANNOTATE_BENIGN_RACE_STATIC(static_var, description)      \
+  namespace {                                                          \
+  class static_var##_annotator {                                       \
+   public:                                                             \
+    static_var##_annotator() {                                         \
+      ABSL_ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \
+                                      #static_var ": " description);   \
+    }                                                                  \
+  };                                                                   \
+  static static_var##_annotator the##static_var##_annotator;           \
+  }  // namespace
+
+// Function prototypes of annotations provided by the compiler-based sanitizer
+// implementation.
+ABSL_INTERNAL_BEGIN_EXTERN_C
+void AnnotateRWLockCreate(const char* file, int line,
+                          const volatile void* lock);
+void AnnotateRWLockCreateStatic(const char* file, int line,
+                                const volatile void* lock);
+void AnnotateRWLockDestroy(const char* file, int line,
+                           const volatile void* lock);
+void AnnotateRWLockAcquired(const char* file, int line,
+                            const volatile void* lock, long is_w);  // NOLINT
+void AnnotateRWLockReleased(const char* file, int line,
+                            const volatile void* lock, long is_w);  // NOLINT
+void AnnotateBenignRace(const char* file, int line,
+                        const volatile void* address, const char* description);
+void AnnotateBenignRaceSized(const char* file, int line,
+                             const volatile void* address, size_t size,
+                             const char* description);
+void AnnotateThreadName(const char* file, int line, const char* name);
+void AnnotateEnableRaceDetection(const char* file, int line, int enable);
+ABSL_INTERNAL_END_EXTERN_C
+
+#else  // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 0
+
+#define ABSL_ANNOTATE_RWLOCK_CREATE(lock)                            // empty
+#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock)                     // empty
+#define ABSL_ANNOTATE_RWLOCK_DESTROY(lock)                           // empty
+#define ABSL_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)                    // empty
+#define ABSL_ANNOTATE_RWLOCK_RELEASED(lock, is_w)                    // empty
+#define ABSL_ANNOTATE_BENIGN_RACE(address, description)              // empty
+#define ABSL_ANNOTATE_BENIGN_RACE_SIZED(address, size, description)  // empty
+#define ABSL_ANNOTATE_THREAD_NAME(name)                              // empty
+#define ABSL_ANNOTATE_ENABLE_RACE_DETECTION(enable)                  // empty
+#define ABSL_ANNOTATE_BENIGN_RACE_STATIC(static_var, description)    // empty
+
+#endif  // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
+
+// -------------------------------------------------------------------------
+// Define memory annotations.
+
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
+
+#include <sanitizer/msan_interface.h>
+
+#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+  __msan_unpoison(address, size)
+
+#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
+  __msan_allocated_memory(address, size)
+
+#else  // !defined(ABSL_HAVE_MEMORY_SANITIZER)
+
+#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size)    // empty
+#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size)  // empty
+
+#endif  // ABSL_HAVE_MEMORY_SANITIZER
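+
+// Illustrative usage of the annotations above (`buf` and `len` are
+// hypothetical): after filling a buffer through a channel MSan cannot see,
+// such as a device ioctl, mark it as initialized so later reads are not
+// flagged:
+//
+//   ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(buf, len);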
+
+// -------------------------------------------------------------------------
+// Define IGNORE_READS_BEGIN/_END attributes.
+
+#if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \
+  __attribute((exclusive_lock_function("*")))
+#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \
+  __attribute((unlock_function("*")))
+
+#else  // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE  // empty
+#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE    // empty
+
+#endif  // defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+// -------------------------------------------------------------------------
+// Define IGNORE_READS_BEGIN/_END annotations.
+
+#if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1
+// Some of the symbols used in this section (e.g. AnnotateIgnoreReadsBegin) are
+// defined by the compiler-based implementation, not by the Abseil
+// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL.
+
+// Request the analysis tool to ignore all reads in the current thread until
+// ABSL_ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey
+// reads, while still checking other reads and all writes.
+// See also ABSL_ANNOTATE_UNPROTECTED_READ.
+#define ABSL_ANNOTATE_IGNORE_READS_BEGIN()              \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin) \
+  (__FILE__, __LINE__)
+
+// Stop ignoring reads.
+#define ABSL_ANNOTATE_IGNORE_READS_END()              \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd) \
+  (__FILE__, __LINE__)
+
+// Function prototypes of annotations provided by the compiler-based sanitizer
+// implementation.
+ABSL_INTERNAL_BEGIN_EXTERN_C
+void AnnotateIgnoreReadsBegin(const char* file, int line)
+    ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE;
+void AnnotateIgnoreReadsEnd(const char* file,
+                            int line) ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE;
+ABSL_INTERNAL_END_EXTERN_C
+
+#elif defined(ABSL_INTERNAL_ANNOTALYSIS_ENABLED)
+
+// When Annotalysis is enabled without Dynamic Annotations, the use of
+// static-inline functions allows the annotations to be read at compile-time,
+// while still letting the compiler elide the functions from the final build.
+//
+// TODO(delesley) -- The exclusive lock here ignores writes as well, but
+// allows IGNORE_READS_AND_WRITES to work properly.
+
+#define ABSL_ANNOTATE_IGNORE_READS_BEGIN()                          \
+  ABSL_INTERNAL_GLOBAL_SCOPED(                                      \
+      ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsBegin)) \
+  ()
+
+#define ABSL_ANNOTATE_IGNORE_READS_END()                          \
+  ABSL_INTERNAL_GLOBAL_SCOPED(                                    \
+      ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsEnd)) \
+  ()
+
+ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL(
+    AbslInternalAnnotateIgnoreReadsBegin)()
+    ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE {}
+
+ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL(
+    AbslInternalAnnotateIgnoreReadsEnd)()
+    ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE {}
+
+#else
+
+#define ABSL_ANNOTATE_IGNORE_READS_BEGIN()  // empty
+#define ABSL_ANNOTATE_IGNORE_READS_END()    // empty
+
+#endif
+
+// -------------------------------------------------------------------------
+// Define IGNORE_WRITES_BEGIN/_END annotations.
+
+#if ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED == 1
+
+// Similar to ABSL_ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead.
+#define ABSL_ANNOTATE_IGNORE_WRITES_BEGIN() \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__)
+
+// Stop ignoring writes.
+#define ABSL_ANNOTATE_IGNORE_WRITES_END() \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__)
+
+// Function prototypes of annotations provided by the compiler-based sanitizer
+// implementation.
+ABSL_INTERNAL_BEGIN_EXTERN_C
+void AnnotateIgnoreWritesBegin(const char* file, int line);
+void AnnotateIgnoreWritesEnd(const char* file, int line);
+ABSL_INTERNAL_END_EXTERN_C
+
+#else
+
+#define ABSL_ANNOTATE_IGNORE_WRITES_BEGIN()  // empty
+#define ABSL_ANNOTATE_IGNORE_WRITES_END()    // empty
+
+#endif
+
+// -------------------------------------------------------------------------
+// Define the ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more
+// primitive annotations defined above.
+//
+//     Instead of doing
+//        ABSL_ANNOTATE_IGNORE_READS_BEGIN();
+//        ... = x;
+//        ABSL_ANNOTATE_IGNORE_READS_END();
+//     one can use
+//        ... = ABSL_ANNOTATE_UNPROTECTED_READ(x);
+
+#if defined(ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED)
+
+// Start ignoring all memory accesses (both reads and writes).
+#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
+  do {                                                \
+    ABSL_ANNOTATE_IGNORE_READS_BEGIN();               \
+    ABSL_ANNOTATE_IGNORE_WRITES_BEGIN();              \
+  } while (0)
+
+// Stop ignoring both reads and writes.
+#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END() \
+  do {                                              \
+    ABSL_ANNOTATE_IGNORE_WRITES_END();              \
+    ABSL_ANNOTATE_IGNORE_READS_END();               \
+  } while (0)
+
+#ifdef __cplusplus
+// ABSL_ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads.
+#define ABSL_ANNOTATE_UNPROTECTED_READ(x) \
+  absl::base_internal::AnnotateUnprotectedRead(x)
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+template <typename T>
+inline T AnnotateUnprotectedRead(const volatile T& x) {  // NOLINT
+  ABSL_ANNOTATE_IGNORE_READS_BEGIN();
+  T res = x;
+  ABSL_ANNOTATE_IGNORE_READS_END();
+  return res;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+#endif
+
+#else
+
+#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN()  // empty
+#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END()    // empty
+#define ABSL_ANNOTATE_UNPROTECTED_READ(x) (x)
+
+#endif
+
+// -------------------------------------------------------------------------
+// Address sanitizer annotations
+
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+// Describe the current state of a contiguous container, such as
+// std::vector or std::string. For more details see
+// sanitizer/common_interface_defs.h, which is provided by the compiler.
+#include <sanitizer/common_interface_defs.h>
+
+#define ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \
+  __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid)
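+
+// Illustrative call for a vector-like container whose in-use area grows from
+// old_size to new_size elements within a capacity-sized buffer (all names are
+// hypothetical):
+//
+//   ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data, data + capacity,
+//                                      data + old_size, data + new_size);
+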
+#define ABSL_ADDRESS_SANITIZER_REDZONE(name) \
+  struct {                                   \
+    alignas(8) char x[8];                    \
+  } name
+
+#else
+
+#define ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid)  // empty
+#define ABSL_ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "")
+
+#endif  // ABSL_HAVE_ADDRESS_SANITIZER
+
+// -------------------------------------------------------------------------
+// HWAddress sanitizer annotations
+
+#ifdef __cplusplus
+namespace absl {
+#ifdef ABSL_HAVE_HWADDRESS_SANITIZER
+// Under HWASAN, HwasanTagPointer changes the tag of the pointer; otherwise it
+// returns the pointer unchanged.
+template <typename T>
+T* HwasanTagPointer(T* ptr, uintptr_t tag) {
+  return reinterpret_cast<T*>(__hwasan_tag_pointer(ptr, tag));
+}
+#else
+template <typename T>
+T* HwasanTagPointer(T* ptr, uintptr_t) {
+  return ptr;
+}
+#endif
+}  // namespace absl
+#endif
+
+// -------------------------------------------------------------------------
+// Undefine the macros intended only for this file.
+
+#undef ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_ANNOTALYSIS_ENABLED
+#undef ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_BEGIN_EXTERN_C
+#undef ABSL_INTERNAL_END_EXTERN_C
+#undef ABSL_INTERNAL_STATIC_INLINE
+
+#endif  // ABSL_BASE_DYNAMIC_ANNOTATIONS_H_

+ 962 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/exception_safety_testing_test.cc

@@ -0,0 +1,962 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/exception_safety_testing.h"
+
+#ifdef ABSL_HAVE_EXCEPTIONS
+
+#include <cstddef>
+#include <exception>
+#include <iostream>
+#include <list>
+#include <type_traits>
+#include <vector>
+
+#include "gtest/gtest-spi.h"
+#include "gtest/gtest.h"
+#include "absl/memory/memory.h"
+
+namespace testing {
+
+namespace {
+
+using ::testing::exceptions_internal::SetCountdown;
+using ::testing::exceptions_internal::TestException;
+using ::testing::exceptions_internal::UnsetCountdown;
+
+// EXPECT_NO_THROW can't inspect the thrown exception, so we use this helper
+// to report which exception escaped.
+template <typename F>
+void ExpectNoThrow(const F& f) {
+  try {
+    f();
+  } catch (const TestException& e) {
+    ADD_FAILURE() << "Unexpected exception thrown from " << e.what();
+  }
+}
+
+TEST(ThrowingValueTest, Throws) {
+  SetCountdown();
+  EXPECT_THROW(ThrowingValue<> bomb, TestException);
+
+  // It's not guaranteed that every operator only throws *once*.  The default
+  // ctor only throws once, though, so use it to make sure we only throw when
+  // the countdown hits 0
+  SetCountdown(2);
+  ExpectNoThrow([]() { ThrowingValue<> bomb; });
+  ExpectNoThrow([]() { ThrowingValue<> bomb; });
+  EXPECT_THROW(ThrowingValue<> bomb, TestException);
+
+  UnsetCountdown();
+}
+
+// Tests that an operation throws when the countdown is at 0, doesn't throw when
+// the countdown doesn't hit 0, and doesn't modify the state of the
+// ThrowingValue if it throws
+template <typename F>
+void TestOp(const F& f) {
+  ExpectNoThrow(f);
+
+  SetCountdown();
+  EXPECT_THROW(f(), TestException);
+  UnsetCountdown();
+}
+
+TEST(ThrowingValueTest, ThrowingCtors) {
+  ThrowingValue<> bomb;
+
+  TestOp([]() { ThrowingValue<> bomb(1); });
+  TestOp([&]() { ThrowingValue<> bomb1 = bomb; });
+  TestOp([&]() { ThrowingValue<> bomb1 = std::move(bomb); });
+}
+
+TEST(ThrowingValueTest, ThrowingAssignment) {
+  ThrowingValue<> bomb, bomb1;
+
+  TestOp([&]() { bomb = bomb1; });
+  TestOp([&]() { bomb = std::move(bomb1); });
+
+  // Test that when assignment throws, the assignment should fail (lhs != rhs)
+  // and strong guarantee fails (lhs != lhs_copy).
+  {
+    ThrowingValue<> lhs(39), rhs(42);
+    ThrowingValue<> lhs_copy(lhs);
+    SetCountdown();
+    EXPECT_THROW(lhs = rhs, TestException);
+    UnsetCountdown();
+    EXPECT_NE(lhs, rhs);
+    EXPECT_NE(lhs_copy, lhs);
+  }
+  {
+    ThrowingValue<> lhs(39), rhs(42);
+    ThrowingValue<> lhs_copy(lhs), rhs_copy(rhs);
+    SetCountdown();
+    EXPECT_THROW(lhs = std::move(rhs), TestException);
+    UnsetCountdown();
+    EXPECT_NE(lhs, rhs_copy);
+    EXPECT_NE(lhs_copy, lhs);
+  }
+}
+
+TEST(ThrowingValueTest, ThrowingComparisons) {
+  ThrowingValue<> bomb1, bomb2;
+  TestOp([&]() { return bomb1 == bomb2; });
+  TestOp([&]() { return bomb1 != bomb2; });
+  TestOp([&]() { return bomb1 < bomb2; });
+  TestOp([&]() { return bomb1 <= bomb2; });
+  TestOp([&]() { return bomb1 > bomb2; });
+  TestOp([&]() { return bomb1 >= bomb2; });
+}
+
+TEST(ThrowingValueTest, ThrowingArithmeticOps) {
+  ThrowingValue<> bomb1(1), bomb2(2);
+
+  TestOp([&bomb1]() { +bomb1; });
+  TestOp([&bomb1]() { -bomb1; });
+  TestOp([&bomb1]() { ++bomb1; });
+  TestOp([&bomb1]() { bomb1++; });
+  TestOp([&bomb1]() { --bomb1; });
+  TestOp([&bomb1]() { bomb1--; });
+
+  TestOp([&]() { bomb1 + bomb2; });
+  TestOp([&]() { bomb1 - bomb2; });
+  TestOp([&]() { bomb1* bomb2; });
+  TestOp([&]() { bomb1 / bomb2; });
+  TestOp([&]() { bomb1 << 1; });
+  TestOp([&]() { bomb1 >> 1; });
+}
+
+TEST(ThrowingValueTest, ThrowingLogicalOps) {
+  ThrowingValue<> bomb1, bomb2;
+
+  TestOp([&bomb1]() { !bomb1; });
+  TestOp([&]() { bomb1&& bomb2; });
+  TestOp([&]() { bomb1 || bomb2; });
+}
+
+TEST(ThrowingValueTest, ThrowingBitwiseOps) {
+  ThrowingValue<> bomb1, bomb2;
+
+  TestOp([&bomb1]() { ~bomb1; });
+  TestOp([&]() { bomb1 & bomb2; });
+  TestOp([&]() { bomb1 | bomb2; });
+  TestOp([&]() { bomb1 ^ bomb2; });
+}
+
+TEST(ThrowingValueTest, ThrowingCompoundAssignmentOps) {
+  ThrowingValue<> bomb1(1), bomb2(2);
+
+  TestOp([&]() { bomb1 += bomb2; });
+  TestOp([&]() { bomb1 -= bomb2; });
+  TestOp([&]() { bomb1 *= bomb2; });
+  TestOp([&]() { bomb1 /= bomb2; });
+  TestOp([&]() { bomb1 %= bomb2; });
+  TestOp([&]() { bomb1 &= bomb2; });
+  TestOp([&]() { bomb1 |= bomb2; });
+  TestOp([&]() { bomb1 ^= bomb2; });
+  TestOp([&]() { bomb1 *= bomb2; });
+}
+
+TEST(ThrowingValueTest, ThrowingStreamOps) {
+  ThrowingValue<> bomb;
+
+  TestOp([&]() {
+    std::istringstream stream;
+    stream >> bomb;
+  });
+  TestOp([&]() {
+    std::stringstream stream;
+    stream << bomb;
+  });
+}
+
+// Tests the operator<< of ThrowingValue by forcing ConstructorTracker to emit
+// a nonfatal failure that contains the string representation of the Thrower
+TEST(ThrowingValueTest, StreamOpsOutput) {
+  using ::testing::TypeSpec;
+  exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown);
+
+  // Test default spec list (kEverythingThrows)
+  EXPECT_NONFATAL_FAILURE(
+      {
+        using Thrower = ThrowingValue<TypeSpec{}>;
+        auto thrower = Thrower(123);
+        thrower.~Thrower();
+      },
+      "ThrowingValue<>(123)");
+
+  // Test with one item in spec list (kNoThrowCopy)
+  EXPECT_NONFATAL_FAILURE(
+      {
+        using Thrower = ThrowingValue<TypeSpec::kNoThrowCopy>;
+        auto thrower = Thrower(234);
+        thrower.~Thrower();
+      },
+      "ThrowingValue<kNoThrowCopy>(234)");
+
+  // Test with multiple items in spec list (kNoThrowMove, kNoThrowNew)
+  EXPECT_NONFATAL_FAILURE(
+      {
+        using Thrower =
+            ThrowingValue<TypeSpec::kNoThrowMove | TypeSpec::kNoThrowNew>;
+        auto thrower = Thrower(345);
+        thrower.~Thrower();
+      },
+      "ThrowingValue<kNoThrowMove | kNoThrowNew>(345)");
+
+  // Test with all items in spec list (kNoThrowCopy, kNoThrowMove, kNoThrowNew)
+  EXPECT_NONFATAL_FAILURE(
+      {
+        using Thrower = ThrowingValue<static_cast<TypeSpec>(-1)>;
+        auto thrower = Thrower(456);
+        thrower.~Thrower();
+      },
+      "ThrowingValue<kNoThrowCopy | kNoThrowMove | kNoThrowNew>(456)");
+}
+
+template <typename F>
+void TestAllocatingOp(const F& f) {
+  ExpectNoThrow(f);
+
+  SetCountdown();
+  EXPECT_THROW(f(), exceptions_internal::TestBadAllocException);
+  UnsetCountdown();
+}
+
+TEST(ThrowingValueTest, ThrowingAllocatingOps) {
+  // make_unique calls unqualified operator new, so these exercise the
+  // ThrowingValue overloads.
+  TestAllocatingOp([]() { return absl::make_unique<ThrowingValue<>>(1); });
+  TestAllocatingOp([]() { return absl::make_unique<ThrowingValue<>[]>(2); });
+}
+
+TEST(ThrowingValueTest, NonThrowingMoveCtor) {
+  ThrowingValue<TypeSpec::kNoThrowMove> nothrow_ctor;
+
+  SetCountdown();
+  ExpectNoThrow([&nothrow_ctor]() {
+    ThrowingValue<TypeSpec::kNoThrowMove> nothrow1 = std::move(nothrow_ctor);
+  });
+  UnsetCountdown();
+}
+
+TEST(ThrowingValueTest, NonThrowingMoveAssign) {
+  ThrowingValue<TypeSpec::kNoThrowMove> nothrow_assign1, nothrow_assign2;
+
+  SetCountdown();
+  ExpectNoThrow([&nothrow_assign1, &nothrow_assign2]() {
+    nothrow_assign1 = std::move(nothrow_assign2);
+  });
+  UnsetCountdown();
+}
+
+TEST(ThrowingValueTest, ThrowingCopyCtor) {
+  ThrowingValue<> tv;
+
+  TestOp([&]() { ThrowingValue<> tv_copy(tv); });
+}
+
+TEST(ThrowingValueTest, ThrowingCopyAssign) {
+  ThrowingValue<> tv1, tv2;
+
+  TestOp([&]() { tv1 = tv2; });
+}
+
+TEST(ThrowingValueTest, NonThrowingCopyCtor) {
+  ThrowingValue<TypeSpec::kNoThrowCopy> nothrow_ctor;
+
+  SetCountdown();
+  ExpectNoThrow([&nothrow_ctor]() {
+    ThrowingValue<TypeSpec::kNoThrowCopy> nothrow1(nothrow_ctor);
+  });
+  UnsetCountdown();
+}
+
+TEST(ThrowingValueTest, NonThrowingCopyAssign) {
+  ThrowingValue<TypeSpec::kNoThrowCopy> nothrow_assign1, nothrow_assign2;
+
+  SetCountdown();
+  ExpectNoThrow([&nothrow_assign1, &nothrow_assign2]() {
+    nothrow_assign1 = nothrow_assign2;
+  });
+  UnsetCountdown();
+}
+
+TEST(ThrowingValueTest, ThrowingSwap) {
+  ThrowingValue<> bomb1, bomb2;
+  TestOp([&]() { std::swap(bomb1, bomb2); });
+}
+
+TEST(ThrowingValueTest, NonThrowingSwap) {
+  ThrowingValue<TypeSpec::kNoThrowMove> bomb1, bomb2;
+  ExpectNoThrow([&]() { std::swap(bomb1, bomb2); });
+}
+
+TEST(ThrowingValueTest, NonThrowingAllocation) {
+  ThrowingValue<TypeSpec::kNoThrowNew>* allocated;
+  ThrowingValue<TypeSpec::kNoThrowNew>* array;
+
+  ExpectNoThrow([&allocated]() {
+    allocated = new ThrowingValue<TypeSpec::kNoThrowNew>(1);
+    delete allocated;
+  });
+  ExpectNoThrow([&array]() {
+    array = new ThrowingValue<TypeSpec::kNoThrowNew>[2];
+    delete[] array;
+  });
+}
+
+TEST(ThrowingValueTest, NonThrowingDelete) {
+  auto* allocated = new ThrowingValue<>(1);
+  auto* array = new ThrowingValue<>[2];
+
+  SetCountdown();
+  ExpectNoThrow([allocated]() { delete allocated; });
+  SetCountdown();
+  ExpectNoThrow([array]() { delete[] array; });
+
+  UnsetCountdown();
+}
+
+TEST(ThrowingValueTest, NonThrowingPlacementDelete) {
+  constexpr int kArrayLen = 2;
+  // We intentionally create extra space to store the tag allocated by placement
+  // new[].
+  constexpr size_t kExtraSpaceLen = sizeof(size_t) * 2;
+
+  alignas(ThrowingValue<>) unsigned char buf[sizeof(ThrowingValue<>)];
+  alignas(ThrowingValue<>) unsigned char
+      array_buf[kExtraSpaceLen + sizeof(ThrowingValue<>[kArrayLen])];
+  auto* placed = new (&buf) ThrowingValue<>(1);
+  auto placed_array = new (&array_buf) ThrowingValue<>[kArrayLen];
+  auto* placed_array_end = reinterpret_cast<unsigned char*>(placed_array) +
+                           sizeof(ThrowingValue<>[kArrayLen]);
+  EXPECT_LE(placed_array_end, array_buf + sizeof(array_buf));
+
+  SetCountdown();
+  ExpectNoThrow([placed, &buf]() {
+    placed->~ThrowingValue<>();
+    ThrowingValue<>::operator delete(placed, &buf);
+  });
+
+  SetCountdown();
+  ExpectNoThrow([&, placed_array]() {
+    for (int i = 0; i < kArrayLen; ++i) placed_array[i].~ThrowingValue<>();
+    ThrowingValue<>::operator delete[](placed_array, &array_buf);
+  });
+
+  UnsetCountdown();
+}
+
+TEST(ThrowingValueTest, NonThrowingDestructor) {
+  auto* allocated = new ThrowingValue<>();
+
+  SetCountdown();
+  ExpectNoThrow([allocated]() { delete allocated; });
+  UnsetCountdown();
+}
+
+TEST(ThrowingBoolTest, ThrowingBool) {
+  ThrowingBool t = true;
+
+  // Test that it's contextually convertible to bool
+  if (t) {  // NOLINT(whitespace/empty_if_body)
+  }
+  EXPECT_TRUE(t);
+
+  TestOp([&]() { (void)!t; });
+}
+
+TEST(ThrowingAllocatorTest, MemoryManagement) {
+  // Just exercise the memory management capabilities under LSan to make sure we
+  // don't leak.
+  ThrowingAllocator<int> int_alloc;
+  int* ip = int_alloc.allocate(1);
+  int_alloc.deallocate(ip, 1);
+  int* i_array = int_alloc.allocate(2);
+  int_alloc.deallocate(i_array, 2);
+
+  ThrowingAllocator<ThrowingValue<>> tv_alloc;
+  ThrowingValue<>* ptr = tv_alloc.allocate(1);
+  tv_alloc.deallocate(ptr, 1);
+  ThrowingValue<>* tv_array = tv_alloc.allocate(2);
+  tv_alloc.deallocate(tv_array, 2);
+}
+
+TEST(ThrowingAllocatorTest, CallsGlobalNew) {
+  ThrowingAllocator<ThrowingValue<>, AllocSpec::kNoThrowAllocate> nothrow_alloc;
+  ThrowingValue<>* ptr;
+
+  SetCountdown();
+  // This will only throw if ThrowingValue::new is called.
+  ExpectNoThrow([&]() { ptr = nothrow_alloc.allocate(1); });
+  nothrow_alloc.deallocate(ptr, 1);
+
+  UnsetCountdown();
+}
+
+TEST(ThrowingAllocatorTest, ThrowingConstructors) {
+  ThrowingAllocator<int> int_alloc;
+  int* ip = nullptr;
+
+  SetCountdown();
+  EXPECT_THROW(ip = int_alloc.allocate(1), TestException);
+  ExpectNoThrow([&]() { ip = int_alloc.allocate(1); });
+
+  *ip = 1;
+  SetCountdown();
+  EXPECT_THROW(int_alloc.construct(ip, 2), TestException);
+  EXPECT_EQ(*ip, 1);
+  int_alloc.deallocate(ip, 1);
+
+  UnsetCountdown();
+}
+
+TEST(ThrowingAllocatorTest, NonThrowingConstruction) {
+  {
+    ThrowingAllocator<int, AllocSpec::kNoThrowAllocate> int_alloc;
+    int* ip = nullptr;
+
+    SetCountdown();
+    ExpectNoThrow([&]() { ip = int_alloc.allocate(1); });
+
+    SetCountdown();
+    ExpectNoThrow([&]() { int_alloc.construct(ip, 2); });
+
+    EXPECT_EQ(*ip, 2);
+    int_alloc.deallocate(ip, 1);
+
+    UnsetCountdown();
+  }
+
+  {
+    ThrowingAllocator<int> int_alloc;
+    int* ip = nullptr;
+    ExpectNoThrow([&]() { ip = int_alloc.allocate(1); });
+    ExpectNoThrow([&]() { int_alloc.construct(ip, 2); });
+    EXPECT_EQ(*ip, 2);
+    int_alloc.deallocate(ip, 1);
+  }
+
+  {
+    ThrowingAllocator<ThrowingValue<>, AllocSpec::kNoThrowAllocate>
+        nothrow_alloc;
+    ThrowingValue<>* ptr;
+
+    SetCountdown();
+    ExpectNoThrow([&]() { ptr = nothrow_alloc.allocate(1); });
+
+    SetCountdown();
+    ExpectNoThrow(
+        [&]() { nothrow_alloc.construct(ptr, 2, testing::nothrow_ctor); });
+
+    EXPECT_EQ(ptr->Get(), 2);
+    nothrow_alloc.destroy(ptr);
+    nothrow_alloc.deallocate(ptr, 1);
+
+    UnsetCountdown();
+  }
+
+  {
+    ThrowingAllocator<int> a;
+
+    SetCountdown();
+    ExpectNoThrow([&]() { ThrowingAllocator<double> a1 = a; });
+
+    SetCountdown();
+    ExpectNoThrow([&]() { ThrowingAllocator<double> a1 = std::move(a); });
+
+    UnsetCountdown();
+  }
+}
+
+TEST(ThrowingAllocatorTest, ThrowingAllocatorConstruction) {
+  ThrowingAllocator<int> a;
+  TestOp([]() { ThrowingAllocator<int> a; });
+  TestOp([&]() { a.select_on_container_copy_construction(); });
+}
+
+TEST(ThrowingAllocatorTest, State) {
+  ThrowingAllocator<int> a1, a2;
+  EXPECT_NE(a1, a2);
+
+  auto a3 = a1;
+  EXPECT_EQ(a3, a1);
+  int* ip = a1.allocate(1);
+  EXPECT_EQ(a3, a1);
+  a3.deallocate(ip, 1);
+  EXPECT_EQ(a3, a1);
+}
+
+TEST(ThrowingAllocatorTest, InVector) {
+  std::vector<ThrowingValue<>, ThrowingAllocator<ThrowingValue<>>> v;
+  for (int i = 0; i < 20; ++i) v.push_back({});
+  for (int i = 0; i < 20; ++i) v.pop_back();
+}
+
+TEST(ThrowingAllocatorTest, InList) {
+  std::list<ThrowingValue<>, ThrowingAllocator<ThrowingValue<>>> l;
+  for (int i = 0; i < 20; ++i) l.push_back({});
+  for (int i = 0; i < 20; ++i) l.pop_back();
+  for (int i = 0; i < 20; ++i) l.push_front({});
+  for (int i = 0; i < 20; ++i) l.pop_front();
+}
+
+template <typename TesterInstance, typename = void>
+struct NullaryTestValidator : public std::false_type {};
+
+template <typename TesterInstance>
+struct NullaryTestValidator<
+    TesterInstance,
+    absl::void_t<decltype(std::declval<TesterInstance>().Test())>>
+    : public std::true_type {};
+
+template <typename TesterInstance>
+bool HasNullaryTest(const TesterInstance&) {
+  return NullaryTestValidator<TesterInstance>::value;
+}
+
+void DummyOp(void*) {}
+
+template <typename TesterInstance, typename = void>
+struct UnaryTestValidator : public std::false_type {};
+
+template <typename TesterInstance>
+struct UnaryTestValidator<
+    TesterInstance,
+    absl::void_t<decltype(std::declval<TesterInstance>().Test(DummyOp))>>
+    : public std::true_type {};
+
+template <typename TesterInstance>
+bool HasUnaryTest(const TesterInstance&) {
+  return UnaryTestValidator<TesterInstance>::value;
+}
+
+TEST(ExceptionSafetyTesterTest, IncompleteTypesAreNotTestable) {
+  using T = exceptions_internal::UninitializedT;
+  auto op = [](T* t) {};
+  auto inv = [](T*) { return testing::AssertionSuccess(); };
+  auto fac = []() { return absl::make_unique<T>(); };
+
+  // Test that providing an operation and invariants still does not allow the
+  // invocation of .Test() and .Test(op), because the tester lacks a factory.
+  auto without_fac =
+      testing::MakeExceptionSafetyTester().WithOperation(op).WithContracts(
+          inv, testing::strong_guarantee);
+  EXPECT_FALSE(HasNullaryTest(without_fac));
+  EXPECT_FALSE(HasUnaryTest(without_fac));
+
+  // Test that providing contracts and factory allows the invocation of
+  // .Test(op) but does not allow for .Test() because it lacks an operation
+  auto without_op = testing::MakeExceptionSafetyTester()
+                        .WithContracts(inv, testing::strong_guarantee)
+                        .WithFactory(fac);
+  EXPECT_FALSE(HasNullaryTest(without_op));
+  EXPECT_TRUE(HasUnaryTest(without_op));
+
+  // Test that providing an operation and a factory still does not allow the
+  // invocation of .Test() and .Test(op), because the tester lacks contracts.
+  auto without_inv =
+      testing::MakeExceptionSafetyTester().WithOperation(op).WithFactory(fac);
+  EXPECT_FALSE(HasNullaryTest(without_inv));
+  EXPECT_FALSE(HasUnaryTest(without_inv));
+}
+
+struct ExampleStruct {};
+
+std::unique_ptr<ExampleStruct> ExampleFunctionFactory() {
+  return absl::make_unique<ExampleStruct>();
+}
+
+void ExampleFunctionOperation(ExampleStruct*) {}
+
+testing::AssertionResult ExampleFunctionContract(ExampleStruct*) {
+  return testing::AssertionSuccess();
+}
+
+struct {
+  std::unique_ptr<ExampleStruct> operator()() const {
+    return ExampleFunctionFactory();
+  }
+} example_struct_factory;
+
+struct {
+  void operator()(ExampleStruct*) const {}
+} example_struct_operation;
+
+struct {
+  testing::AssertionResult operator()(ExampleStruct* example_struct) const {
+    return ExampleFunctionContract(example_struct);
+  }
+} example_struct_contract;
+
+auto example_lambda_factory = []() { return ExampleFunctionFactory(); };
+
+auto example_lambda_operation = [](ExampleStruct*) {};
+
+auto example_lambda_contract = [](ExampleStruct* example_struct) {
+  return ExampleFunctionContract(example_struct);
+};
+
+// Testing that function references, pointers, structs with operator() and
+// lambdas can all be used with ExceptionSafetyTester
+TEST(ExceptionSafetyTesterTest, MixedFunctionTypes) {
+  // function reference
+  EXPECT_TRUE(testing::MakeExceptionSafetyTester()
+                  .WithFactory(ExampleFunctionFactory)
+                  .WithOperation(ExampleFunctionOperation)
+                  .WithContracts(ExampleFunctionContract)
+                  .Test());
+
+  // function pointer
+  EXPECT_TRUE(testing::MakeExceptionSafetyTester()
+                  .WithFactory(&ExampleFunctionFactory)
+                  .WithOperation(&ExampleFunctionOperation)
+                  .WithContracts(&ExampleFunctionContract)
+                  .Test());
+
+  // struct
+  EXPECT_TRUE(testing::MakeExceptionSafetyTester()
+                  .WithFactory(example_struct_factory)
+                  .WithOperation(example_struct_operation)
+                  .WithContracts(example_struct_contract)
+                  .Test());
+
+  // lambda
+  EXPECT_TRUE(testing::MakeExceptionSafetyTester()
+                  .WithFactory(example_lambda_factory)
+                  .WithOperation(example_lambda_operation)
+                  .WithContracts(example_lambda_contract)
+                  .Test());
+}
+
+struct NonNegative {
+  bool operator==(const NonNegative& other) const { return i == other.i; }
+  int i;
+};
+
+testing::AssertionResult CheckNonNegativeInvariants(NonNegative* g) {
+  if (g->i >= 0) {
+    return testing::AssertionSuccess();
+  }
+  return testing::AssertionFailure()
+         << "i should be non-negative but is " << g->i;
+}
+
+struct {
+  template <typename T>
+  void operator()(T* t) const {
+    (*t)();
+  }
+} invoker;
+
+auto tester =
+    testing::MakeExceptionSafetyTester().WithOperation(invoker).WithContracts(
+        CheckNonNegativeInvariants);
+auto strong_tester = tester.WithContracts(testing::strong_guarantee);
+
+struct FailsBasicGuarantee : public NonNegative {
+  void operator()() {
+    --i;
+    ThrowingValue<> bomb;
+    ++i;
+  }
+};
+
+TEST(ExceptionCheckTest, BasicGuaranteeFailure) {
+  EXPECT_FALSE(tester.WithInitialValue(FailsBasicGuarantee{}).Test());
+}
+
+struct FollowsBasicGuarantee : public NonNegative {
+  void operator()() {
+    ++i;
+    ThrowingValue<> bomb;
+  }
+};
+
+TEST(ExceptionCheckTest, BasicGuarantee) {
+  EXPECT_TRUE(tester.WithInitialValue(FollowsBasicGuarantee{}).Test());
+}
+
+TEST(ExceptionCheckTest, StrongGuaranteeFailure) {
+  EXPECT_FALSE(strong_tester.WithInitialValue(FailsBasicGuarantee{}).Test());
+  EXPECT_FALSE(strong_tester.WithInitialValue(FollowsBasicGuarantee{}).Test());
+}
+
+struct BasicGuaranteeWithExtraContracts : public NonNegative {
+  // After operator(), i is incremented.  If operator() throws, i is set to 9999
+  void operator()() {
+    int old_i = i;
+    i = kExceptionSentinel;
+    ThrowingValue<> bomb;
+    i = ++old_i;
+  }
+
+  static constexpr int kExceptionSentinel = 9999;
+};
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr int BasicGuaranteeWithExtraContracts::kExceptionSentinel;
+#endif
+
+TEST(ExceptionCheckTest, BasicGuaranteeWithExtraContracts) {
+  auto tester_with_val =
+      tester.WithInitialValue(BasicGuaranteeWithExtraContracts{});
+  EXPECT_TRUE(tester_with_val.Test());
+  EXPECT_TRUE(
+      tester_with_val
+          .WithContracts([](BasicGuaranteeWithExtraContracts* o) {
+            if (o->i == BasicGuaranteeWithExtraContracts::kExceptionSentinel) {
+              return testing::AssertionSuccess();
+            }
+            return testing::AssertionFailure()
+                   << "i should be "
+                   << BasicGuaranteeWithExtraContracts::kExceptionSentinel
+                   << ", but is " << o->i;
+          })
+          .Test());
+}
+
+struct FollowsStrongGuarantee : public NonNegative {
+  void operator()() { ThrowingValue<> bomb; }
+};
+
+TEST(ExceptionCheckTest, StrongGuarantee) {
+  EXPECT_TRUE(tester.WithInitialValue(FollowsStrongGuarantee{}).Test());
+  EXPECT_TRUE(strong_tester.WithInitialValue(FollowsStrongGuarantee{}).Test());
+}
+
+struct HasReset : public NonNegative {
+  void operator()() {
+    i = -1;
+    ThrowingValue<> bomb;
+    i = 1;
+  }
+
+  void reset() { i = 0; }
+};
+
+testing::AssertionResult CheckHasResetContracts(HasReset* h) {
+  h->reset();
+  return testing::AssertionResult(h->i == 0);
+}
+
+TEST(ExceptionCheckTest, ModifyingChecker) {
+  auto set_to_1000 = [](FollowsBasicGuarantee* g) {
+    g->i = 1000;
+    return testing::AssertionSuccess();
+  };
+  auto is_1000 = [](FollowsBasicGuarantee* g) {
+    return testing::AssertionResult(g->i == 1000);
+  };
+  auto increment = [](FollowsStrongGuarantee* g) {
+    ++g->i;
+    return testing::AssertionSuccess();
+  };
+
+  EXPECT_FALSE(tester.WithInitialValue(FollowsBasicGuarantee{})
+                   .WithContracts(set_to_1000, is_1000)
+                   .Test());
+  EXPECT_TRUE(strong_tester.WithInitialValue(FollowsStrongGuarantee{})
+                  .WithContracts(increment)
+                  .Test());
+  EXPECT_TRUE(testing::MakeExceptionSafetyTester()
+                  .WithInitialValue(HasReset{})
+                  .WithContracts(CheckHasResetContracts)
+                  .Test(invoker));
+}
+
+TEST(ExceptionSafetyTesterTest, ResetsCountdown) {
+  auto test =
+      testing::MakeExceptionSafetyTester()
+          .WithInitialValue(ThrowingValue<>())
+          .WithContracts([](ThrowingValue<>*) { return AssertionSuccess(); })
+          .WithOperation([](ThrowingValue<>*) {});
+  ASSERT_TRUE(test.Test());
+  // If the countdown isn't reset because there were no exceptions thrown, then
+  // this will fail with a termination from an unhandled exception
+  EXPECT_TRUE(test.Test());
+}
+
+struct NonCopyable : public NonNegative {
+  NonCopyable(const NonCopyable&) = delete;
+  NonCopyable() : NonNegative{0} {}
+
+  void operator()() { ThrowingValue<> bomb; }
+};
+
+TEST(ExceptionCheckTest, NonCopyable) {
+  auto factory = []() { return absl::make_unique<NonCopyable>(); };
+  EXPECT_TRUE(tester.WithFactory(factory).Test());
+  EXPECT_TRUE(strong_tester.WithFactory(factory).Test());
+}
+
+struct NonEqualityComparable : public NonNegative {
+  void operator()() { ThrowingValue<> bomb; }
+
+  void ModifyOnThrow() {
+    ++i;
+    ThrowingValue<> bomb;
+    static_cast<void>(bomb);
+    --i;
+  }
+};
+
+TEST(ExceptionCheckTest, NonEqualityComparable) {
+  auto nec_is_strong = [](NonEqualityComparable* nec) {
+    return testing::AssertionResult(nec->i == NonEqualityComparable().i);
+  };
+  auto strong_nec_tester = tester.WithInitialValue(NonEqualityComparable{})
+                               .WithContracts(nec_is_strong);
+
+  EXPECT_TRUE(strong_nec_tester.Test());
+  EXPECT_FALSE(strong_nec_tester.Test(
+      [](NonEqualityComparable* n) { n->ModifyOnThrow(); }));
+}
+
+template <typename T>
+struct ExhaustivenessTester {
+  void operator()() {
+    successes |= 1;
+    T b1;
+    static_cast<void>(b1);
+    successes |= (1 << 1);
+    T b2;
+    static_cast<void>(b2);
+    successes |= (1 << 2);
+    T b3;
+    static_cast<void>(b3);
+    successes |= (1 << 3);
+  }
+
+  bool operator==(const ExhaustivenessTester<ThrowingValue<>>&) const {
+    return true;
+  }
+
+  static unsigned char successes;
+};
+
+struct {
+  template <typename T>
+  testing::AssertionResult operator()(ExhaustivenessTester<T>*) const {
+    return testing::AssertionSuccess();
+  }
+} CheckExhaustivenessTesterContracts;
+
+template <typename T>
+unsigned char ExhaustivenessTester<T>::successes = 0;
+
+TEST(ExceptionCheckTest, Exhaustiveness) {
+  auto exhaust_tester = testing::MakeExceptionSafetyTester()
+                            .WithContracts(CheckExhaustivenessTesterContracts)
+                            .WithOperation(invoker);
+
+  EXPECT_TRUE(
+      exhaust_tester.WithInitialValue(ExhaustivenessTester<int>{}).Test());
+  EXPECT_EQ(ExhaustivenessTester<int>::successes, 0xF);
+
+  EXPECT_TRUE(
+      exhaust_tester.WithInitialValue(ExhaustivenessTester<ThrowingValue<>>{})
+          .WithContracts(testing::strong_guarantee)
+          .Test());
+  EXPECT_EQ(ExhaustivenessTester<ThrowingValue<>>::successes, 0xF);
+}
+
+struct LeaksIfCtorThrows : private exceptions_internal::TrackedObject {
+  LeaksIfCtorThrows() : TrackedObject(ABSL_PRETTY_FUNCTION) {
+    ++counter;
+    ThrowingValue<> v;
+    static_cast<void>(v);
+    --counter;
+  }
+  LeaksIfCtorThrows(const LeaksIfCtorThrows&) noexcept
+      : TrackedObject(ABSL_PRETTY_FUNCTION) {}
+  static int counter;
+};
+int LeaksIfCtorThrows::counter = 0;
+
+TEST(ExceptionCheckTest, TestLeakyCtor) {
+  testing::TestThrowingCtor<LeaksIfCtorThrows>();
+  EXPECT_EQ(LeaksIfCtorThrows::counter, 1);
+  LeaksIfCtorThrows::counter = 0;
+}
+
+struct Tracked : private exceptions_internal::TrackedObject {
+  Tracked() : TrackedObject(ABSL_PRETTY_FUNCTION) {}
+};
+
+TEST(ConstructorTrackerTest, CreatedBefore) {
+  Tracked a, b, c;
+  exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown);
+}
+
+TEST(ConstructorTrackerTest, CreatedAfter) {
+  exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown);
+  Tracked a, b, c;
+}
+
+TEST(ConstructorTrackerTest, NotDestroyedAfter) {
+  alignas(Tracked) unsigned char storage[sizeof(Tracked)];
+  EXPECT_NONFATAL_FAILURE(
+      {
+        exceptions_internal::ConstructorTracker ct(
+            exceptions_internal::countdown);
+        new (&storage) Tracked();
+      },
+      "not destroyed");
+}
+
+TEST(ConstructorTrackerTest, DestroyedTwice) {
+  exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown);
+  EXPECT_NONFATAL_FAILURE(
+      {
+        Tracked t;
+        t.~Tracked();
+      },
+      "re-destroyed");
+}
+
+TEST(ConstructorTrackerTest, ConstructedTwice) {
+  exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown);
+  alignas(Tracked) unsigned char storage[sizeof(Tracked)];
+  EXPECT_NONFATAL_FAILURE(
+      {
+        new (&storage) Tracked();
+        new (&storage) Tracked();
+        reinterpret_cast<Tracked*>(&storage)->~Tracked();
+      },
+      "re-constructed");
+}
+
+TEST(ThrowingValueTraitsTest, RelationalOperators) {
+  ThrowingValue<> a, b;
+  EXPECT_TRUE((std::is_convertible<decltype(a == b), bool>::value));
+  EXPECT_TRUE((std::is_convertible<decltype(a != b), bool>::value));
+  EXPECT_TRUE((std::is_convertible<decltype(a < b), bool>::value));
+  EXPECT_TRUE((std::is_convertible<decltype(a <= b), bool>::value));
+  EXPECT_TRUE((std::is_convertible<decltype(a > b), bool>::value));
+  EXPECT_TRUE((std::is_convertible<decltype(a >= b), bool>::value));
+}
+
+TEST(ThrowingAllocatorTraitsTest, Assignability) {
+  EXPECT_TRUE(absl::is_move_assignable<ThrowingAllocator<int>>::value);
+  EXPECT_TRUE(absl::is_copy_assignable<ThrowingAllocator<int>>::value);
+  EXPECT_TRUE(std::is_nothrow_move_assignable<ThrowingAllocator<int>>::value);
+  EXPECT_TRUE(std::is_nothrow_copy_assignable<ThrowingAllocator<int>>::value);
+}
+
+}  // namespace
+
+}  // namespace testing
+
+#endif  // ABSL_HAVE_EXCEPTIONS
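For reference, a minimal sketch of how the tester framework exercised above is typically driven against user code (not part of the vendored test file; `Counter` and the test name are hypothetical and used only for illustration):

#include "absl/base/internal/exception_safety_testing.h"
#include "gtest/gtest.h"

#ifdef ABSL_HAVE_EXCEPTIONS
namespace {

// An equality-comparable value type to run the tester against.
struct Counter {
  int n = 0;
  bool operator==(const Counter& other) const { return n == other.n; }
  bool operator!=(const Counter& other) const { return n != other.n; }
};

TEST(ExceptionSafetySketch, IncrementProvidesStrongGuarantee) {
  auto tester = testing::MakeExceptionSafetyTester()
                    .WithInitialValue(Counter{})
                    .WithContracts(testing::strong_guarantee);
  // The operation contains no ThrowingValue exception points, so it trivially
  // satisfies the strong guarantee and the tester reports success.
  EXPECT_TRUE(tester.Test([](Counter* c) { ++c->n; }));
}

}  // namespace
#endif  // ABSL_HAVE_EXCEPTIONS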

+ 64 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/inline_variable_test.cc

@@ -0,0 +1,64 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <type_traits>
+
+#include "absl/base/internal/inline_variable.h"
+#include "absl/base/internal/inline_variable_testing.h"
+
+#include "gtest/gtest.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace inline_variable_testing_internal {
+namespace {
+
+TEST(InlineVariableTest, Constexpr) {
+  static_assert(inline_variable_foo.value == 5, "");
+  static_assert(other_inline_variable_foo.value == 5, "");
+  static_assert(inline_variable_int == 5, "");
+  static_assert(other_inline_variable_int == 5, "");
+}
+
+TEST(InlineVariableTest, DefaultConstructedIdentityEquality) {
+  EXPECT_EQ(get_foo_a().value, 5);
+  EXPECT_EQ(get_foo_b().value, 5);
+  EXPECT_EQ(&get_foo_a(), &get_foo_b());
+}
+
+TEST(InlineVariableTest, DefaultConstructedIdentityInequality) {
+  EXPECT_NE(&inline_variable_foo, &other_inline_variable_foo);
+}
+
+TEST(InlineVariableTest, InitializedIdentityEquality) {
+  EXPECT_EQ(get_int_a(), 5);
+  EXPECT_EQ(get_int_b(), 5);
+  EXPECT_EQ(&get_int_a(), &get_int_b());
+}
+
+TEST(InlineVariableTest, InitializedIdentityInequality) {
+  EXPECT_NE(&inline_variable_int, &other_inline_variable_int);
+}
+
+TEST(InlineVariableTest, FunPtrType) {
+  static_assert(
+      std::is_same<void(*)(),
+                   std::decay<decltype(inline_variable_fun_ptr)>::type>::value,
+      "");
+}
+
+}  // namespace
+}  // namespace inline_variable_testing_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
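For reference, a minimal sketch of the macro these tests exercise (not part of the vendored files; `kAnswer` is a hypothetical name used only for illustration):

#include "absl/base/internal/inline_variable.h"

// Emits a single constexpr object whose address is shared by every
// translation unit that includes this definition, even before C++17
// inline variables.
ABSL_INTERNAL_INLINE_CONSTEXPR(int, kAnswer, 42);

// Two functions defined in different TUs that both return &kAnswer would
// compare equal, which is what the identity tests above verify for
// inline_variable_foo and inline_variable_int.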

+ 27 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/inline_variable_test_a.cc

@@ -0,0 +1,27 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/inline_variable_testing.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace inline_variable_testing_internal {
+
+const Foo& get_foo_a() { return inline_variable_foo; }
+
+const int& get_int_a() { return inline_variable_int; }
+
+}  // namespace inline_variable_testing_internal
+ABSL_NAMESPACE_END
+}  // namespace absl

+ 27 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/inline_variable_test_b.cc

@@ -0,0 +1,27 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/inline_variable_testing.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace inline_variable_testing_internal {
+
+const Foo& get_foo_b() { return inline_variable_foo; }
+
+const int& get_int_b() { return inline_variable_int; }
+
+}  // namespace inline_variable_testing_internal
+ABSL_NAMESPACE_END
+}  // namespace absl

+ 200 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/atomic_hook.h

@@ -0,0 +1,200 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
+#define ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
+
+#include <atomic>
+#include <cassert>
+#include <cstdint>
+#include <utility>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT 0
+#else
+#define ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT 1
+#endif
+
+#if defined(_MSC_VER)
+#define ABSL_HAVE_WORKING_ATOMIC_POINTER 0
+#else
+#define ABSL_HAVE_WORKING_ATOMIC_POINTER 1
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+template <typename T>
+class AtomicHook;
+
+// To work around AtomicHook not being constant-initializable on some platforms,
+// prefer to annotate instances with `ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES`
+// instead of `ABSL_CONST_INIT`.
+#if ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
+#define ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_CONST_INIT
+#else
+#define ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+#endif
+
+// `AtomicHook` is a helper class, templatized on a raw function pointer type,
+// for implementing Abseil customization hooks.  It is a callable object that
+// dispatches to the registered hook.  Objects of type `AtomicHook` must have
+// static or thread storage duration.
+//
+// A default constructed object performs a no-op (and returns a default
+// constructed object) if no hook has been registered.
+//
+// Hooks can be pre-registered via constant initialization, for example:
+//
+// ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static AtomicHook<void(*)()>
+//     my_hook(DefaultAction);
+//
+// and then changed at runtime via a call to `Store()`.
+//
+// Reads and writes guarantee memory_order_acquire/memory_order_release
+// semantics.
+template <typename ReturnType, typename... Args>
+class AtomicHook<ReturnType (*)(Args...)> {
+ public:
+  using FnPtr = ReturnType (*)(Args...);
+
+  // Constructs an object that by default performs a no-op (and
+  // returns a default constructed object) when no hook has been registered.
+  constexpr AtomicHook() : AtomicHook(DummyFunction) {}
+
+  // Constructs an object that by default dispatches to/returns the
+  // pre-registered default_fn when no hook has been registered at runtime.
+#if ABSL_HAVE_WORKING_ATOMIC_POINTER && ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
+  explicit constexpr AtomicHook(FnPtr default_fn)
+      : hook_(default_fn), default_fn_(default_fn) {}
+#elif ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
+  explicit constexpr AtomicHook(FnPtr default_fn)
+      : hook_(kUninitialized), default_fn_(default_fn) {}
+#else
+  // As of January 2020, on all known versions of MSVC this constructor runs in
+  // the global constructor sequence.  If `Store()` is called by a dynamic
+  // initializer, we want to preserve the value, even if this constructor runs
+  // after the call to `Store()`.  If not, `hook_` will be
+  // zero-initialized by the linker and we have no need to set it.
+  // https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html
+  explicit constexpr AtomicHook(FnPtr default_fn)
+      : /* hook_(deliberately omitted), */ default_fn_(default_fn) {
+    static_assert(kUninitialized == 0, "here we rely on zero-initialization");
+  }
+#endif
+
+  // Stores the provided function pointer as the value for this hook.
+  //
+  // This is intended to be called once.  Multiple calls are legal only if the
+  // same function pointer is provided for each call.  The store is implemented
+  // as a memory_order_release operation, and read accesses are implemented as
+  // memory_order_acquire.
+  void Store(FnPtr fn) {
+    bool success = DoStore(fn);
+    static_cast<void>(success);
+    assert(success);
+  }
+
+  // Invokes the registered callback.  If no callback has yet been registered, a
+  // default-constructed object of the appropriate type is returned instead.
+  template <typename... CallArgs>
+  ReturnType operator()(CallArgs&&... args) const {
+    return DoLoad()(std::forward<CallArgs>(args)...);
+  }
+
+  // Returns the registered callback, or nullptr if none has been registered.
+  // Useful if client code needs to conditionalize behavior based on whether a
+  // callback was registered.
+  //
+  // Note that atomic_hook.Load()() and atomic_hook() have different semantics:
+  // operator()() will perform a no-op if no callback was registered, while
+  // Load()() will dereference a null function pointer.  Prefer operator()() to
+  // Load()() unless you must conditionalize behavior on whether a hook was
+  // registered.
+  FnPtr Load() const {
+    FnPtr ptr = DoLoad();
+    return (ptr == DummyFunction) ? nullptr : ptr;
+  }
+
+ private:
+  static ReturnType DummyFunction(Args...) {
+    return ReturnType();
+  }
+
+  // Current versions of MSVC (as of September 2017) have a broken
+  // implementation of std::atomic<T*>:  Its constructor attempts to do the
+  // equivalent of a reinterpret_cast in a constexpr context, which is not
+  // allowed.
+  //
+  // This causes an issue when building with LLVM under Windows.  To avoid this,
+  // we use a less-efficient, intptr_t-based implementation on Windows.
+#if ABSL_HAVE_WORKING_ATOMIC_POINTER
+  // Return the stored value, or DummyFunction if no value has been stored.
+  FnPtr DoLoad() const { return hook_.load(std::memory_order_acquire); }
+
+  // Store the given value.  Returns false if a different value was already
+  // stored to this object.
+  bool DoStore(FnPtr fn) {
+    assert(fn);
+    FnPtr expected = default_fn_;
+    const bool store_succeeded = hook_.compare_exchange_strong(
+        expected, fn, std::memory_order_acq_rel, std::memory_order_acquire);
+    const bool same_value_already_stored = (expected == fn);
+    return store_succeeded || same_value_already_stored;
+  }
+
+  std::atomic<FnPtr> hook_;
+#else  // !ABSL_HAVE_WORKING_ATOMIC_POINTER
+  // Use a sentinel value unlikely to be the address of an actual function.
+  static constexpr intptr_t kUninitialized = 0;
+
+  static_assert(sizeof(intptr_t) >= sizeof(FnPtr),
+                "intptr_t can't contain a function pointer");
+
+  FnPtr DoLoad() const {
+    const intptr_t value = hook_.load(std::memory_order_acquire);
+    if (value == kUninitialized) {
+      return default_fn_;
+    }
+    return reinterpret_cast<FnPtr>(value);
+  }
+
+  bool DoStore(FnPtr fn) {
+    assert(fn);
+    const auto value = reinterpret_cast<intptr_t>(fn);
+    intptr_t expected = kUninitialized;
+    const bool store_succeeded = hook_.compare_exchange_strong(
+        expected, value, std::memory_order_acq_rel, std::memory_order_acquire);
+    const bool same_value_already_stored = (expected == value);
+    return store_succeeded || same_value_already_stored;
+  }
+
+  std::atomic<intptr_t> hook_;
+#endif
+
+  const FnPtr default_fn_;
+};
+
+#undef ABSL_HAVE_WORKING_ATOMIC_POINTER
+#undef ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_
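For reference, a minimal usage sketch of the header above (not part of the vendored file; `log_hook`, `StderrLog`, and `LogLine` are hypothetical names used only for illustration):

#include <cstdio>

#include "absl/base/internal/atomic_hook.h"

namespace {

void StderrLog(const char* msg) { std::fprintf(stderr, "%s\n", msg); }

// Pre-registered via constant initialization with StderrLog as the default.
ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
absl::base_internal::AtomicHook<void (*)(const char*)> log_hook(StderrLog);

// Dispatches to whichever sink is currently registered.
void LogLine(const char* msg) { log_hook(msg); }

}  // namespace

// During startup a different sink may be installed (at most once, or with the
// same pointer on each call):
//   log_hook.Store(&SomeOtherSink);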

+ 97 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/atomic_hook_test.cc

@@ -0,0 +1,97 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/atomic_hook.h"
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/internal/atomic_hook_test_helper.h"
+
+namespace {
+
+using ::testing::Eq;
+
+int value = 0;
+void TestHook(int x) { value = x; }
+
+TEST(AtomicHookTest, NoDefaultFunction) {
+  ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook<
+      void (*)(int)>
+      hook;
+  value = 0;
+
+  // Test the default DummyFunction.
+  EXPECT_TRUE(hook.Load() == nullptr);
+  EXPECT_EQ(value, 0);
+  hook(1);
+  EXPECT_EQ(value, 0);
+
+  // Test a stored hook.
+  hook.Store(TestHook);
+  EXPECT_TRUE(hook.Load() == TestHook);
+  EXPECT_EQ(value, 0);
+  hook(1);
+  EXPECT_EQ(value, 1);
+
+  // Calling Store() with the same hook should not crash.
+  hook.Store(TestHook);
+  EXPECT_TRUE(hook.Load() == TestHook);
+  EXPECT_EQ(value, 1);
+  hook(2);
+  EXPECT_EQ(value, 2);
+}
+
+TEST(AtomicHookTest, WithDefaultFunction) {
+  // Set the default value to TestHook at compile-time.
+  ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook<
+      void (*)(int)>
+      hook(TestHook);
+  value = 0;
+
+  // Test the default value is TestHook.
+  EXPECT_TRUE(hook.Load() == TestHook);
+  EXPECT_EQ(value, 0);
+  hook(1);
+  EXPECT_EQ(value, 1);
+
+  // Calling Store() with the same hook should not crash.
+  hook.Store(TestHook);
+  EXPECT_TRUE(hook.Load() == TestHook);
+  EXPECT_EQ(value, 1);
+  hook(2);
+  EXPECT_EQ(value, 2);
+}
+
+ABSL_CONST_INIT int override_func_calls = 0;
+void OverrideFunc() { override_func_calls++; }
+static struct OverrideInstaller {
+  OverrideInstaller() { absl::atomic_hook_internal::func.Store(OverrideFunc); }
+} override_installer;
+
+TEST(AtomicHookTest, DynamicInitFromAnotherTU) {
+  // MSVC 14.2 doesn't do constexpr static init correctly; in particular it
+  // tends to sequence static init (i.e. defaults) of `AtomicHook` objects
+  // after their dynamic init (i.e. overrides), overwriting whatever value was
+  // written during dynamic init.  This regression test validates the fix.
+  // https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html
+  EXPECT_THAT(absl::atomic_hook_internal::default_func_calls, Eq(0));
+  EXPECT_THAT(override_func_calls, Eq(0));
+  absl::atomic_hook_internal::func();
+  EXPECT_THAT(absl::atomic_hook_internal::default_func_calls, Eq(0));
+  EXPECT_THAT(override_func_calls, Eq(1));
+  EXPECT_THAT(absl::atomic_hook_internal::func.Load(), Eq(OverrideFunc));
+}
+
+}  // namespace

+ 32 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/atomic_hook_test_helper.cc

@@ -0,0 +1,32 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/atomic_hook_test_helper.h"
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/atomic_hook.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace atomic_hook_internal {
+
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook<VoidF>
+    func(DefaultFunc);
+ABSL_CONST_INIT int default_func_calls = 0;
+void DefaultFunc() { default_func_calls++; }
+void RegisterFunc(VoidF f) { func.Store(f); }
+
+}  // namespace atomic_hook_internal
+ABSL_NAMESPACE_END
+}  // namespace absl

+ 34 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/atomic_hook_test_helper.h

@@ -0,0 +1,34 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_ATOMIC_HOOK_TEST_HELPER_H_
+#define ABSL_BASE_INTERNAL_ATOMIC_HOOK_TEST_HELPER_H_
+
+#include "absl/base/internal/atomic_hook.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace atomic_hook_internal {
+
+using VoidF = void (*)();
+extern absl::base_internal::AtomicHook<VoidF> func;
+extern int default_func_calls;
+void DefaultFunc();
+void RegisterFunc(VoidF func);
+
+}  // namespace atomic_hook_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_ATOMIC_HOOK_TEST_HELPER_H_

+ 22 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/cmake_thread_test.cc

@@ -0,0 +1,22 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <iostream>
+#include "absl/base/internal/thread_identity.h"
+
+int main() {
+  auto* tid = absl::base_internal::CurrentThreadIdentityIfPresent();
+  // Make sure the above call can't be optimized out
+  std::cout << (void*)tid << std::endl;
+}

+ 77 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/cycleclock.cc

@@ -0,0 +1,77 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The implementation of CycleClock::Frequency.
+//
+// NOTE: only i386 and x86_64 have been well tested.
+// PPC, sparc, alpha, and ia64 are based on
+//    http://peter.kuscsik.com/wordpress/?p=14
+// with modifications by m3b.  See also
+//    https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
+
+#include "absl/base/internal/cycleclock.h"
+
+#include <atomic>
+#include <chrono>  // NOLINT(build/c++11)
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/unscaledcycleclock.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+constexpr int32_t CycleClock::kShift;
+constexpr double CycleClock::kFrequencyScale;
+#endif
+
+ABSL_CONST_INIT std::atomic<CycleClockSourceFunc>
+    CycleClock::cycle_clock_source_{nullptr};
+
+void CycleClockSource::Register(CycleClockSourceFunc source) {
+  // Corresponds to the load(std::memory_order_acquire) in LoadCycleClockSource.
+  CycleClock::cycle_clock_source_.store(source, std::memory_order_release);
+}
+
+#ifdef _WIN32
+int64_t CycleClock::Now() {
+  auto fn = LoadCycleClockSource();
+  if (fn == nullptr) {
+    return base_internal::UnscaledCycleClock::Now() >> kShift;
+  }
+  return fn() >> kShift;
+}
+#endif
+
+#else
+
+int64_t CycleClock::Now() {
+  return std::chrono::duration_cast<std::chrono::nanoseconds>(
+             std::chrono::steady_clock::now().time_since_epoch())
+      .count();
+}
+
+double CycleClock::Frequency() {
+  return 1e9;
+}
+
+#endif
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl

+ 144 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/cycleclock.h

@@ -0,0 +1,144 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// -----------------------------------------------------------------------------
+// File: cycleclock.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a `CycleClock`, which yields the value and frequency
+// of a cycle counter that increments at a rate that is approximately constant.
+//
+// NOTE:
+//
+// The cycle counter frequency is not necessarily related to the core clock
+// frequency and should not be treated as such. That is, `CycleClock` cycles are
+// not necessarily "CPU cycles" and code should not rely on that behavior, even
+// if experimentally observed.
+//
+// An arbitrary offset may have been added to the counter at power on.
+//
+// On some platforms, the rate and offset of the counter may differ
+// slightly when read from different CPUs of a multiprocessor. Usually,
+// we try to ensure that the operating system adjusts values periodically
+// so that values agree approximately.   If you need stronger guarantees,
+// consider using alternate interfaces.
+//
+// The CPU is not required to maintain the ordering of a cycle counter read
+// with respect to surrounding instructions.
+
+#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_H_
+#define ABSL_BASE_INTERNAL_CYCLECLOCK_H_
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/cycleclock_config.h"
+#include "absl/base/internal/unscaledcycleclock.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+using CycleClockSourceFunc = int64_t (*)();
+
+// -----------------------------------------------------------------------------
+// CycleClock
+// -----------------------------------------------------------------------------
+class CycleClock {
+ public:
+  // CycleClock::Now()
+  //
+  // Returns the value of a cycle counter that counts at a rate that is
+  // approximately constant.
+  static int64_t Now();
+
+  // CycleClock::Frequency()
+  //
+  // Returns the amount by which `CycleClock::Now()` increases per second. Note
+  // that this value may not necessarily match the core CPU clock frequency.
+  static double Frequency();
+
+ private:
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+  static CycleClockSourceFunc LoadCycleClockSource();
+
+  static constexpr int32_t kShift = kCycleClockShift;
+  static constexpr double kFrequencyScale = kCycleClockFrequencyScale;
+
+  ABSL_CONST_INIT static std::atomic<CycleClockSourceFunc> cycle_clock_source_;
+#endif  //  ABSL_USE_UNSCALED_CYCLECLOCK
+
+  CycleClock() = delete;  // no instances
+  CycleClock(const CycleClock&) = delete;
+  CycleClock& operator=(const CycleClock&) = delete;
+
+  friend class CycleClockSource;
+};
+
+class CycleClockSource {
+ private:
+  // CycleClockSource::Register()
+  //
+  // Register a function that provides an alternate source for the unscaled CPU
+  // cycle count value. The source function must be async signal safe, must not
+  // call CycleClock::Now(), and must have a frequency that matches that of the
+  // unscaled clock used by CycleClock. A nullptr value resets CycleClock to use
+  // the default source.
+  static void Register(CycleClockSourceFunc source);
+};
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+inline CycleClockSourceFunc CycleClock::LoadCycleClockSource() {
+#if !defined(__x86_64__)
+  // Optimize for the common case (no callback) by first doing a relaxed load;
+  // this is significantly faster on non-x86 platforms.
+  if (cycle_clock_source_.load(std::memory_order_relaxed) == nullptr) {
+    return nullptr;
+  }
+#endif  // !defined(__x86_64__)
+
+  // This corresponds to the store(std::memory_order_release) in
+  // CycleClockSource::Register, and makes sure that any updates made prior to
+  // registering the callback are visible to this thread before the callback
+  // is invoked.
+  return cycle_clock_source_.load(std::memory_order_acquire);
+}
+
+// Accessing globals in inlined code in Windows DLLs is problematic.
+#ifndef _WIN32
+inline int64_t CycleClock::Now() {
+  auto fn = LoadCycleClockSource();
+  if (fn == nullptr) {
+    return base_internal::UnscaledCycleClock::Now() >> kShift;
+  }
+  return fn() >> kShift;
+}
+#endif
+
+inline double CycleClock::Frequency() {
+  return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency();
+}
+
+#endif  // ABSL_USE_UNSCALED_CYCLECLOCK
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_CYCLECLOCK_H_
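For reference, a minimal sketch of the intended use of the interface above (not part of the vendored header; `TimeWorkSeconds` is a hypothetical helper used only for illustration):

#include <cstdint>

#include "absl/base/internal/cycleclock.h"

// Converts a Now() delta to seconds via Frequency(); the counter is only
// approximately constant-rate and is not a CPU-cycle count.
double TimeWorkSeconds(void (*work)()) {
  const int64_t start = absl::base_internal::CycleClock::Now();
  work();
  const int64_t stop = absl::base_internal::CycleClock::Now();
  return static_cast<double>(stop - start) /
         absl::base_internal::CycleClock::Frequency();
}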

+ 55 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/cycleclock_config.h

@@ -0,0 +1,55 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_CONFIG_H_
+#define ABSL_BASE_INTERNAL_CYCLECLOCK_CONFIG_H_
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/inline_variable.h"
+#include "absl/base/internal/unscaledcycleclock_config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+#ifdef NDEBUG
+#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+// Not debug mode and the UnscaledCycleClock frequency is the CPU
+// frequency.  Scale the CycleClock to prevent overflow if someone
+// tries to represent the time as cycles since the Unix epoch.
+ABSL_INTERNAL_INLINE_CONSTEXPR(int32_t, kCycleClockShift, 1);
+#else
+// Not debug mode and the UnscaledCycleClock isn't operating at the
+// raw CPU frequency. There is no need to do any scaling, so don't
+// needlessly sacrifice precision.
+ABSL_INTERNAL_INLINE_CONSTEXPR(int32_t, kCycleClockShift, 0);
+#endif
+#else   // NDEBUG
+// In debug mode use a different shift to discourage depending on a
+// particular shift value.
+ABSL_INTERNAL_INLINE_CONSTEXPR(int32_t, kCycleClockShift, 2);
+#endif  // NDEBUG
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(double, kCycleClockFrequencyScale,
+                               1.0 / (1 << kCycleClockShift));
+#endif  //  ABSL_USE_UNSCALED_CYCLECLOCK
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_CYCLECLOCK_CONFIG_H_
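For reference, the scaling arithmetic these constants encode, written out as an illustrative note (not part of the vendored header):

// With kCycleClockShift == 1, CycleClock::Now() returns raw_ticks >> 1 and
// kCycleClockFrequencyScale == 1.0 / (1 << 1) == 0.5, so Frequency() reports
// half the raw rate. The ratio, and therefore any elapsed-time computation,
// is unchanged:
//   raw:    3'000'000'000 ticks at 3.0e9 ticks/s        -> 1.0 s
//   scaled: 1'500'000'000 ticks at 0.5 * 3.0e9 ticks/s  -> 1.0 s
// Halving the magnitude roughly doubles the headroom before a
// cycles-since-the-Unix-epoch value would overflow an int64_t.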

+ 170 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/direct_mmap.h

@@ -0,0 +1,170 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Functions for directly invoking mmap() via syscall, avoiding the case where
+// mmap() has been locally overridden.
+
+#ifndef ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
+#define ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
+
+#include "absl/base/config.h"
+
+#ifdef ABSL_HAVE_MMAP
+
+#include <sys/mman.h>
+
+#ifdef __linux__
+
+#include <sys/types.h>
+#ifdef __BIONIC__
+#include <sys/syscall.h>
+#else
+#include <syscall.h>
+#endif
+
+#include <linux/unistd.h>
+#include <unistd.h>
+#include <cerrno>
+#include <cstdarg>
+#include <cstdint>
+
+#ifdef __mips__
+// Include definitions of the ABI currently in use.
+#if defined(__BIONIC__) || !defined(__GLIBC__)
+// Android doesn't have sgidefs.h, but does have asm/sgidefs.h, which has the
+// definitions we need.
+#include <asm/sgidefs.h>
+#else
+#include <sgidefs.h>
+#endif  // __BIONIC__ || !__GLIBC__
+#endif  // __mips__
+
+// SYS_mmap and SYS_munmap are not defined in Android.
+#ifdef __BIONIC__
+extern "C" void* __mmap2(void*, size_t, int, int, int, size_t);
+#if defined(__NR_mmap) && !defined(SYS_mmap)
+#define SYS_mmap __NR_mmap
+#endif
+#ifndef SYS_munmap
+#define SYS_munmap __NR_munmap
+#endif
+#endif  // __BIONIC__
+
+#if defined(__NR_mmap2) && !defined(SYS_mmap2)
+#define SYS_mmap2 __NR_mmap2
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Platform specific logic extracted from
+// https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h
+inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
+                        off_t offset) noexcept {
+#if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \
+    defined(__m68k__) || defined(__sh__) ||                                  \
+    (defined(__hppa__) && !defined(__LP64__)) ||                             \
+    (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) ||                   \
+    (defined(__PPC__) && !defined(__PPC64__)) ||                             \
+    (defined(__riscv) && __riscv_xlen == 32) ||                              \
+    (defined(__s390__) && !defined(__s390x__)) ||                            \
+    (defined(__sparc__) && !defined(__arch64__))
+  // On these architectures, implement mmap with mmap2.
+  static int pagesize = 0;
+  if (pagesize == 0) {
+#if defined(__wasm__) || defined(__asmjs__)
+    pagesize = getpagesize();
+#else
+    pagesize = sysconf(_SC_PAGESIZE);
+#endif
+  }
+  if (offset < 0 || offset % pagesize != 0) {
+    errno = EINVAL;
+    return MAP_FAILED;
+  }
+#ifdef __BIONIC__
+  // SYS_mmap2 has problems on Android API level <= 16.
+  // Workaround by invoking __mmap2() instead.
+  return __mmap2(start, length, prot, flags, fd,
+                 static_cast<size_t>(offset / pagesize));
+#else
+  return reinterpret_cast<void*>(
+      syscall(SYS_mmap2, start, length, prot, flags, fd,
+              static_cast<unsigned long>(offset / pagesize)));  // NOLINT
+#endif
+#elif defined(__s390x__)
+  // On s390x, mmap() arguments are passed in memory.
+  unsigned long buf[6] = {reinterpret_cast<unsigned long>(start),  // NOLINT
+                          static_cast<unsigned long>(length),      // NOLINT
+                          static_cast<unsigned long>(prot),        // NOLINT
+                          static_cast<unsigned long>(flags),       // NOLINT
+                          static_cast<unsigned long>(fd),          // NOLINT
+                          static_cast<unsigned long>(offset)};     // NOLINT
+  return reinterpret_cast<void*>(syscall(SYS_mmap, buf));
+#elif defined(__x86_64__)
+// The x32 ABI has 32 bit longs, but the syscall interface is 64 bit.
+// We need to explicitly cast to an unsigned 64 bit type to avoid implicit
+// sign extension.  We can't cast pointers directly because those are
+// 32 bits, and gcc will dump ugly warnings about casting from a pointer
+// to an integer of a different size. We also need to make sure __off64_t
+// isn't truncated to 32-bits under x32.
+#define MMAP_SYSCALL_ARG(x) ((uint64_t)(uintptr_t)(x))
+  return reinterpret_cast<void*>(
+      syscall(SYS_mmap, MMAP_SYSCALL_ARG(start), MMAP_SYSCALL_ARG(length),
+              MMAP_SYSCALL_ARG(prot), MMAP_SYSCALL_ARG(flags),
+              MMAP_SYSCALL_ARG(fd), static_cast<uint64_t>(offset)));
+#undef MMAP_SYSCALL_ARG
+#else  // Remaining 64-bit architectures.
+  static_assert(sizeof(unsigned long) == 8, "Platform is not 64-bit");
+  return reinterpret_cast<void*>(
+      syscall(SYS_mmap, start, length, prot, flags, fd, offset));
+#endif
+}
+
+inline int DirectMunmap(void* start, size_t length) {
+  return static_cast<int>(syscall(SYS_munmap, start, length));
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#else  // !__linux__
+
+// For non-linux platforms where we have mmap, just dispatch directly to the
+// actual mmap()/munmap() methods.
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd,
+                        off_t offset) {
+  return mmap(start, length, prot, flags, fd, offset);
+}
+
+inline int DirectMunmap(void* start, size_t length) {
+  return munmap(start, length);
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // __linux__
+
+#endif  // ABSL_HAVE_MMAP
+
+#endif  // ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
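For reference, a minimal usage sketch of the header above (not part of the vendored file; `MapAndUnmapOnePage` is a hypothetical helper used only for illustration and assumes a platform that defines MAP_ANONYMOUS):

#include <cstddef>

#include "absl/base/internal/direct_mmap.h"

#ifdef ABSL_HAVE_MMAP
// Maps one anonymous page through the raw syscall path (bypassing any local
// mmap() override) and immediately releases it again.
bool MapAndUnmapOnePage(size_t page_size) {
  void* p = absl::base_internal::DirectMmap(
      nullptr, page_size, PROT_READ | PROT_WRITE,
      MAP_PRIVATE | MAP_ANONYMOUS, /*fd=*/-1, /*offset=*/0);
  if (p == MAP_FAILED) return false;
  return absl::base_internal::DirectMunmap(p, page_size) == 0;
}
#endif  // ABSL_HAVE_MMAP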

+ 398 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/dynamic_annotations.h

@@ -0,0 +1,398 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file defines dynamic annotations for use with dynamic analysis tool
+// such as valgrind, PIN, etc.
+//
+// Dynamic annotation is a source code annotation that affects the generated
+// code (that is, the annotation is not a comment). Each such annotation is
+// attached to a particular instruction and/or to a particular object (address)
+// in the program.
+//
+// The annotations that should be used by users are macros in all upper-case
+// (e.g., ANNOTATE_THREAD_NAME).
+//
+// Actual implementation of these macros may differ depending on the dynamic
+// analysis tool being used.
+//
+// This file supports the following configurations:
+// - Dynamic Annotations enabled (with static thread-safety warnings disabled).
+//   In this case, macros expand to functions implemented by Thread Sanitizer,
+//   when building with TSan. When not provided an external implementation,
+//   dynamic_annotations.cc provides no-op implementations.
+//
+// - Static Clang thread-safety warnings enabled.
+//   When building with a Clang compiler that supports thread-safety warnings,
+//   a subset of annotations can be statically-checked at compile-time. We
+//   expand these macros to static-inline functions that can be analyzed for
+//   thread-safety, but afterwards elided when building the final binary.
+//
+// - All annotations are disabled.
+//   If neither Dynamic Annotations nor Clang thread-safety warnings are
+//   enabled, then all annotation-macros expand to empty.
+
+#ifndef ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_
+#define ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_
+
+#include <stddef.h>
+
+#include "absl/base/config.h"
+
+// -------------------------------------------------------------------------
+// Decide which features are enabled
+
+#ifndef DYNAMIC_ANNOTATIONS_ENABLED
+#define DYNAMIC_ANNOTATIONS_ENABLED 0
+#endif
+
+#if defined(__clang__) && !defined(SWIG)
+#define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 1
+#endif
+
+#if DYNAMIC_ANNOTATIONS_ENABLED != 0
+
+#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0
+#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED 1
+
+#else
+
+#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 0
+#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 0
+#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 0
+
+// Clang provides limited support for static thread-safety analysis through a
+// feature called Annotalysis. We configure macro-definitions according to
+// whether Annotalysis support is available. When running in opt-mode, GCC
+// will issue a warning, if these attributes are compiled. Only include them
+// when compiling using Clang.
+
+// ANNOTALYSIS_ENABLED == 1 when IGNORE_READ_ATTRIBUTE_ENABLED == 1
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED \
+  defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+// Read/write annotations are enabled in Annotalysis mode; disabled otherwise.
+#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \
+  ABSL_INTERNAL_ANNOTALYSIS_ENABLED
+#endif
+
+// Memory annotations are also made available to LLVM's Memory Sanitizer
+#if defined(ABSL_HAVE_MEMORY_SANITIZER) && !defined(__native_client__)
+#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 1
+#endif
+
+#ifndef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED
+#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 0
+#endif
+
+#ifdef __cplusplus
+#define ABSL_INTERNAL_BEGIN_EXTERN_C extern "C" {
+#define ABSL_INTERNAL_END_EXTERN_C }  // extern "C"
+#define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F
+#define ABSL_INTERNAL_STATIC_INLINE inline
+#else
+#define ABSL_INTERNAL_BEGIN_EXTERN_C  // empty
+#define ABSL_INTERNAL_END_EXTERN_C    // empty
+#define ABSL_INTERNAL_GLOBAL_SCOPED(F) F
+#define ABSL_INTERNAL_STATIC_INLINE static inline
+#endif
+
+// -------------------------------------------------------------------------
+// Define race annotations.
+
+#if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1
+
+// -------------------------------------------------------------
+// Annotations that suppress errors. It is usually better to express the
+// program's synchronization using the other annotations, but these can be used
+// when all else fails.
+
+// Report that we may have a benign race at `pointer`, with size
+// "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the
+// point where `pointer` has been allocated, preferably close to the point
+// where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC.
+#define ANNOTATE_BENIGN_RACE(pointer, description)     \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \
+  (__FILE__, __LINE__, pointer, sizeof(*(pointer)), description)
+
+// Same as ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to
+// the memory range [`address`, `address`+`size`).
+#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized)         \
+  (__FILE__, __LINE__, address, size, description)
+
+// Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads.
+// This annotation could be useful if you want to skip expensive race analysis
+// during some period of program execution, e.g. during initialization.
+#define ANNOTATE_ENABLE_RACE_DETECTION(enable)             \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \
+  (__FILE__, __LINE__, enable)
+
+// -------------------------------------------------------------
+// Annotations useful for debugging.
+
+// Report the current thread `name` to a race detector.
+#define ANNOTATE_THREAD_NAME(name) \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName)(__FILE__, __LINE__, name)
+
+// -------------------------------------------------------------
+// Annotations useful when implementing locks. They are not normally needed by
+// modules that merely use locks. The `lock` argument is a pointer to the lock
+// object.
+
+// Report that a lock has been created at address `lock`.
+#define ANNOTATE_RWLOCK_CREATE(lock) \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock)
+
+// Report that a linker initialized lock has been created at address `lock`.
+#ifdef ABSL_HAVE_THREAD_SANITIZER
+#define ANNOTATE_RWLOCK_CREATE_STATIC(lock)               \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \
+  (__FILE__, __LINE__, lock)
+#else
+#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) ANNOTATE_RWLOCK_CREATE(lock)
+#endif
+
+// Report that the lock at address `lock` is about to be destroyed.
+#define ANNOTATE_RWLOCK_DESTROY(lock) \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock)
+
+// Report that the lock at address `lock` has been acquired.
+// `is_w`=1 for writer lock, `is_w`=0 for reader lock.
+#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)          \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \
+  (__FILE__, __LINE__, lock, is_w)
+
+// Report that the lock at address `lock` is about to be released.
+// `is_w`=1 for writer lock, `is_w`=0 for reader lock.
+#define ANNOTATE_RWLOCK_RELEASED(lock, is_w)          \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \
+  (__FILE__, __LINE__, lock, is_w)
+
+// Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable `static_var`.
+#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description)      \
+  namespace {                                                     \
+  class static_var##_annotator {                                  \
+   public:                                                        \
+    static_var##_annotator() {                                    \
+      ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \
+                                 #static_var ": " description);   \
+    }                                                             \
+  };                                                              \
+  static static_var##_annotator the##static_var##_annotator;      \
+  }  // namespace
+
+#else  // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 0
+
+#define ANNOTATE_RWLOCK_CREATE(lock)                            // empty
+#define ANNOTATE_RWLOCK_CREATE_STATIC(lock)                     // empty
+#define ANNOTATE_RWLOCK_DESTROY(lock)                           // empty
+#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)                    // empty
+#define ANNOTATE_RWLOCK_RELEASED(lock, is_w)                    // empty
+#define ANNOTATE_BENIGN_RACE(address, description)              // empty
+#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description)  // empty
+#define ANNOTATE_THREAD_NAME(name)                              // empty
+#define ANNOTATE_ENABLE_RACE_DETECTION(enable)                  // empty
+#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description)    // empty
+
+#endif  // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
+
+// -------------------------------------------------------------------------
+// Define memory annotations.
+
+#if ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 1
+
+#include <sanitizer/msan_interface.h>
+
+#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+  __msan_unpoison(address, size)
+
+#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
+  __msan_allocated_memory(address, size)
+
+#else  // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 0
+
+#if DYNAMIC_ANNOTATIONS_ENABLED == 1
+#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+  do {                                                \
+    (void)(address);                                  \
+    (void)(size);                                     \
+  } while (0)
+#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
+  do {                                                  \
+    (void)(address);                                    \
+    (void)(size);                                       \
+  } while (0)
+#else
+#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size)    // empty
+#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size)  // empty
+#endif
+
+#endif  // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED
+
+// -------------------------------------------------------------------------
+// Define IGNORE_READS_BEGIN/_END attributes.
+
+#if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \
+  __attribute((exclusive_lock_function("*")))
+#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \
+  __attribute((unlock_function("*")))
+
+#else  // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE  // empty
+#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE    // empty
+
+#endif  // defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+// -------------------------------------------------------------------------
+// Define IGNORE_READS_BEGIN/_END annotations.
+
+#if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1
+
+// Request the analysis tool to ignore all reads in the current thread until
+// ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey
+// reads, while still checking other reads and all writes.
+// See also ANNOTATE_UNPROTECTED_READ.
+#define ANNOTATE_IGNORE_READS_BEGIN() \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin)(__FILE__, __LINE__)
+
+// Stop ignoring reads.
+#define ANNOTATE_IGNORE_READS_END() \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd)(__FILE__, __LINE__)
+
+#elif defined(ABSL_INTERNAL_ANNOTALYSIS_ENABLED)
+
+// When Annotalysis is enabled without Dynamic Annotations, the use of
+// static-inline functions allows the annotations to be read at compile-time,
+// while still letting the compiler elide the functions from the final build.
+//
+// TODO(delesley) -- The exclusive lock here ignores writes as well, but
+// allows IGNORE_READS_AND_WRITES to work properly.
+
+#define ANNOTATE_IGNORE_READS_BEGIN() \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsBegin)()
+
+#define ANNOTATE_IGNORE_READS_END() \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsEnd)()
+
+#else
+
+#define ANNOTATE_IGNORE_READS_BEGIN()  // empty
+#define ANNOTATE_IGNORE_READS_END()    // empty
+
+#endif
+
+// -------------------------------------------------------------------------
+// Define IGNORE_WRITES_BEGIN/_END annotations.
+
+#if ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED == 1
+
+// Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead.
+#define ANNOTATE_IGNORE_WRITES_BEGIN() \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__)
+
+// Stop ignoring writes.
+#define ANNOTATE_IGNORE_WRITES_END() \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__)
+
+#else
+
+#define ANNOTATE_IGNORE_WRITES_BEGIN()  // empty
+#define ANNOTATE_IGNORE_WRITES_END()    // empty
+
+#endif
+
+// -------------------------------------------------------------------------
+// Define the ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more
+// primitive annotations defined above.
+//
+//     Instead of doing
+//        ANNOTATE_IGNORE_READS_BEGIN();
+//        ... = x;
+//        ANNOTATE_IGNORE_READS_END();
+//     one can use
+//        ... = ANNOTATE_UNPROTECTED_READ(x);
+
+#if defined(ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED)
+
+// Start ignoring all memory accesses (both reads and writes).
+#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
+  do {                                           \
+    ANNOTATE_IGNORE_READS_BEGIN();               \
+    ANNOTATE_IGNORE_WRITES_BEGIN();              \
+  } while (0)
+
+// Stop ignoring both reads and writes.
+#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
+  do {                                         \
+    ANNOTATE_IGNORE_WRITES_END();              \
+    ANNOTATE_IGNORE_READS_END();               \
+  } while (0)
+
+#ifdef __cplusplus
+// ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads.
+#define ANNOTATE_UNPROTECTED_READ(x) \
+  absl::base_internal::AnnotateUnprotectedRead(x)
+
+#endif
+
+#else
+
+#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN()  // empty
+#define ANNOTATE_IGNORE_READS_AND_WRITES_END()    // empty
+#define ANNOTATE_UNPROTECTED_READ(x) (x)
+
+#endif
+
+// -------------------------------------------------------------------------
+// Address sanitizer annotations
+
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+// Describe the current state of a contiguous container such as e.g.
+// std::vector or std::string. For more details see
+// sanitizer/common_interface_defs.h, which is provided by the compiler.
+#include <sanitizer/common_interface_defs.h>
+
+#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \
+  __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid)
+#define ADDRESS_SANITIZER_REDZONE(name)    \
+  struct {                                 \
+    char x[8] __attribute__((aligned(8))); \
+  } name
+
+#else
+
+#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid)
+#define ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "")
+
+#endif  // ABSL_HAVE_ADDRESS_SANITIZER
+
+// -------------------------------------------------------------------------
+// Undefine the macros intended only for this file.
+
+#undef ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_ANNOTALYSIS_ENABLED
+#undef ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_BEGIN_EXTERN_C
+#undef ABSL_INTERNAL_END_EXTERN_C
+#undef ABSL_INTERNAL_STATIC_INLINE
+
+#endif  // ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_
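For reference, a minimal usage sketch of the race annotations above (not part of the vendored header; `g_hits` and `ReadHitsWithoutLock` are hypothetical names used only for illustration):

#include "absl/base/internal/dynamic_annotations.h"

static int g_hits = 0;

// Tells race detectors that unsynchronized accesses to g_hits are expected.
ANNOTATE_BENIGN_RACE_STATIC(g_hits, "statistics counter; races are tolerated")

int ReadHitsWithoutLock() {
  // Marks this single read as intentionally unprotected; expands to a plain
  // read when annotations are disabled.
  return ANNOTATE_UNPROTECTED_READ(g_hits);
}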

+ 283 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/endian.h

@@ -0,0 +1,283 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_ENDIAN_H_
+#define ABSL_BASE_INTERNAL_ENDIAN_H_
+
+#include <cstdint>
+#include <cstdlib>
+
+#include "absl/base/casts.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/unaligned_access.h"
+#include "absl/base/nullability.h"
+#include "absl/base/port.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+inline uint64_t gbswap_64(uint64_t host_int) {
+#if ABSL_HAVE_BUILTIN(__builtin_bswap64) || defined(__GNUC__)
+  return __builtin_bswap64(host_int);
+#elif defined(_MSC_VER)
+  return _byteswap_uint64(host_int);
+#else
+  return (((host_int & uint64_t{0xFF}) << 56) |
+          ((host_int & uint64_t{0xFF00}) << 40) |
+          ((host_int & uint64_t{0xFF0000}) << 24) |
+          ((host_int & uint64_t{0xFF000000}) << 8) |
+          ((host_int & uint64_t{0xFF00000000}) >> 8) |
+          ((host_int & uint64_t{0xFF0000000000}) >> 24) |
+          ((host_int & uint64_t{0xFF000000000000}) >> 40) |
+          ((host_int & uint64_t{0xFF00000000000000}) >> 56));
+#endif
+}
+
+inline uint32_t gbswap_32(uint32_t host_int) {
+#if ABSL_HAVE_BUILTIN(__builtin_bswap32) || defined(__GNUC__)
+  return __builtin_bswap32(host_int);
+#elif defined(_MSC_VER)
+  return _byteswap_ulong(host_int);
+#else
+  return (((host_int & uint32_t{0xFF}) << 24) |
+          ((host_int & uint32_t{0xFF00}) << 8) |
+          ((host_int & uint32_t{0xFF0000}) >> 8) |
+          ((host_int & uint32_t{0xFF000000}) >> 24));
+#endif
+}
+
+inline uint16_t gbswap_16(uint16_t host_int) {
+#if ABSL_HAVE_BUILTIN(__builtin_bswap16) || defined(__GNUC__)
+  return __builtin_bswap16(host_int);
+#elif defined(_MSC_VER)
+  return _byteswap_ushort(host_int);
+#else
+  return (((host_int & uint16_t{0xFF}) << 8) |
+          ((host_int & uint16_t{0xFF00}) >> 8));
+#endif
+}
+
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+// Portable definitions for htonl (host-to-network) and friends on little-endian
+// architectures.
+inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); }
+inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); }
+inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+// Portable definitions for htonl (host-to-network) etc on big-endian
+// architectures. These definitions are simpler since the host byte order is the
+// same as network byte order.
+inline uint16_t ghtons(uint16_t x) { return x; }
+inline uint32_t ghtonl(uint32_t x) { return x; }
+inline uint64_t ghtonll(uint64_t x) { return x; }
+
+#else
+#error \
+    "Unsupported byte order: Either ABSL_IS_BIG_ENDIAN or " \
+       "ABSL_IS_LITTLE_ENDIAN must be defined"
+#endif  // byte order
+
+inline uint16_t gntohs(uint16_t x) { return ghtons(x); }
+inline uint32_t gntohl(uint32_t x) { return ghtonl(x); }
+inline uint64_t gntohll(uint64_t x) { return ghtonll(x); }
+
+// Utilities to convert numbers between the current host's native byte
+// order and little-endian byte order
+//
+// Load/Store methods are alignment safe
+namespace little_endian {
+// Conversion functions.
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return x; }
+inline uint16_t ToHost16(uint16_t x) { return x; }
+
+inline uint32_t FromHost32(uint32_t x) { return x; }
+inline uint32_t ToHost32(uint32_t x) { return x; }
+
+inline uint64_t FromHost64(uint64_t x) { return x; }
+inline uint64_t ToHost64(uint64_t x) { return x; }
+
+inline constexpr bool IsLittleEndian() { return true; }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
+inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
+
+inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
+inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
+
+inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
+inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
+
+inline constexpr bool IsLittleEndian() { return false; }
+
+#endif /* ENDIAN */
+
+inline uint8_t FromHost(uint8_t x) { return x; }
+inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
+inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
+inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
+inline uint8_t ToHost(uint8_t x) { return x; }
+inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
+inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
+inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }
+
+inline int8_t FromHost(int8_t x) { return x; }
+inline int16_t FromHost(int16_t x) {
+  return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t FromHost(int32_t x) {
+  return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t FromHost(int64_t x) {
+  return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
+}
+inline int8_t ToHost(int8_t x) { return x; }
+inline int16_t ToHost(int16_t x) {
+  return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t ToHost(int32_t x) {
+  return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t ToHost(int64_t x) {
+  return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
+}
+
+// Functions to do unaligned loads and stores in little-endian order.
+inline uint16_t Load16(absl::Nonnull<const void *> p) {
+  return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
+}
+
+inline void Store16(absl::Nonnull<void *> p, uint16_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
+}
+
+inline uint32_t Load32(absl::Nonnull<const void *> p) {
+  return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
+}
+
+inline void Store32(absl::Nonnull<void *> p, uint32_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
+}
+
+inline uint64_t Load64(absl::Nonnull<const void *> p) {
+  return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
+}
+
+inline void Store64(absl::Nonnull<void *> p, uint64_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
+}
+
+}  // namespace little_endian
+
+// Utilities to convert numbers between the current host's native byte
+// order and big-endian byte order (same as network byte order)
+//
+// Load/Store methods are alignment safe
+namespace big_endian {
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
+inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
+
+inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
+inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
+
+inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
+inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
+
+inline constexpr bool IsLittleEndian() { return true; }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return x; }
+inline uint16_t ToHost16(uint16_t x) { return x; }
+
+inline uint32_t FromHost32(uint32_t x) { return x; }
+inline uint32_t ToHost32(uint32_t x) { return x; }
+
+inline uint64_t FromHost64(uint64_t x) { return x; }
+inline uint64_t ToHost64(uint64_t x) { return x; }
+
+inline constexpr bool IsLittleEndian() { return false; }
+
+#endif /* ENDIAN */
+
+inline uint8_t FromHost(uint8_t x) { return x; }
+inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
+inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
+inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
+inline uint8_t ToHost(uint8_t x) { return x; }
+inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
+inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
+inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }
+
+inline int8_t FromHost(int8_t x) { return x; }
+inline int16_t FromHost(int16_t x) {
+  return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t FromHost(int32_t x) {
+  return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t FromHost(int64_t x) {
+  return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
+}
+inline int8_t ToHost(int8_t x) { return x; }
+inline int16_t ToHost(int16_t x) {
+  return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t ToHost(int32_t x) {
+  return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t ToHost(int64_t x) {
+  return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
+}
+
+// Functions to do unaligned loads and stores in big-endian order.
+inline uint16_t Load16(absl::Nonnull<const void *> p) {
+  return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
+}
+
+inline void Store16(absl::Nonnull<void *> p, uint16_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
+}
+
+inline uint32_t Load32(absl::Nonnull<const void *> p) {
+  return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
+}
+
+inline void Store32(absl::Nonnull<void *> p, uint32_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
+}
+
+inline uint64_t Load64(absl::Nonnull<const void *> p) {
+  return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
+}
+
+inline void Store64(absl::Nonnull<void *> p, uint64_t v) {
+  ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
+}
+
+}  // namespace big_endian
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_ENDIAN_H_
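
The Load/Store helpers above are alignment-safe and byte-order-aware, which is easiest to see in a round trip. The sketch below is illustrative only (`RoundTripExample` is not part of this patch) and uses just the functions declared in this internal header.

#include <cstdint>

#include "absl/base/internal/endian.h"

// Store a value at an unaligned offset in a byte buffer and read it back.
// The readback equals the original value on both little- and big-endian hosts.
void RoundTripExample() {
  unsigned char wire[16] = {};
  const uint32_t value = 0x01234567;

  // Little-endian wire order; the unaligned destination (wire + 3) is fine.
  absl::little_endian::Store32(wire + 3, value);
  uint32_t readback = absl::little_endian::Load32(wire + 3);
  // readback == value here.

  // Big-endian (network) wire order round trip.
  absl::big_endian::Store32(wire + 3, value);
  readback = absl::big_endian::Load32(wire + 3);
  (void)readback;
}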

+ 263 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/endian_test.cc

@@ -0,0 +1,263 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/endian.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <cstring>
+#include <limits>
+#include <random>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace {
+
+const uint64_t kInitialNumber{0x0123456789abcdef};
+const uint64_t k64Value{kInitialNumber};
+const uint32_t k32Value{0x01234567};
+const uint16_t k16Value{0x0123};
+const int kNumValuesToTest = 1000000;
+const int kRandomSeed = 12345;
+
+#if defined(ABSL_IS_BIG_ENDIAN)
+const uint64_t kInitialInNetworkOrder{kInitialNumber};
+const uint64_t k64ValueLE{0xefcdab8967452301};
+const uint32_t k32ValueLE{0x67452301};
+const uint16_t k16ValueLE{0x2301};
+
+const uint64_t k64ValueBE{kInitialNumber};
+const uint32_t k32ValueBE{k32Value};
+const uint16_t k16ValueBE{k16Value};
+#elif defined(ABSL_IS_LITTLE_ENDIAN)
+const uint64_t kInitialInNetworkOrder{0xefcdab8967452301};
+const uint64_t k64ValueLE{kInitialNumber};
+const uint32_t k32ValueLE{k32Value};
+const uint16_t k16ValueLE{k16Value};
+
+const uint64_t k64ValueBE{0xefcdab8967452301};
+const uint32_t k32ValueBE{0x67452301};
+const uint16_t k16ValueBE{0x2301};
+#endif
+
+std::vector<uint16_t> GenerateAllUint16Values() {
+  std::vector<uint16_t> result;
+  result.reserve(size_t{1} << (sizeof(uint16_t) * 8));
+  for (uint32_t i = std::numeric_limits<uint16_t>::min();
+       i <= std::numeric_limits<uint16_t>::max(); ++i) {
+    result.push_back(static_cast<uint16_t>(i));
+  }
+  return result;
+}
+
+template<typename T>
+std::vector<T> GenerateRandomIntegers(size_t num_values_to_test) {
+  std::vector<T> result;
+  result.reserve(num_values_to_test);
+  std::mt19937_64 rng(kRandomSeed);
+  for (size_t i = 0; i < num_values_to_test; ++i) {
+    result.push_back(rng());
+  }
+  return result;
+}
+
+void ManualByteSwap(char* bytes, int length) {
+  if (length == 1)
+    return;
+
+  EXPECT_EQ(0, length % 2);
+  for (int i = 0; i < length / 2; ++i) {
+    int j = (length - 1) - i;
+    using std::swap;
+    swap(bytes[i], bytes[j]);
+  }
+}
+
+template<typename T>
+inline T UnalignedLoad(const char* p) {
+  static_assert(
+      sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8,
+      "Unexpected type size");
+
+  switch (sizeof(T)) {
+    case 1: return *reinterpret_cast<const T*>(p);
+    case 2:
+      return ABSL_INTERNAL_UNALIGNED_LOAD16(p);
+    case 4:
+      return ABSL_INTERNAL_UNALIGNED_LOAD32(p);
+    case 8:
+      return ABSL_INTERNAL_UNALIGNED_LOAD64(p);
+    default:
+      // Suppresses invalid "not all control paths return a value" on MSVC
+      return {};
+  }
+}
+
+template <typename T, typename ByteSwapper>
+static void GBSwapHelper(const std::vector<T>& host_values_to_test,
+                         const ByteSwapper& byte_swapper) {
+  // Test byte_swapper against a manual byte swap.
+  for (typename std::vector<T>::const_iterator it = host_values_to_test.begin();
+       it != host_values_to_test.end(); ++it) {
+    T host_value = *it;
+
+    char actual_value[sizeof(host_value)];
+    memcpy(actual_value, &host_value, sizeof(host_value));
+    byte_swapper(actual_value);
+
+    char expected_value[sizeof(host_value)];
+    memcpy(expected_value, &host_value, sizeof(host_value));
+    ManualByteSwap(expected_value, sizeof(host_value));
+
+    ASSERT_EQ(0, memcmp(actual_value, expected_value, sizeof(host_value)))
+        << "Swap output for 0x" << std::hex << host_value << " does not match. "
+        << "Expected: 0x" << UnalignedLoad<T>(expected_value) << "; "
+        << "actual: 0x" <<  UnalignedLoad<T>(actual_value);
+  }
+}
+
+void Swap16(char* bytes) {
+  ABSL_INTERNAL_UNALIGNED_STORE16(
+      bytes, gbswap_16(ABSL_INTERNAL_UNALIGNED_LOAD16(bytes)));
+}
+
+void Swap32(char* bytes) {
+  ABSL_INTERNAL_UNALIGNED_STORE32(
+      bytes, gbswap_32(ABSL_INTERNAL_UNALIGNED_LOAD32(bytes)));
+}
+
+void Swap64(char* bytes) {
+  ABSL_INTERNAL_UNALIGNED_STORE64(
+      bytes, gbswap_64(ABSL_INTERNAL_UNALIGNED_LOAD64(bytes)));
+}
+
+TEST(EndianessTest, Uint16) {
+  GBSwapHelper(GenerateAllUint16Values(), &Swap16);
+}
+
+TEST(EndianessTest, Uint32) {
+  GBSwapHelper(GenerateRandomIntegers<uint32_t>(kNumValuesToTest), &Swap32);
+}
+
+TEST(EndianessTest, Uint64) {
+  GBSwapHelper(GenerateRandomIntegers<uint64_t>(kNumValuesToTest), &Swap64);
+}
+
+TEST(EndianessTest, ghtonll_gntohll) {
+  // Test that absl::ghtonl compiles correctly
+  uint32_t test = 0x01234567;
+  EXPECT_EQ(absl::gntohl(absl::ghtonl(test)), test);
+
+  uint64_t comp = absl::ghtonll(kInitialNumber);
+  EXPECT_EQ(comp, kInitialInNetworkOrder);
+  comp = absl::gntohll(kInitialInNetworkOrder);
+  EXPECT_EQ(comp, kInitialNumber);
+
+  // Test that htonll and ntohll are each other's inverse functions on a
+  // somewhat assorted batch of numbers. 37 is chosen not to be anything
+  // particularly nice in base 2.
+  uint64_t value = 1;
+  for (int i = 0; i < 100; ++i) {
+    comp = absl::ghtonll(absl::gntohll(value));
+    EXPECT_EQ(value, comp);
+    comp = absl::gntohll(absl::ghtonll(value));
+    EXPECT_EQ(value, comp);
+    value *= 37;
+  }
+}
+
+TEST(EndianessTest, little_endian) {
+  // Check little_endian uint16_t.
+  uint64_t comp = little_endian::FromHost16(k16Value);
+  EXPECT_EQ(comp, k16ValueLE);
+  comp = little_endian::ToHost16(k16ValueLE);
+  EXPECT_EQ(comp, k16Value);
+
+  // Check little_endian uint32_t.
+  comp = little_endian::FromHost32(k32Value);
+  EXPECT_EQ(comp, k32ValueLE);
+  comp = little_endian::ToHost32(k32ValueLE);
+  EXPECT_EQ(comp, k32Value);
+
+  // Check little_endian uint64_t.
+  comp = little_endian::FromHost64(k64Value);
+  EXPECT_EQ(comp, k64ValueLE);
+  comp = little_endian::ToHost64(k64ValueLE);
+  EXPECT_EQ(comp, k64Value);
+
+  // Check little-endian load and store functions.
+  uint16_t u16Buf;
+  uint32_t u32Buf;
+  uint64_t u64Buf;
+
+  little_endian::Store16(&u16Buf, k16Value);
+  EXPECT_EQ(u16Buf, k16ValueLE);
+  comp = little_endian::Load16(&u16Buf);
+  EXPECT_EQ(comp, k16Value);
+
+  little_endian::Store32(&u32Buf, k32Value);
+  EXPECT_EQ(u32Buf, k32ValueLE);
+  comp = little_endian::Load32(&u32Buf);
+  EXPECT_EQ(comp, k32Value);
+
+  little_endian::Store64(&u64Buf, k64Value);
+  EXPECT_EQ(u64Buf, k64ValueLE);
+  comp = little_endian::Load64(&u64Buf);
+  EXPECT_EQ(comp, k64Value);
+}
+
+TEST(EndianessTest, big_endian) {
+  // Check big-endian load and store functions.
+  uint16_t u16Buf;
+  uint32_t u32Buf;
+  uint64_t u64Buf;
+
+  unsigned char buffer[10];
+  big_endian::Store16(&u16Buf, k16Value);
+  EXPECT_EQ(u16Buf, k16ValueBE);
+  uint64_t comp = big_endian::Load16(&u16Buf);
+  EXPECT_EQ(comp, k16Value);
+
+  big_endian::Store32(&u32Buf, k32Value);
+  EXPECT_EQ(u32Buf, k32ValueBE);
+  comp = big_endian::Load32(&u32Buf);
+  EXPECT_EQ(comp, k32Value);
+
+  big_endian::Store64(&u64Buf, k64Value);
+  EXPECT_EQ(u64Buf, k64ValueBE);
+  comp = big_endian::Load64(&u64Buf);
+  EXPECT_EQ(comp, k64Value);
+
+  big_endian::Store16(buffer + 1, k16Value);
+  EXPECT_EQ(u16Buf, k16ValueBE);
+  comp = big_endian::Load16(buffer + 1);
+  EXPECT_EQ(comp, k16Value);
+
+  big_endian::Store32(buffer + 1, k32Value);
+  EXPECT_EQ(u32Buf, k32ValueBE);
+  comp = big_endian::Load32(buffer + 1);
+  EXPECT_EQ(comp, k32Value);
+
+  big_endian::Store64(buffer + 1, k64Value);
+  EXPECT_EQ(u64Buf, k64ValueBE);
+  comp = big_endian::Load64(buffer + 1);
+  EXPECT_EQ(comp, k64Value);
+}
+
+}  // namespace
+ABSL_NAMESPACE_END
+}  // namespace absl

+ 43 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/errno_saver.h

@@ -0,0 +1,43 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_ERRNO_SAVER_H_
+#define ABSL_BASE_INTERNAL_ERRNO_SAVER_H_
+
+#include <cerrno>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// `ErrnoSaver` captures the value of `errno` upon construction and restores
+// it upon destruction.  It is used in low-level code and must be super fast.
+// Do not add instrumentation, even in debug modes.
+class ErrnoSaver {
+ public:
+  ErrnoSaver() : saved_errno_(errno) {}
+  ~ErrnoSaver() { errno = saved_errno_; }
+  int operator()() const { return saved_errno_; }
+
+ private:
+  const int saved_errno_;
+};
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_ERRNO_SAVER_H_
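
The class comment above is the whole contract; a short sketch makes the intended pattern concrete. `LogWithoutClobberingErrno` below is hypothetical (not part of this patch): it may set `errno` internally, for example while running inside a signal handler, but leaves the caller's `errno` untouched.

#include <cerrno>
#include <cstdio>

#include "absl/base/internal/errno_saver.h"

// Hypothetical helper that is free to call errno-setting functions but is
// guaranteed to leave the caller's errno exactly as it found it.
void LogWithoutClobberingErrno(const char* msg) {
  absl::base_internal::ErrnoSaver saver;  // captures errno here
  std::fprintf(stderr, "%s (errno was %d)\n", msg, saver());
  // fprintf may overwrite errno on failure; the ErrnoSaver destructor
  // restores the captured value when this function returns.
}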

+ 45 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/errno_saver_test.cc

@@ -0,0 +1,45 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/errno_saver.h"
+
+#include <cerrno>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/internal/strerror.h"
+
+namespace {
+using ::testing::Eq;
+
+struct ErrnoPrinter {
+  int no;
+};
+std::ostream &operator<<(std::ostream &os, ErrnoPrinter ep) {
+  return os << absl::base_internal::StrError(ep.no) << " [" << ep.no << "]";
+}
+bool operator==(ErrnoPrinter one, ErrnoPrinter two) { return one.no == two.no; }
+
+TEST(ErrnoSaverTest, Works) {
+  errno = EDOM;
+  {
+    absl::base_internal::ErrnoSaver errno_saver;
+    EXPECT_THAT(ErrnoPrinter{errno}, Eq(ErrnoPrinter{EDOM}));
+    errno = ERANGE;
+    EXPECT_THAT(ErrnoPrinter{errno}, Eq(ErrnoPrinter{ERANGE}));
+    EXPECT_THAT(ErrnoPrinter{errno_saver()}, Eq(ErrnoPrinter{EDOM}));
+  }
+  EXPECT_THAT(ErrnoPrinter{errno}, Eq(ErrnoPrinter{EDOM}));
+}
+}  // namespace

+ 79 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/exception_safety_testing.cc

@@ -0,0 +1,79 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/exception_safety_testing.h"
+
+#ifdef ABSL_HAVE_EXCEPTIONS
+
+#include "gtest/gtest.h"
+#include "absl/meta/type_traits.h"
+
+namespace testing {
+
+exceptions_internal::NoThrowTag nothrow_ctor;
+
+exceptions_internal::StrongGuaranteeTagType strong_guarantee;
+
+exceptions_internal::ExceptionSafetyTestBuilder<> MakeExceptionSafetyTester() {
+  return {};
+}
+
+namespace exceptions_internal {
+
+int countdown = -1;
+
+ConstructorTracker* ConstructorTracker::current_tracker_instance_ = nullptr;
+
+void MaybeThrow(absl::string_view msg, bool throw_bad_alloc) {
+  if (countdown-- == 0) {
+    if (throw_bad_alloc) throw TestBadAllocException(msg);
+    throw TestException(msg);
+  }
+}
+
+testing::AssertionResult FailureMessage(const TestException& e,
+                                        int countdown) noexcept {
+  return testing::AssertionFailure() << "Exception thrown from " << e.what();
+}
+
+std::string GetSpecString(TypeSpec spec) {
+  std::string out;
+  absl::string_view sep;
+  const auto append = [&](absl::string_view s) {
+    absl::StrAppend(&out, sep, s);
+    sep = " | ";
+  };
+  if (static_cast<bool>(TypeSpec::kNoThrowCopy & spec)) {
+    append("kNoThrowCopy");
+  }
+  if (static_cast<bool>(TypeSpec::kNoThrowMove & spec)) {
+    append("kNoThrowMove");
+  }
+  if (static_cast<bool>(TypeSpec::kNoThrowNew & spec)) {
+    append("kNoThrowNew");
+  }
+  return out;
+}
+
+std::string GetSpecString(AllocSpec spec) {
+  return static_cast<bool>(AllocSpec::kNoThrowAllocate & spec)
+             ? "kNoThrowAllocate"
+             : "";
+}
+
+}  // namespace exceptions_internal
+
+}  // namespace testing
+
+#endif  // ABSL_HAVE_EXCEPTIONS
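
The countdown defined above is the core of the framework: SetCountdown(n) arms it, every MaybeThrow call decrements it, and the call on which it reaches zero throws. A small hedged sketch of that sequencing follows, using only helpers declared in exception_safety_testing.h (the function name CountdownSketch is illustrative, not part of the patch).

#include "absl/base/internal/exception_safety_testing.h"

#ifdef ABSL_HAVE_EXCEPTIONS

// With the countdown set to 2, the third MaybeThrow call is the one that
// throws; this is how the framework probes each throw point of an operation
// in turn.
void CountdownSketch() {
  testing::exceptions_internal::SetCountdown(2);
  try {
    testing::exceptions_internal::MaybeThrow("step 1");  // countdown 2 -> 1
    testing::exceptions_internal::MaybeThrow("step 2");  // countdown 1 -> 0
    testing::exceptions_internal::MaybeThrow("step 3");  // throws TestException
  } catch (const testing::exceptions_internal::TestException&) {
    // Expected: the third probe point threw.
  }
  testing::exceptions_internal::UnsetCountdown();
}

#endif  // ABSL_HAVE_EXCEPTIONS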

+ 1109 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/exception_safety_testing.h

@@ -0,0 +1,1109 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Utilities for testing exception-safety
+
+#ifndef ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_
+#define ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_
+
+#include "absl/base/config.h"
+
+#ifdef ABSL_HAVE_EXCEPTIONS
+
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <initializer_list>
+#include <iosfwd>
+#include <string>
+#include <tuple>
+#include <unordered_map>
+
+#include "gtest/gtest.h"
+#include "absl/base/internal/pretty_function.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/strings/string_view.h"
+#include "absl/strings/substitute.h"
+#include "absl/utility/utility.h"
+
+namespace testing {
+
+enum class TypeSpec;
+enum class AllocSpec;
+
+constexpr TypeSpec operator|(TypeSpec a, TypeSpec b) {
+  using T = absl::underlying_type_t<TypeSpec>;
+  return static_cast<TypeSpec>(static_cast<T>(a) | static_cast<T>(b));
+}
+
+constexpr TypeSpec operator&(TypeSpec a, TypeSpec b) {
+  using T = absl::underlying_type_t<TypeSpec>;
+  return static_cast<TypeSpec>(static_cast<T>(a) & static_cast<T>(b));
+}
+
+constexpr AllocSpec operator|(AllocSpec a, AllocSpec b) {
+  using T = absl::underlying_type_t<AllocSpec>;
+  return static_cast<AllocSpec>(static_cast<T>(a) | static_cast<T>(b));
+}
+
+constexpr AllocSpec operator&(AllocSpec a, AllocSpec b) {
+  using T = absl::underlying_type_t<AllocSpec>;
+  return static_cast<AllocSpec>(static_cast<T>(a) & static_cast<T>(b));
+}
+
+namespace exceptions_internal {
+
+std::string GetSpecString(TypeSpec);
+std::string GetSpecString(AllocSpec);
+
+struct NoThrowTag {};
+struct StrongGuaranteeTagType {};
+
+// A simple exception class.  We throw this so that test code can catch
+// exceptions specifically thrown by ThrowingValue.
+class TestException {
+ public:
+  explicit TestException(absl::string_view msg) : msg_(msg) {}
+  virtual ~TestException() {}
+  virtual const char* what() const noexcept { return msg_.c_str(); }
+
+ private:
+  std::string msg_;
+};
+
+// TestBadAllocException exists because allocation functions must throw an
+// exception which can be caught by a handler of std::bad_alloc.  We use a child
+// class of std::bad_alloc so we can customise the error message, and also
+// derive from TestException so we don't accidentally end up catching an actual
+// bad_alloc exception in TestExceptionSafety.
+class TestBadAllocException : public std::bad_alloc, public TestException {
+ public:
+  explicit TestBadAllocException(absl::string_view msg) : TestException(msg) {}
+  using TestException::what;
+};
+
+extern int countdown;
+
+// Allows the countdown variable to be set manually (the argument defaults
+// to 0).
+inline void SetCountdown(int i = 0) { countdown = i; }
+// Sets the countdown to the terminal value -1
+inline void UnsetCountdown() { SetCountdown(-1); }
+
+void MaybeThrow(absl::string_view msg, bool throw_bad_alloc = false);
+
+testing::AssertionResult FailureMessage(const TestException& e,
+                                        int countdown) noexcept;
+
+struct TrackedAddress {
+  bool is_alive;
+  std::string description;
+};
+
+// Inspects the constructions and destructions of anything inheriting from
+// TrackedObject. Leaks are caught rather than ignored: ConstructorTracker's
+// destructor reports, as a test failure, every tracked object that was not
+// destroyed while the tracker was in scope.
+class ConstructorTracker {
+ public:
+  explicit ConstructorTracker(int count) : countdown_(count) {
+    assert(current_tracker_instance_ == nullptr);
+    current_tracker_instance_ = this;
+  }
+
+  ~ConstructorTracker() {
+    assert(current_tracker_instance_ == this);
+    current_tracker_instance_ = nullptr;
+
+    for (auto& it : address_map_) {
+      void* address = it.first;
+      TrackedAddress& tracked_address = it.second;
+      if (tracked_address.is_alive) {
+        ADD_FAILURE() << ErrorMessage(address, tracked_address.description,
+                                      countdown_, "Object was not destroyed.");
+      }
+    }
+  }
+
+  static void ObjectConstructed(void* address, std::string description) {
+    if (!CurrentlyTracking()) return;
+
+    TrackedAddress& tracked_address =
+        current_tracker_instance_->address_map_[address];
+    if (tracked_address.is_alive) {
+      ADD_FAILURE() << ErrorMessage(
+          address, tracked_address.description,
+          current_tracker_instance_->countdown_,
+          "Object was re-constructed. Current object was constructed by " +
+              description);
+    }
+    tracked_address = {true, std::move(description)};
+  }
+
+  static void ObjectDestructed(void* address) {
+    if (!CurrentlyTracking()) return;
+
+    auto it = current_tracker_instance_->address_map_.find(address);
+    // Not tracked. Ignore.
+    if (it == current_tracker_instance_->address_map_.end()) return;
+
+    TrackedAddress& tracked_address = it->second;
+    if (!tracked_address.is_alive) {
+      ADD_FAILURE() << ErrorMessage(address, tracked_address.description,
+                                    current_tracker_instance_->countdown_,
+                                    "Object was re-destroyed.");
+    }
+    tracked_address.is_alive = false;
+  }
+
+ private:
+  static bool CurrentlyTracking() {
+    return current_tracker_instance_ != nullptr;
+  }
+
+  static std::string ErrorMessage(void* address,
+                                  const std::string& address_description,
+                                  int countdown,
+                                  const std::string& error_description) {
+    return absl::Substitute(
+        "With coundtown at $0:\n"
+        "  $1\n"
+        "  Object originally constructed by $2\n"
+        "  Object address: $3\n",
+        countdown, error_description, address_description, address);
+  }
+
+  std::unordered_map<void*, TrackedAddress> address_map_;
+  int countdown_;
+
+  static ConstructorTracker* current_tracker_instance_;
+};
+
+class TrackedObject {
+ public:
+  TrackedObject(const TrackedObject&) = delete;
+  TrackedObject(TrackedObject&&) = delete;
+
+ protected:
+  explicit TrackedObject(std::string description) {
+    ConstructorTracker::ObjectConstructed(this, std::move(description));
+  }
+
+  ~TrackedObject() noexcept { ConstructorTracker::ObjectDestructed(this); }
+};
+}  // namespace exceptions_internal
+
+extern exceptions_internal::NoThrowTag nothrow_ctor;
+
+extern exceptions_internal::StrongGuaranteeTagType strong_guarantee;
+
+// A test class which is convertible to bool.  The conversion can be
+// instrumented to throw at a controlled time.
+class ThrowingBool {
+ public:
+  ThrowingBool(bool b) noexcept : b_(b) {}  // NOLINT(runtime/explicit)
+  operator bool() const {                   // NOLINT
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return b_;
+  }
+
+ private:
+  bool b_;
+};
+
+/*
+ * Configuration enum for the ThrowingValue type that defines behavior for the
+ * lifetime of the instance. Use testing::nothrow_ctor to prevent the integer
+ * constructor from throwing.
+ *
+ * kEverythingThrows: Every operation can throw an exception
+ * kNoThrowCopy: Copy construction and copy assignment will not throw
+ * kNoThrowMove: Move construction and move assignment will not throw
+ * kNoThrowNew: Overloaded operators new and new[] will not throw
+ */
+enum class TypeSpec {
+  kEverythingThrows = 0,
+  kNoThrowCopy = 1,
+  kNoThrowMove = 1 << 1,
+  kNoThrowNew = 1 << 2,
+};
+
+/*
+ * A testing class instrumented to throw an exception at a controlled time.
+ *
+ * ThrowingValue implements a slightly relaxed version of the Regular concept --
+ * that is, it's a value type with the expected semantics.  It also implements
+ * arithmetic operations.  It doesn't implement member and pointer operators
+ * like operator-> or operator[].
+ *
+ * ThrowingValue can be instrumented to have certain operations be noexcept by
+ * using compile-time bitfield template arguments.  That is, to make an
+ * ThrowingValue which has noexcept move construction/assignment and noexcept
+ * copy construction/assignment, use the following:
+ *   ThrowingValue<testing::kNoThrowMove | testing::kNoThrowCopy> my_thrwr{val};
+ */
+template <TypeSpec Spec = TypeSpec::kEverythingThrows>
+class ThrowingValue : private exceptions_internal::TrackedObject {
+  static constexpr bool IsSpecified(TypeSpec spec) {
+    return static_cast<bool>(Spec & spec);
+  }
+
+  static constexpr int kDefaultValue = 0;
+  static constexpr int kBadValue = 938550620;
+
+ public:
+  ThrowingValue() : TrackedObject(GetInstanceString(kDefaultValue)) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ = kDefaultValue;
+  }
+
+  ThrowingValue(const ThrowingValue& other) noexcept(
+      IsSpecified(TypeSpec::kNoThrowCopy))
+      : TrackedObject(GetInstanceString(other.dummy_)) {
+    if (!IsSpecified(TypeSpec::kNoThrowCopy)) {
+      exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    }
+    dummy_ = other.dummy_;
+  }
+
+  ThrowingValue(ThrowingValue&& other) noexcept(
+      IsSpecified(TypeSpec::kNoThrowMove))
+      : TrackedObject(GetInstanceString(other.dummy_)) {
+    if (!IsSpecified(TypeSpec::kNoThrowMove)) {
+      exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    }
+    dummy_ = other.dummy_;
+  }
+
+  explicit ThrowingValue(int i) : TrackedObject(GetInstanceString(i)) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ = i;
+  }
+
+  ThrowingValue(int i, exceptions_internal::NoThrowTag) noexcept
+      : TrackedObject(GetInstanceString(i)), dummy_(i) {}
+
+  // absl expects nothrow destructors
+  ~ThrowingValue() noexcept = default;
+
+  ThrowingValue& operator=(const ThrowingValue& other) noexcept(
+      IsSpecified(TypeSpec::kNoThrowCopy)) {
+    dummy_ = kBadValue;
+    if (!IsSpecified(TypeSpec::kNoThrowCopy)) {
+      exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    }
+    dummy_ = other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator=(ThrowingValue&& other) noexcept(
+      IsSpecified(TypeSpec::kNoThrowMove)) {
+    dummy_ = kBadValue;
+    if (!IsSpecified(TypeSpec::kNoThrowMove)) {
+      exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    }
+    dummy_ = other.dummy_;
+    return *this;
+  }
+
+  // Arithmetic Operators
+  ThrowingValue operator+(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ + other.dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue operator+() const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue operator-(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ - other.dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue operator-() const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(-dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue& operator++() {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    ++dummy_;
+    return *this;
+  }
+
+  ThrowingValue operator++(int) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    auto out = ThrowingValue(dummy_, nothrow_ctor);
+    ++dummy_;
+    return out;
+  }
+
+  ThrowingValue& operator--() {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    --dummy_;
+    return *this;
+  }
+
+  ThrowingValue operator--(int) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    auto out = ThrowingValue(dummy_, nothrow_ctor);
+    --dummy_;
+    return out;
+  }
+
+  ThrowingValue operator*(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ * other.dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue operator/(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ / other.dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue operator%(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ % other.dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue operator<<(int shift) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ << shift, nothrow_ctor);
+  }
+
+  ThrowingValue operator>>(int shift) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ >> shift, nothrow_ctor);
+  }
+
+  // Comparison Operators
+  // NOTE: We use `ThrowingBool` instead of `bool` because most STL
+  // types/containers require T to be convertible to bool.
+  friend ThrowingBool operator==(const ThrowingValue& a,
+                                 const ThrowingValue& b) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return a.dummy_ == b.dummy_;
+  }
+  friend ThrowingBool operator!=(const ThrowingValue& a,
+                                 const ThrowingValue& b) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return a.dummy_ != b.dummy_;
+  }
+  friend ThrowingBool operator<(const ThrowingValue& a,
+                                const ThrowingValue& b) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return a.dummy_ < b.dummy_;
+  }
+  friend ThrowingBool operator<=(const ThrowingValue& a,
+                                 const ThrowingValue& b) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return a.dummy_ <= b.dummy_;
+  }
+  friend ThrowingBool operator>(const ThrowingValue& a,
+                                const ThrowingValue& b) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return a.dummy_ > b.dummy_;
+  }
+  friend ThrowingBool operator>=(const ThrowingValue& a,
+                                 const ThrowingValue& b) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return a.dummy_ >= b.dummy_;
+  }
+
+  // Logical Operators
+  ThrowingBool operator!() const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return !dummy_;
+  }
+
+  ThrowingBool operator&&(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return dummy_ && other.dummy_;
+  }
+
+  ThrowingBool operator||(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return dummy_ || other.dummy_;
+  }
+
+  // Bitwise Logical Operators
+  ThrowingValue operator~() const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(~dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue operator&(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ & other.dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue operator|(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ | other.dummy_, nothrow_ctor);
+  }
+
+  ThrowingValue operator^(const ThrowingValue& other) const {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return ThrowingValue(dummy_ ^ other.dummy_, nothrow_ctor);
+  }
+
+  // Compound Assignment operators
+  ThrowingValue& operator+=(const ThrowingValue& other) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ += other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator-=(const ThrowingValue& other) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ -= other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator*=(const ThrowingValue& other) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ *= other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator/=(const ThrowingValue& other) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ /= other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator%=(const ThrowingValue& other) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ %= other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator&=(const ThrowingValue& other) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ &= other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator|=(const ThrowingValue& other) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ |= other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator^=(const ThrowingValue& other) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ ^= other.dummy_;
+    return *this;
+  }
+
+  ThrowingValue& operator<<=(int shift) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ <<= shift;
+    return *this;
+  }
+
+  ThrowingValue& operator>>=(int shift) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ >>= shift;
+    return *this;
+  }
+
+  // Pointer operators
+  void operator&() const = delete;  // NOLINT(runtime/operator)
+
+  // Stream operators
+  friend std::ostream& operator<<(std::ostream& os, const ThrowingValue& tv) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return os << GetInstanceString(tv.dummy_);
+  }
+
+  friend std::istream& operator>>(std::istream& is, const ThrowingValue&) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    return is;
+  }
+
+  // Memory management operators
+  static void* operator new(size_t s) noexcept(
+      IsSpecified(TypeSpec::kNoThrowNew)) {
+    if (!IsSpecified(TypeSpec::kNoThrowNew)) {
+      exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true);
+    }
+    return ::operator new(s);
+  }
+
+  static void* operator new[](size_t s) noexcept(
+      IsSpecified(TypeSpec::kNoThrowNew)) {
+    if (!IsSpecified(TypeSpec::kNoThrowNew)) {
+      exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true);
+    }
+    return ::operator new[](s);
+  }
+
+  template <typename... Args>
+  static void* operator new(size_t s, Args&&... args) noexcept(
+      IsSpecified(TypeSpec::kNoThrowNew)) {
+    if (!IsSpecified(TypeSpec::kNoThrowNew)) {
+      exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true);
+    }
+    return ::operator new(s, std::forward<Args>(args)...);
+  }
+
+  template <typename... Args>
+  static void* operator new[](size_t s, Args&&... args) noexcept(
+      IsSpecified(TypeSpec::kNoThrowNew)) {
+    if (!IsSpecified(TypeSpec::kNoThrowNew)) {
+      exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true);
+    }
+    return ::operator new[](s, std::forward<Args>(args)...);
+  }
+
+  // Abseil doesn't support throwing overloaded operator delete.  These are
+  // provided so a throwing operator-new can clean up after itself.
+  void operator delete(void* p) noexcept { ::operator delete(p); }
+
+  template <typename... Args>
+  void operator delete(void* p, Args&&... args) noexcept {
+    ::operator delete(p, std::forward<Args>(args)...);
+  }
+
+  void operator delete[](void* p) noexcept { return ::operator delete[](p); }
+
+  template <typename... Args>
+  void operator delete[](void* p, Args&&... args) noexcept {
+    return ::operator delete[](p, std::forward<Args>(args)...);
+  }
+
+  // Non-standard access to the actual contained value.  No need for this to
+  // throw.
+  int& Get() noexcept { return dummy_; }
+  const int& Get() const noexcept { return dummy_; }
+
+ private:
+  static std::string GetInstanceString(int dummy) {
+    return absl::StrCat("ThrowingValue<",
+                        exceptions_internal::GetSpecString(Spec), ">(", dummy,
+                        ")");
+  }
+
+  int dummy_;
+};
+// Although not related to exceptions, the comma operator is explicitly deleted
+// here to make sure we don't use it on user-supplied types.
+template <TypeSpec Spec, typename T>
+void operator,(const ThrowingValue<Spec>&, T&&) = delete;
+template <TypeSpec Spec, typename T>
+void operator,(T&&, const ThrowingValue<Spec>&) = delete;
+
+/*
+ * Configuration enum for the ThrowingAllocator type that defines behavior for
+ * the lifetime of the instance.
+ *
+ * kEverythingThrows: Calls to the member functions may throw
+ * kNoThrowAllocate: Calls to the member functions will not throw
+ */
+enum class AllocSpec {
+  kEverythingThrows = 0,
+  kNoThrowAllocate = 1,
+};
+
+/*
+ * An allocator type which is instrumented to throw at a controlled time, or not
+ * to throw, using AllocSpec. The supported settings are either the default, in
+ * which every member function that a conforming allocator is permitted to let
+ * throw may throw, or a mode in which nothing throws, in line with the
+ * ABSL_ALLOCATOR_THROWS configuration macro.
+ */
+template <typename T, AllocSpec Spec = AllocSpec::kEverythingThrows>
+class ThrowingAllocator : private exceptions_internal::TrackedObject {
+  static constexpr bool IsSpecified(AllocSpec spec) {
+    return static_cast<bool>(Spec & spec);
+  }
+
+ public:
+  using pointer = T*;
+  using const_pointer = const T*;
+  using reference = T&;
+  using const_reference = const T&;
+  using void_pointer = void*;
+  using const_void_pointer = const void*;
+  using value_type = T;
+  using size_type = size_t;
+  using difference_type = ptrdiff_t;
+
+  using is_nothrow =
+      std::integral_constant<bool, Spec == AllocSpec::kNoThrowAllocate>;
+  using propagate_on_container_copy_assignment = std::true_type;
+  using propagate_on_container_move_assignment = std::true_type;
+  using propagate_on_container_swap = std::true_type;
+  using is_always_equal = std::false_type;
+
+  ThrowingAllocator() : TrackedObject(GetInstanceString(next_id_)) {
+    exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+    dummy_ = std::make_shared<const int>(next_id_++);
+  }
+
+  template <typename U>
+  ThrowingAllocator(const ThrowingAllocator<U, Spec>& other) noexcept  // NOLINT
+      : TrackedObject(GetInstanceString(*other.State())),
+        dummy_(other.State()) {}
+
+  // According to the C++11 standard [17.6.3.5], Table 28, the move/copy ctors
+  // of an allocator shall not exit via an exception, thus they are marked
+  // noexcept.
+  ThrowingAllocator(const ThrowingAllocator& other) noexcept
+      : TrackedObject(GetInstanceString(*other.State())),
+        dummy_(other.State()) {}
+
+  template <typename U>
+  ThrowingAllocator(ThrowingAllocator<U, Spec>&& other) noexcept  // NOLINT
+      : TrackedObject(GetInstanceString(*other.State())),
+        dummy_(std::move(other.State())) {}
+
+  ThrowingAllocator(ThrowingAllocator&& other) noexcept
+      : TrackedObject(GetInstanceString(*other.State())),
+        dummy_(std::move(other.State())) {}
+
+  ~ThrowingAllocator() noexcept = default;
+
+  ThrowingAllocator& operator=(const ThrowingAllocator& other) noexcept {
+    dummy_ = other.State();
+    return *this;
+  }
+
+  template <typename U>
+  ThrowingAllocator& operator=(
+      const ThrowingAllocator<U, Spec>& other) noexcept {
+    dummy_ = other.State();
+    return *this;
+  }
+
+  template <typename U>
+  ThrowingAllocator& operator=(ThrowingAllocator<U, Spec>&& other) noexcept {
+    dummy_ = std::move(other.State());
+    return *this;
+  }
+
+  template <typename U>
+  struct rebind {
+    using other = ThrowingAllocator<U, Spec>;
+  };
+
+  pointer allocate(size_type n) noexcept(
+      IsSpecified(AllocSpec::kNoThrowAllocate)) {
+    ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
+    return static_cast<pointer>(::operator new(n * sizeof(T)));
+  }
+
+  pointer allocate(size_type n, const_void_pointer) noexcept(
+      IsSpecified(AllocSpec::kNoThrowAllocate)) {
+    return allocate(n);
+  }
+
+  void deallocate(pointer ptr, size_type) noexcept {
+    ReadState();
+    ::operator delete(static_cast<void*>(ptr));
+  }
+
+  template <typename U, typename... Args>
+  void construct(U* ptr, Args&&... args) noexcept(
+      IsSpecified(AllocSpec::kNoThrowAllocate)) {
+    ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
+    ::new (static_cast<void*>(ptr)) U(std::forward<Args>(args)...);
+  }
+
+  template <typename U>
+  void destroy(U* p) noexcept {
+    ReadState();
+    p->~U();
+  }
+
+  size_type max_size() const noexcept {
+    return (std::numeric_limits<difference_type>::max)() / sizeof(value_type);
+  }
+
+  ThrowingAllocator select_on_container_copy_construction() noexcept(
+      IsSpecified(AllocSpec::kNoThrowAllocate)) {
+    ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
+    return *this;
+  }
+
+  template <typename U>
+  bool operator==(const ThrowingAllocator<U, Spec>& other) const noexcept {
+    return dummy_ == other.dummy_;
+  }
+
+  template <typename U>
+  bool operator!=(const ThrowingAllocator<U, Spec>& other) const noexcept {
+    return dummy_ != other.dummy_;
+  }
+
+  template <typename, AllocSpec>
+  friend class ThrowingAllocator;
+
+ private:
+  static std::string GetInstanceString(int dummy) {
+    return absl::StrCat("ThrowingAllocator<",
+                        exceptions_internal::GetSpecString(Spec), ">(", dummy,
+                        ")");
+  }
+
+  const std::shared_ptr<const int>& State() const { return dummy_; }
+  std::shared_ptr<const int>& State() { return dummy_; }
+
+  void ReadState() {
+    // we know that this will never be true, but the compiler doesn't, so this
+    // should safely force a read of the value.
+    if (*dummy_ < 0) std::abort();
+  }
+
+  void ReadStateAndMaybeThrow(absl::string_view msg) const {
+    if (!IsSpecified(AllocSpec::kNoThrowAllocate)) {
+      exceptions_internal::MaybeThrow(
+          absl::Substitute("Allocator id $0 threw from $1", *dummy_, msg));
+    }
+  }
+
+  static int next_id_;
+  std::shared_ptr<const int> dummy_;
+};
+
+template <typename T, AllocSpec Spec>
+int ThrowingAllocator<T, Spec>::next_id_ = 0;
+
+// Tests for resource leaks by attempting to construct a T using args repeatedly
+// until successful, using the countdown method.  Side effects can then be
+// tested for resource leaks.
+template <typename T, typename... Args>
+void TestThrowingCtor(Args&&... args) {
+  struct Cleanup {
+    ~Cleanup() { exceptions_internal::UnsetCountdown(); }
+  } c;
+  for (int count = 0;; ++count) {
+    exceptions_internal::ConstructorTracker ct(count);
+    exceptions_internal::SetCountdown(count);
+    try {
+      T temp(std::forward<Args>(args)...);
+      static_cast<void>(temp);
+      break;
+    } catch (const exceptions_internal::TestException&) {
+    }
+  }
+}
+
+// Tests the nothrow guarantee of the provided nullary operation. If an
+// exception is thrown, the result will be AssertionFailure(). Otherwise, it
+// will be AssertionSuccess().
+template <typename Operation>
+testing::AssertionResult TestNothrowOp(const Operation& operation) {
+  struct Cleanup {
+    Cleanup() { exceptions_internal::SetCountdown(); }
+    ~Cleanup() { exceptions_internal::UnsetCountdown(); }
+  } c;
+  try {
+    operation();
+    return testing::AssertionSuccess();
+  } catch (const exceptions_internal::TestException&) {
+    return testing::AssertionFailure()
+           << "TestException thrown during call to operation() when nothrow "
+              "guarantee was expected.";
+  } catch (...) {
+    return testing::AssertionFailure()
+           << "Unknown exception thrown during call to operation() when "
+              "nothrow guarantee was expected.";
+  }
+}
+
+namespace exceptions_internal {
+
+// Dummy struct for ExceptionSafetyTestBuilder<> partial state.
+struct UninitializedT {};
+
+template <typename T>
+class DefaultFactory {
+ public:
+  explicit DefaultFactory(const T& t) : t_(t) {}
+  std::unique_ptr<T> operator()() const { return absl::make_unique<T>(t_); }
+
+ private:
+  T t_;
+};
+
+template <size_t LazyContractsCount, typename LazyFactory,
+          typename LazyOperation>
+using EnableIfTestable = typename absl::enable_if_t<
+    LazyContractsCount != 0 &&
+    !std::is_same<LazyFactory, UninitializedT>::value &&
+    !std::is_same<LazyOperation, UninitializedT>::value>;
+
+template <typename Factory = UninitializedT,
+          typename Operation = UninitializedT, typename... Contracts>
+class ExceptionSafetyTestBuilder;
+
+}  // namespace exceptions_internal
+
+/*
+ * Constructs an empty ExceptionSafetyTestBuilder. All
+ * ExceptionSafetyTestBuilder objects are immutable and all With[thing] mutation
+ * methods return new instances of ExceptionSafetyTestBuilder.
+ *
+ * In order to test a T for exception safety, a factory for that T, a testable
+ * operation, and at least one contract callback returning an assertion
+ * result must be applied using the respective methods.
+ */
+exceptions_internal::ExceptionSafetyTestBuilder<> MakeExceptionSafetyTester();
+
+namespace exceptions_internal {
+template <typename T>
+struct IsUniquePtr : std::false_type {};
+
+template <typename T, typename D>
+struct IsUniquePtr<std::unique_ptr<T, D>> : std::true_type {};
+
+template <typename Factory>
+struct FactoryPtrTypeHelper {
+  using type = decltype(std::declval<const Factory&>()());
+
+  static_assert(IsUniquePtr<type>::value, "Factories must return a unique_ptr");
+};
+
+template <typename Factory>
+using FactoryPtrType = typename FactoryPtrTypeHelper<Factory>::type;
+
+template <typename Factory>
+using FactoryElementType = typename FactoryPtrType<Factory>::element_type;
+
+template <typename T>
+class ExceptionSafetyTest {
+  using Factory = std::function<std::unique_ptr<T>()>;
+  using Operation = std::function<void(T*)>;
+  using Contract = std::function<AssertionResult(T*)>;
+
+ public:
+  template <typename... Contracts>
+  explicit ExceptionSafetyTest(const Factory& f, const Operation& op,
+                               const Contracts&... contracts)
+      : factory_(f), operation_(op), contracts_{WrapContract(contracts)...} {}
+
+  AssertionResult Test() const {
+    for (int count = 0;; ++count) {
+      exceptions_internal::ConstructorTracker ct(count);
+
+      for (const auto& contract : contracts_) {
+        auto t_ptr = factory_();
+        try {
+          SetCountdown(count);
+          operation_(t_ptr.get());
+          // Unset for the case that the operation throws no exceptions, which
+          // would leave the countdown set and break the *next* exception safety
+          // test after this one.
+          UnsetCountdown();
+          return AssertionSuccess();
+        } catch (const exceptions_internal::TestException& e) {
+          if (!contract(t_ptr.get())) {
+            return AssertionFailure() << e.what() << " failed contract check";
+          }
+        }
+      }
+    }
+  }
+
+ private:
+  template <typename ContractFn>
+  Contract WrapContract(const ContractFn& contract) {
+    return [contract](T* t_ptr) { return AssertionResult(contract(t_ptr)); };
+  }
+
+  Contract WrapContract(StrongGuaranteeTagType) {
+    return [this](T* t_ptr) { return AssertionResult(*factory_() == *t_ptr); };
+  }
+
+  Factory factory_;
+  Operation operation_;
+  std::vector<Contract> contracts_;
+};
+
+/*
+ * Builds a tester object that tests if performing an operation on a T follows
+ * exception safety guarantees. Verification is done via contract assertion
+ * callbacks applied to T instances post-throw.
+ *
+ * Template parameters for ExceptionSafetyTestBuilder:
+ *
+ * - Factory: The factory object (passed in via tester.WithFactory(...) or
+ *   tester.WithInitialValue(...)) must be invocable with the signature
+ *   `std::unique_ptr<T> operator()() const` where T is the type being tested.
+ *   It is used for reliably creating identical T instances to test on.
+ *
+ * - Operation: The operation object (passed in via tester.WithOperation(...)
+ *   or tester.Test(...)) must be invocable with the signature
+ *   `void operator()(T*) const` where T is the type being tested. It is used
+ *   for performing steps on a T instance that may throw and that need to be
+ *   checked for exception safety. Each call to the operation will receive a
+ *   fresh T instance so it's free to modify and destroy the T instances as it
+ *   pleases.
+ *
+ * - Contracts...: The contract assertion callback objects (passed in via
+ *   tester.WithContracts(...)) must be invocable with the signature
+ *   `testing::AssertionResult operator()(T*) const` where T is the type being
+ *   tested. Contract assertion callbacks are provided T instances post-throw.
+ *   They must return testing::AssertionSuccess when the type contracts of the
+ *   provided T instance hold. If the type contracts of the T instance do not
+ *   hold, they must return testing::AssertionFailure. Execution order of
+ *   Contracts... is unspecified. They will each individually get a fresh T
+ *   instance so they are free to modify and destroy the T instances as they
+ *   please.
+ */
+template <typename Factory, typename Operation, typename... Contracts>
+class ExceptionSafetyTestBuilder {
+ public:
+  /*
+   * Returns a new ExceptionSafetyTestBuilder with an included T factory based
+   * on the provided T instance. The existing factory will not be included in
+   * the newly created tester instance. The created factory returns a new T
+   * instance by copy-constructing the provided const T& t.
+   *
+   * Preconditions for tester.WithInitialValue(const T& t):
+   *
+   * - The const T& t object must be copy-constructible where T is the type
+   *   being tested. For non-copy-constructible objects, use the method
+   *   tester.WithFactory(...).
+   */
+  template <typename T>
+  ExceptionSafetyTestBuilder<DefaultFactory<T>, Operation, Contracts...>
+  WithInitialValue(const T& t) const {
+    return WithFactory(DefaultFactory<T>(t));
+  }
+
+  /*
+   * Returns a new ExceptionSafetyTestBuilder with the provided T factory
+   * included. The existing factory will not be included in the newly-created
+   * tester instance. This method is intended for use with types lacking a copy
+   * constructor. Types that can be copy-constructed should instead use the
+   * method tester.WithInitialValue(...).
+   */
+  template <typename NewFactory>
+  ExceptionSafetyTestBuilder<absl::decay_t<NewFactory>, Operation, Contracts...>
+  WithFactory(const NewFactory& new_factory) const {
+    return {new_factory, operation_, contracts_};
+  }
+
+  /*
+   * Returns a new ExceptionSafetyTestBuilder with the provided testable
+   * operation included. The existing operation will not be included in the
+   * newly created tester.
+   */
+  template <typename NewOperation>
+  ExceptionSafetyTestBuilder<Factory, absl::decay_t<NewOperation>, Contracts...>
+  WithOperation(const NewOperation& new_operation) const {
+    return {factory_, new_operation, contracts_};
+  }
+
+  /*
+   * Returns a new ExceptionSafetyTestBuilder with the provided MoreContracts...
+   * combined with the Contracts... that were already included in the instance
+   * on which the method was called. Contracts... cannot be removed or replaced
+   * once added to an ExceptionSafetyTestBuilder instance. A fresh object must
+   * be created in order to get an empty Contracts... list.
+   *
+   * In addition to passing in custom contract assertion callbacks, this method
+   * accepts `testing::strong_guarantee` as an argument which checks T instances
+   * post-throw against freshly created T instances via operator== to verify
+   * that any state changes made during the execution of the operation were
+   * properly rolled back.
+   */
+  template <typename... MoreContracts>
+  ExceptionSafetyTestBuilder<Factory, Operation, Contracts...,
+                             absl::decay_t<MoreContracts>...>
+  WithContracts(const MoreContracts&... more_contracts) const {
+    return {
+        factory_, operation_,
+        std::tuple_cat(contracts_, std::tuple<absl::decay_t<MoreContracts>...>(
+                                       more_contracts...))};
+  }
+
+  /*
+   * Returns a testing::AssertionResult that is the reduced result of the
+   * exception safety algorithm. The algorithm short circuits and returns
+   * AssertionFailure after the first contract callback returns an
+   * AssertionFailure. Otherwise, if all contract callbacks return an
+   * AssertionSuccess, the reduced result is AssertionSuccess.
+   *
+   * The passed-in testable operation will not be saved in a new tester instance
+   * nor will it modify/replace the existing tester instance. This is useful
+   * when each operation being tested is unique and does not need to be reused.
+   *
+   * Preconditions for tester.Test(const NewOperation& new_operation):
+   *
+   * - May only be called after at least one contract assertion callback and a
+   *   factory or initial value have been provided.
+   */
+  template <
+      typename NewOperation,
+      typename = EnableIfTestable<sizeof...(Contracts), Factory, NewOperation>>
+  testing::AssertionResult Test(const NewOperation& new_operation) const {
+    return TestImpl(new_operation, absl::index_sequence_for<Contracts...>());
+  }
+
+  /*
+   * Returns a testing::AssertionResult that is the reduced result of the
+   * exception safety algorithm. The algorithm short circuits and returns
+   * AssertionFailure after the first contract callback returns an
+   * AssertionFailure. Otherwise, if all contract callbacks return an
+   * AssertionSuccess, the reduced result is AssertionSuccess.
+   *
+   * Preconditions for tester.Test():
+   *
+   * - May only be called after at least one contract assertion callback, a
+   *   factory or initial value and a testable operation have been provided.
+   */
+  template <
+      typename LazyOperation = Operation,
+      typename = EnableIfTestable<sizeof...(Contracts), Factory, LazyOperation>>
+  testing::AssertionResult Test() const {
+    return Test(operation_);
+  }
+
+ private:
+  template <typename, typename, typename...>
+  friend class ExceptionSafetyTestBuilder;
+
+  friend ExceptionSafetyTestBuilder<> testing::MakeExceptionSafetyTester();
+
+  ExceptionSafetyTestBuilder() {}
+
+  ExceptionSafetyTestBuilder(const Factory& f, const Operation& o,
+                             const std::tuple<Contracts...>& i)
+      : factory_(f), operation_(o), contracts_(i) {}
+
+  template <typename SelectedOperation, size_t... Indices>
+  testing::AssertionResult TestImpl(SelectedOperation selected_operation,
+                                    absl::index_sequence<Indices...>) const {
+    return ExceptionSafetyTest<FactoryElementType<Factory>>(
+               factory_, selected_operation, std::get<Indices>(contracts_)...)
+        .Test();
+  }
+
+  Factory factory_;
+  Operation operation_;
+  std::tuple<Contracts...> contracts_;
+};
+
+}  // namespace exceptions_internal
+
+}  // namespace testing
+
+#endif  // ABSL_HAVE_EXCEPTIONS
+
+#endif  // ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_
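
Note (not part of the vendored file): a minimal sketch of how the builder documented above is typically wired together. MyVec is a hypothetical copy-constructible, equality-comparable container whose push_back may throw; only MakeExceptionSafetyTester, WithInitialValue, WithContracts, strong_guarantee, and Test come from the header itself.

#include "absl/base/internal/exception_safety_testing.h"
#include "gtest/gtest.h"

#ifdef ABSL_HAVE_EXCEPTIONS
TEST(MyVecExceptionSafety, PushBackMeetsStrongGuarantee) {
  MyVec v = {1, 2, 3};  // hypothetical type under test
  auto tester = testing::MakeExceptionSafetyTester()
                    .WithInitialValue(v)  // each run copy-constructs a fresh MyVec
                    .WithContracts(testing::strong_guarantee);
  // The operation is handed a fresh MyVec* for every forced throw point.
  EXPECT_TRUE(tester.Test([](MyVec* target) { target->push_back(4); }));
}
#endif  // ABSL_HAVE_EXCEPTIONS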

+ 42 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/exception_testing.h

@@ -0,0 +1,42 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Testing utilities for ABSL types which throw exceptions.
+
+#ifndef ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
+#define ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+
+// ABSL_BASE_INTERNAL_EXPECT_FAIL tests either for a specified thrown exception
+// if exceptions are enabled, or for death with a specified text in the error
+// message.
+#ifdef ABSL_HAVE_EXCEPTIONS
+
+#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
+  EXPECT_THROW(expr, exception_t)
+
+#elif defined(__ANDROID__)
+// Android asserts do not log anywhere that gtest can currently inspect.
+// So we expect exit, but cannot match the message.
+#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
+  EXPECT_DEATH(expr, ".*")
+#else
+#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
+  EXPECT_DEATH_IF_SUPPORTED(expr, text)
+
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
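
Note (not part of the vendored file): a hedged sketch of how ABSL_BASE_INTERNAL_EXPECT_FAIL is meant to be used. CheckPositive is a hypothetical function that throws when exceptions are enabled and prints the same message to stderr before aborting when they are not.

#include <cstdio>
#include <cstdlib>
#include <stdexcept>

#include "gtest/gtest.h"
#include "absl/base/internal/exception_testing.h"

int CheckPositive(int x) {
#ifdef ABSL_HAVE_EXCEPTIONS
  if (x <= 0) throw std::invalid_argument("value must be positive");
#else
  if (x <= 0) {
    std::fprintf(stderr, "value must be positive\n");
    std::abort();
  }
#endif
  return x;
}

TEST(ExceptionTestingExample, RejectsNonPositive) {
  // Expands to EXPECT_THROW when exceptions are on, EXPECT_DEATH(".*") on
  // Android, and EXPECT_DEATH_IF_SUPPORTED matching the text elsewhere.
  ABSL_BASE_INTERNAL_EXPECT_FAIL(CheckPositive(0), std::invalid_argument,
                                 "value must be positive");
}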

+ 50 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/fast_type_id.h

@@ -0,0 +1,50 @@
+//
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
+#define ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+template <typename Type>
+struct FastTypeTag {
+  constexpr static char dummy_var = 0;
+};
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+template <typename Type>
+constexpr char FastTypeTag<Type>::dummy_var;
+#endif
+
+// FastTypeId<Type>() evaluates at compile/link-time to a unique pointer for the
+// passed-in type. The resulting values are well suited for use as map keys or
+// for direct comparisons.
+using FastTypeIdType = const void*;
+
+template <typename Type>
+constexpr inline FastTypeIdType FastTypeId() {
+  return &FastTypeTag<Type>::dummy_var;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
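
Note (not part of the vendored file): a small sketch of the intended use of FastTypeId as a map key; it relies only on the property stated above, that each distinct type maps to a distinct, stable pointer value within one binary.

#include <cassert>
#include <map>
#include <string>

#include "absl/base/internal/fast_type_id.h"

int main() {
  namespace bi = absl::base_internal;
  std::map<bi::FastTypeIdType, std::string> names;
  names[bi::FastTypeId<int>()] = "int";
  names[bi::FastTypeId<double>()] = "double";
  // The same type always yields the same id; different types never collide.
  assert(bi::FastTypeId<int>() == bi::FastTypeId<int>());
  assert(bi::FastTypeId<int>() != bi::FastTypeId<double>());
  assert(names.at(bi::FastTypeId<double>()) == "double");
  return 0;
}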

+ 123 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/fast_type_id_test.cc

@@ -0,0 +1,123 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/fast_type_id.h"
+
+#include <cstdint>
+#include <map>
+#include <vector>
+
+#include "gtest/gtest.h"
+
+namespace {
+namespace bi = absl::base_internal;
+
+// NOLINTNEXTLINE
+#define PRIM_TYPES(A)   \
+  A(bool)               \
+  A(short)              \
+  A(unsigned short)     \
+  A(int)                \
+  A(unsigned int)       \
+  A(long)               \
+  A(unsigned long)      \
+  A(long long)          \
+  A(unsigned long long) \
+  A(float)              \
+  A(double)             \
+  A(long double)
+
+TEST(FastTypeIdTest, PrimitiveTypes) {
+  bi::FastTypeIdType type_ids[] = {
+#define A(T) bi::FastTypeId<T>(),
+    PRIM_TYPES(A)
+#undef A
+#define A(T) bi::FastTypeId<const T>(),
+    PRIM_TYPES(A)
+#undef A
+#define A(T) bi::FastTypeId<volatile T>(),
+    PRIM_TYPES(A)
+#undef A
+#define A(T) bi::FastTypeId<const volatile T>(),
+    PRIM_TYPES(A)
+#undef A
+  };
+  size_t total_type_ids = sizeof(type_ids) / sizeof(bi::FastTypeIdType);
+
+  for (int i = 0; i < total_type_ids; ++i) {
+    EXPECT_EQ(type_ids[i], type_ids[i]);
+    for (int j = 0; j < i; ++j) {
+      EXPECT_NE(type_ids[i], type_ids[j]);
+    }
+  }
+}
+
+#define FIXED_WIDTH_TYPES(A) \
+  A(int8_t)                  \
+  A(uint8_t)                 \
+  A(int16_t)                 \
+  A(uint16_t)                \
+  A(int32_t)                 \
+  A(uint32_t)                \
+  A(int64_t)                 \
+  A(uint64_t)
+
+TEST(FastTypeIdTest, FixedWidthTypes) {
+  bi::FastTypeIdType type_ids[] = {
+#define A(T) bi::FastTypeId<T>(),
+    FIXED_WIDTH_TYPES(A)
+#undef A
+#define A(T) bi::FastTypeId<const T>(),
+    FIXED_WIDTH_TYPES(A)
+#undef A
+#define A(T) bi::FastTypeId<volatile T>(),
+    FIXED_WIDTH_TYPES(A)
+#undef A
+#define A(T) bi::FastTypeId<const volatile T>(),
+    FIXED_WIDTH_TYPES(A)
+#undef A
+  };
+  size_t total_type_ids = sizeof(type_ids) / sizeof(bi::FastTypeIdType);
+
+  for (int i = 0; i < total_type_ids; ++i) {
+    EXPECT_EQ(type_ids[i], type_ids[i]);
+    for (int j = 0; j < i; ++j) {
+      EXPECT_NE(type_ids[i], type_ids[j]);
+    }
+  }
+}
+
+TEST(FastTypeIdTest, AliasTypes) {
+  using int_alias = int;
+  EXPECT_EQ(bi::FastTypeId<int_alias>(), bi::FastTypeId<int>());
+}
+
+TEST(FastTypeIdTest, TemplateSpecializations) {
+  EXPECT_NE(bi::FastTypeId<std::vector<int>>(),
+            bi::FastTypeId<std::vector<long>>());
+
+  EXPECT_NE((bi::FastTypeId<std::map<int, float>>()),
+            (bi::FastTypeId<std::map<int, double>>()));
+}
+
+struct Base {};
+struct Derived : Base {};
+struct PDerived : private Base {};
+
+TEST(FastTypeIdTest, Inheritance) {
+  EXPECT_NE(bi::FastTypeId<Base>(), bi::FastTypeId<Derived>());
+  EXPECT_NE(bi::FastTypeId<Base>(), bi::FastTypeId<PDerived>());
+}
+
+}  // namespace

+ 51 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/hide_ptr.h

@@ -0,0 +1,51 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_HIDE_PTR_H_
+#define ABSL_BASE_INTERNAL_HIDE_PTR_H_
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Arbitrary value with high bits set. Xor'ing with it is unlikely
+// to map one valid pointer to another valid pointer.
+constexpr uintptr_t HideMask() {
+  return (uintptr_t{0xF03A5F7BU} << (sizeof(uintptr_t) - 4) * 8) | 0xF03A5F7BU;
+}
+
+// Hide a pointer from the leak checker. For internal use only.
+// Differs from absl::IgnoreLeak(ptr) in that absl::IgnoreLeak(ptr) causes ptr
+// and all objects reachable from ptr to be ignored by the leak checker.
+template <class T>
+inline uintptr_t HidePtr(T* ptr) {
+  return reinterpret_cast<uintptr_t>(ptr) ^ HideMask();
+}
+
+// Return a pointer that has been hidden from the leak checker.
+// For internal use only.
+template <class T>
+inline T* UnhidePtr(uintptr_t hidden) {
+  return reinterpret_cast<T*>(hidden ^ HideMask());
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_HIDE_PTR_H_
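
Note (not part of the vendored file): a round-trip sketch of HidePtr/UnhidePtr; the only property relied on is that xor-ing twice with HideMask() restores the original pointer bits.

#include <cassert>
#include <cstdint>

#include "absl/base/internal/hide_ptr.h"

int main() {
  int value = 42;
  // Store the pointer as an integer the leak checker will not treat as a reference.
  uintptr_t hidden = absl::base_internal::HidePtr(&value);
  // Recover it later; the double xor with HideMask() round-trips exactly.
  int* recovered = absl::base_internal::UnhidePtr<int>(hidden);
  assert(recovered == &value);
  assert(*recovered == 42);
  return 0;
}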

+ 39 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/identity.h

@@ -0,0 +1,39 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_IDENTITY_H_
+#define ABSL_BASE_INTERNAL_IDENTITY_H_
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace internal {
+
+// This is a back-fill of C++20's `std::type_identity`.
+template <typename T>
+struct type_identity {
+  typedef T type;
+};
+
+// This is a back-fill of C++20's `std::type_identity_t`.
+template <typename T>
+using type_identity_t = typename type_identity<T>::type;
+
+}  // namespace internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_IDENTITY_H_
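
Note (not part of the vendored file): type_identity_t is most often used to place a template parameter into a non-deduced context. The Clamp helper below is a hypothetical illustration of that pattern, not something the header defines.

#include "absl/base/internal/identity.h"

// T is deduced only from `lo` and `hi`; the first parameter is non-deduced.
template <typename T>
T Clamp(absl::internal::type_identity_t<T> value, T lo, T hi) {
  return value < lo ? lo : (hi < value ? hi : value);
}

int main() {
  // The int literal converts to double instead of causing a deduction conflict.
  double d = Clamp(3, 0.0, 10.0);
  (void)d;
  return 0;
}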

+ 108 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/inline_variable.h

@@ -0,0 +1,108 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_H_
+#define ABSL_BASE_INTERNAL_INLINE_VARIABLE_H_
+
+#include <type_traits>
+
+#include "absl/base/internal/identity.h"
+
+// File:
+//   This file defines a macro that allows the creation or emulation of C++17
+//   inline variables, depending on whether the feature is supported.
+
+////////////////////////////////////////////////////////////////////////////////
+// Macro: ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init)
+//
+// Description:
+//   Expands to the equivalent of an inline constexpr instance of the specified
+//   `type` and `name`, initialized to the value `init`. If the compiler being
+//   used is detected as supporting actual inline variables as a language
+//   feature, then the macro expands to an actual inline variable definition.
+//
+// Requires:
+//   `type` is a type that is usable in an extern variable declaration.
+//
+// Requires: `name` is a valid identifier
+//
+// Requires:
+//   `init` is an expression that can be used in the following definition:
+//     constexpr type name = init;
+//
+// Usage:
+//
+//   // Equivalent to: `inline constexpr size_t variant_npos = -1;`
+//   ABSL_INTERNAL_INLINE_CONSTEXPR(size_t, variant_npos, -1);
+//
+// Differences in implementation:
+//   For a direct, language-level inline variable, decltype(name) will be the
+//   type that was specified along with const qualification, whereas for
+//   emulated inline variables, decltype(name) may be different (in practice
+//   it will likely be a reference type).
+////////////////////////////////////////////////////////////////////////////////
+
+#ifdef __cpp_inline_variables
+
+// Clang's -Wmissing-variable-declarations option erroneously warned that
+// inline constexpr objects need to be pre-declared. This has now been fixed,
+// but we will need to support this workaround for people building with older
+// versions of clang.
+//
+// Bug: https://bugs.llvm.org/show_bug.cgi?id=35862
+//
+// Note:
+//   type_identity_t is used here so that the const and name are in the
+//   appropriate place for pointer types, reference types, function pointer
+//   types, etc..
+#if defined(__clang__)
+#define ABSL_INTERNAL_EXTERN_DECL(type, name) \
+  extern const ::absl::internal::type_identity_t<type> name;
+#else  // Otherwise, just define the macro to do nothing.
+#define ABSL_INTERNAL_EXTERN_DECL(type, name)
+#endif  // defined(__clang__)
+
+// See above comment at top of file for details.
+#define ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init) \
+  ABSL_INTERNAL_EXTERN_DECL(type, name)                  \
+  inline constexpr ::absl::internal::type_identity_t<type> name = init
+
+#else
+
+// See above comment at top of file for details.
+//
+// Note:
+//   type_identity_t is used here so that the const and name are in the
+//   appropriate place for pointer types, reference types, function pointer
+//   types, etc..
+#define ABSL_INTERNAL_INLINE_CONSTEXPR(var_type, name, init)                 \
+  template <class /*AbslInternalDummy*/ = void>                              \
+  struct AbslInternalInlineVariableHolder##name {                            \
+    static constexpr ::absl::internal::type_identity_t<var_type> kInstance = \
+        init;                                                                \
+  };                                                                         \
+                                                                             \
+  template <class AbslInternalDummy>                                         \
+  constexpr ::absl::internal::type_identity_t<var_type>                      \
+      AbslInternalInlineVariableHolder##name<AbslInternalDummy>::kInstance;  \
+                                                                             \
+  static constexpr const ::absl::internal::type_identity_t<var_type>&        \
+      name = /* NOLINT */                                                    \
+      AbslInternalInlineVariableHolder##name<>::kInstance;                   \
+  static_assert(sizeof(void (*)(decltype(name))) != 0,                       \
+                "Silence unused variable warnings.")
+
+#endif  // __cpp_inline_variables
+
+#endif  // ABSL_BASE_INTERNAL_INLINE_VARIABLE_H_
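
Note (not part of the vendored file): a hedged sketch of the macro in a header-style context. kDefaultCapacity is a hypothetical constant; whether the macro expands to a real C++17 inline variable or to the emulated holder, every translation unit that includes the definition refers to one object, and the value remains usable in constant expressions.

#include <cstddef>

#include "absl/base/internal/inline_variable.h"

// Equivalent in spirit to `inline constexpr std::size_t kDefaultCapacity = 16;`.
ABSL_INTERNAL_INLINE_CONSTEXPR(std::size_t, kDefaultCapacity, 16);

static_assert(kDefaultCapacity == 16, "usable in constant expressions");

// Taking the address is well-defined and consistent across translation units.
inline const void* DefaultCapacityAddress() { return &kDefaultCapacity; }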

+ 46 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/inline_variable_testing.h

@@ -0,0 +1,46 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_TESTING_H_
+#define ABSL_BASE_INTERNAL_INLINE_VARIABLE_TESTING_H_
+
+#include "absl/base/internal/inline_variable.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace inline_variable_testing_internal {
+
+struct Foo {
+  int value = 5;
+};
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, inline_variable_foo, {});
+ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, other_inline_variable_foo, {});
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(int, inline_variable_int, 5);
+ABSL_INTERNAL_INLINE_CONSTEXPR(int, other_inline_variable_int, 5);
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(void(*)(), inline_variable_fun_ptr, nullptr);
+
+const Foo& get_foo_a();
+const Foo& get_foo_b();
+
+const int& get_int_a();
+const int& get_int_b();
+
+}  // namespace inline_variable_testing_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_INLINE_VARIABLE_TESTING_H_

+ 241 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/invoke.h

@@ -0,0 +1,241 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// absl::base_internal::invoke(f, args...) is an implementation of
+// INVOKE(f, args...) from section [func.require] of the C++ standard.
+// When compiled as C++17 and later versions, it is implemented as an alias of
+// std::invoke.
+//
+// [func.require]
+// Define INVOKE (f, t1, t2, ..., tN) as follows:
+// 1. (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T
+//    and t1 is an object of type T or a reference to an object of type T or a
+//    reference to an object of a type derived from T;
+// 2. ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a
+//    class T and t1 is not one of the types described in the previous item;
+// 3. t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is
+//    an object of type T or a reference to an object of type T or a reference
+//    to an object of a type derived from T;
+// 4. (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1
+//    is not one of the types described in the previous item;
+// 5. f(t1, t2, ..., tN) in all other cases.
+//
+// The implementation is SFINAE-friendly: substitution failure within invoke()
+// isn't an error.
+
+#ifndef ABSL_BASE_INTERNAL_INVOKE_H_
+#define ABSL_BASE_INTERNAL_INVOKE_H_
+
+#include "absl/base/config.h"
+
+#if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+
+#include <functional>
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+using std::invoke;
+using std::invoke_result_t;
+using std::is_invocable_r;
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#else  // ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+
+#include <algorithm>
+#include <type_traits>
+#include <utility>
+
+#include "absl/meta/type_traits.h"
+
+// The following code is internal implementation detail.  See the comment at the
+// top of this file for the API documentation.
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// The five classes below each implement one of the clauses from the definition
+// of INVOKE. The inner class template Accept<F, Args...> checks whether the
+// clause is applicable; static function template Invoke(f, args...) does the
+// invocation.
+//
+// By separating the clause selection logic from invocation we make sure that
+// Invoke() does exactly what the standard says.
+
+template <typename Derived>
+struct StrippedAccept {
+  template <typename... Args>
+  struct Accept : Derived::template AcceptImpl<typename std::remove_cv<
+                      typename std::remove_reference<Args>::type>::type...> {};
+};
+
+// (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T
+// and t1 is an object of type T or a reference to an object of type T or a
+// reference to an object of a type derived from T.
+struct MemFunAndRef : StrippedAccept<MemFunAndRef> {
+  template <typename... Args>
+  struct AcceptImpl : std::false_type {};
+
+  template <typename MemFunType, typename C, typename Obj, typename... Args>
+  struct AcceptImpl<MemFunType C::*, Obj, Args...>
+      : std::integral_constant<bool, std::is_base_of<C, Obj>::value &&
+                                         absl::is_function<MemFunType>::value> {
+  };
+
+  template <typename MemFun, typename Obj, typename... Args>
+  static decltype((std::declval<Obj>().*
+                   std::declval<MemFun>())(std::declval<Args>()...))
+  Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) {
+// Ignore bogus GCC warnings on this line.
+// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101436 for similar example.
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Warray-bounds"
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+    return (std::forward<Obj>(obj).*
+            std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0)
+#pragma GCC diagnostic pop
+#endif
+  }
+};
+
+// ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a
+// class T and t1 is not one of the types described in the previous item.
+struct MemFunAndPtr : StrippedAccept<MemFunAndPtr> {
+  template <typename... Args>
+  struct AcceptImpl : std::false_type {};
+
+  template <typename MemFunType, typename C, typename Ptr, typename... Args>
+  struct AcceptImpl<MemFunType C::*, Ptr, Args...>
+      : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value &&
+                                         absl::is_function<MemFunType>::value> {
+  };
+
+  template <typename MemFun, typename Ptr, typename... Args>
+  static decltype(((*std::declval<Ptr>()).*
+                   std::declval<MemFun>())(std::declval<Args>()...))
+  Invoke(MemFun&& mem_fun, Ptr&& ptr, Args&&... args) {
+    return ((*std::forward<Ptr>(ptr)).*
+            std::forward<MemFun>(mem_fun))(std::forward<Args>(args)...);
+  }
+};
+
+// t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is
+// an object of type T or a reference to an object of type T or a reference
+// to an object of a type derived from T.
+struct DataMemAndRef : StrippedAccept<DataMemAndRef> {
+  template <typename... Args>
+  struct AcceptImpl : std::false_type {};
+
+  template <typename R, typename C, typename Obj>
+  struct AcceptImpl<R C::*, Obj>
+      : std::integral_constant<bool, std::is_base_of<C, Obj>::value &&
+                                         !absl::is_function<R>::value> {};
+
+  template <typename DataMem, typename Ref>
+  static decltype(std::declval<Ref>().*std::declval<DataMem>()) Invoke(
+      DataMem&& data_mem, Ref&& ref) {
+    return std::forward<Ref>(ref).*std::forward<DataMem>(data_mem);
+  }
+};
+
+// (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1
+// is not one of the types described in the previous item.
+struct DataMemAndPtr : StrippedAccept<DataMemAndPtr> {
+  template <typename... Args>
+  struct AcceptImpl : std::false_type {};
+
+  template <typename R, typename C, typename Ptr>
+  struct AcceptImpl<R C::*, Ptr>
+      : std::integral_constant<bool, !std::is_base_of<C, Ptr>::value &&
+                                         !absl::is_function<R>::value> {};
+
+  template <typename DataMem, typename Ptr>
+  static decltype((*std::declval<Ptr>()).*std::declval<DataMem>()) Invoke(
+      DataMem&& data_mem, Ptr&& ptr) {
+    return (*std::forward<Ptr>(ptr)).*std::forward<DataMem>(data_mem);
+  }
+};
+
+// f(t1, t2, ..., tN) in all other cases.
+struct Callable {
+  // Callable doesn't have Accept because it's the last clause that gets picked
+  // when none of the previous clauses are applicable.
+  template <typename F, typename... Args>
+  static decltype(std::declval<F>()(std::declval<Args>()...)) Invoke(
+      F&& f, Args&&... args) {
+    return std::forward<F>(f)(std::forward<Args>(args)...);
+  }
+};
+
+// Resolves to the first matching clause.
+template <typename... Args>
+struct Invoker {
+  typedef typename std::conditional<
+      MemFunAndRef::Accept<Args...>::value, MemFunAndRef,
+      typename std::conditional<
+          MemFunAndPtr::Accept<Args...>::value, MemFunAndPtr,
+          typename std::conditional<
+              DataMemAndRef::Accept<Args...>::value, DataMemAndRef,
+              typename std::conditional<DataMemAndPtr::Accept<Args...>::value,
+                                        DataMemAndPtr, Callable>::type>::type>::
+          type>::type type;
+};
+
+// The result type of Invoke<F, Args...>.
+template <typename F, typename... Args>
+using invoke_result_t = decltype(Invoker<F, Args...>::type::Invoke(
+    std::declval<F>(), std::declval<Args>()...));
+
+// Invoke(f, args...) is an implementation of INVOKE(f, args...) from section
+// [func.require] of the C++ standard.
+template <typename F, typename... Args>
+invoke_result_t<F, Args...> invoke(F&& f, Args&&... args) {
+  return Invoker<F, Args...>::type::Invoke(std::forward<F>(f),
+                                           std::forward<Args>(args)...);
+}
+
+template <typename AlwaysVoid, typename, typename, typename...>
+struct IsInvocableRImpl : std::false_type {};
+
+template <typename R, typename F, typename... Args>
+struct IsInvocableRImpl<
+    absl::void_t<absl::base_internal::invoke_result_t<F, Args...> >, R, F,
+    Args...>
+    : std::integral_constant<
+          bool,
+          std::is_convertible<absl::base_internal::invoke_result_t<F, Args...>,
+                              R>::value ||
+              std::is_void<R>::value> {};
+
+// Type trait whose member `value` is true if invoking `F` with `Args` is valid,
+// and either the return type is convertible to `R`, or `R` is void.
+// C++11-compatible version of `std::is_invocable_r`.
+template <typename R, typename F, typename... Args>
+using is_invocable_r = IsInvocableRImpl<void, R, F, Args...>;
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L
+
+#endif  // ABSL_BASE_INTERNAL_INVOKE_H_
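
Note (not part of the vendored file): a compact sketch exercising the INVOKE clauses listed above through absl::base_internal::invoke. Adder is a hypothetical struct introduced only for illustration.

#include <cassert>

#include "absl/base/internal/invoke.h"

struct Adder {
  int base;
  int Add(int x) const { return base + x; }
};

int main() {
  namespace bi = absl::base_internal;
  Adder a{10};
  assert(bi::invoke(&Adder::Add, a, 5) == 15);   // clause 1: member function + reference
  assert(bi::invoke(&Adder::Add, &a, 5) == 15);  // clause 2: member function + pointer
  assert(bi::invoke(&Adder::base, a) == 10);     // clause 3: data member + reference
  assert(bi::invoke(&Adder::base, &a) == 10);    // clause 4: data member + pointer
  assert(bi::invoke([](int x) { return 2 * x; }, 21) == 42);  // clause 5: plain callable
  return 0;
}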

+ 631 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/low_level_alloc.cc

@@ -0,0 +1,631 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A low-level allocator that can be used by other low-level
+// modules without introducing dependency cycles.
+// This allocator is slow and wasteful of memory;
+// it should not be used when performance is key.
+
+#include "absl/base/internal/low_level_alloc.h"
+
+#include <type_traits>
+
+#include "absl/base/call_once.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/direct_mmap.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/macros.h"
+#include "absl/base/thread_annotations.h"
+
+// LowLevelAlloc requires that the platform support low-level
+// allocation of virtual memory. Platforms lacking this cannot use
+// LowLevelAlloc.
+#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
+
+#ifndef _WIN32
+#include <pthread.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#else
+#include <windows.h>
+#endif
+
+#ifdef __linux__
+#include <sys/prctl.h>
+#endif
+
+#include <string.h>
+
+#include <algorithm>
+#include <atomic>
+#include <cerrno>
+#include <cstddef>
+#include <new>  // for placement-new
+
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+
+#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// A first-fit allocator with amortized logarithmic free() time.
+
+// ---------------------------------------------------------------------------
+static const int kMaxLevel = 30;
+
+namespace {
+// This struct describes one allocated block, or one free block.
+struct AllocList {
+  struct Header {
+    // Size of entire region, including this field. Must be
+    // first. Valid in both allocated and unallocated blocks.
+    uintptr_t size;
+
+    // kMagicAllocated or kMagicUnallocated xor this.
+    uintptr_t magic;
+
+    // Pointer to parent arena.
+    LowLevelAlloc::Arena *arena;
+
+    // Aligns regions to 0 mod 2*sizeof(void*).
+    void *dummy_for_alignment;
+  } header;
+
+  // Next two fields: in unallocated blocks: freelist skiplist data
+  //                  in allocated blocks: overlaps with client data
+
+  // Levels in skiplist used.
+  int levels;
+
+  // Actually has levels elements. The AllocList node may not have room
+  // for all kMaxLevel entries. See max_fit in LLA_SkiplistLevels().
+  AllocList *next[kMaxLevel];
+};
+}  // namespace
+
+// ---------------------------------------------------------------------------
+// A trivial skiplist implementation.  This is used to keep the freelist
+// in address order while taking only logarithmic time per insert and delete.
+
+// An integer approximation of log2(size/base)
+// Requires size >= base.
+static int IntLog2(size_t size, size_t base) {
+  int result = 0;
+  for (size_t i = size; i > base; i >>= 1) {  // i == floor(size/2**result)
+    result++;
+  }
+  //    floor(size / 2**result) <= base < floor(size / 2**(result-1))
+  // =>     log2(size/(base+1)) <= result < 1+log2(size/base)
+  // => result ~= log2(size/base)
+  return result;
+}
+
+// Return a random integer n:  p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
+static int Random(uint32_t *state) {
+  uint32_t r = *state;
+  int result = 1;
+  while ((((r = r * 1103515245 + 12345) >> 30) & 1) == 0) {
+    result++;
+  }
+  *state = r;
+  return result;
+}
+
+// Return a number of skiplist levels for a node of size bytes, where
+// base is the minimum node size.  Compute level=log2(size / base)+n
+// where n is 1 if random is false and otherwise a random number generated with
+// the standard distribution for a skiplist:  See Random() above.
+// Bigger nodes tend to have more skiplist levels due to the log2(size / base)
+// term, so first-fit searches touch fewer nodes.  "level" is clipped so
+// level<kMaxLevel and next[level-1] will fit in the node.
+// 0 < LLA_SkiplistLevels(x,y,false) <= LLA_SkiplistLevels(x,y,true) < kMaxLevel
+static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) {
+  // max_fit is the maximum number of levels that will fit in a node for the
+  // given size.   We can't return more than max_fit, no matter what the
+  // random number generator says.
+  size_t max_fit = (size - offsetof(AllocList, next)) / sizeof(AllocList *);
+  int level = IntLog2(size, base) + (random != nullptr ? Random(random) : 1);
+  if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit);
+  if (level > kMaxLevel - 1) level = kMaxLevel - 1;
+  ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level");
+  return level;
+}
+
+// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e.
+// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
+// points to the last element at level i in the AllocList less than *e, or is
+// head if no such element exists.
+static AllocList *LLA_SkiplistSearch(AllocList *head, AllocList *e,
+                                     AllocList **prev) {
+  AllocList *p = head;
+  for (int level = head->levels - 1; level >= 0; level--) {
+    for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) {
+    }
+    prev[level] = p;
+  }
+  return (head->levels == 0) ? nullptr : prev[0]->next[0];
+}
+
+// Insert element *e into AllocList *head.  Set prev[] as LLA_SkiplistSearch.
+// Requires that e->levels be previously set by the caller (using
+// LLA_SkiplistLevels())
+static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
+                               AllocList **prev) {
+  LLA_SkiplistSearch(head, e, prev);
+  for (; head->levels < e->levels; head->levels++) {  // extend prev pointers
+    prev[head->levels] = head;                        // to all *e's levels
+  }
+  for (int i = 0; i != e->levels; i++) {  // add element to list
+    e->next[i] = prev[i]->next[i];
+    prev[i]->next[i] = e;
+  }
+}
+
+// Remove element *e from AllocList *head.  Set prev[] as LLA_SkiplistSearch().
+// Requires that e->levels be previous set by the caller (using
+// LLA_SkiplistLevels())
+static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
+                               AllocList **prev) {
+  AllocList *found = LLA_SkiplistSearch(head, e, prev);
+  ABSL_RAW_CHECK(e == found, "element not in freelist");
+  for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
+    prev[i]->next[i] = e->next[i];
+  }
+  while (head->levels > 0 && head->next[head->levels - 1] == nullptr) {
+    head->levels--;  // reduce head->levels if level unused
+  }
+}
+
+// ---------------------------------------------------------------------------
+// Arena implementation
+
+// Metadata for an LowLevelAlloc arena instance.
+struct LowLevelAlloc::Arena {
+  // Constructs an arena with the given LowLevelAlloc flags.
+  explicit Arena(uint32_t flags_value);
+
+  base_internal::SpinLock mu;
+  // Head of free list, sorted by address
+  AllocList freelist ABSL_GUARDED_BY(mu);
+  // Count of allocated blocks
+  int32_t allocation_count ABSL_GUARDED_BY(mu);
+  // flags passed to NewArena
+  const uint32_t flags;
+  // Result of sysconf(_SC_PAGESIZE)
+  const size_t pagesize;
+  // Lowest power of two >= max(16, sizeof(AllocList))
+  const size_t round_up;
+  // Smallest allocation block size
+  const size_t min_size;
+  // PRNG state
+  uint32_t random ABSL_GUARDED_BY(mu);
+};
+
+namespace {
+// Static storage space for the lazily-constructed, default global arena
+// instances.  We require this space because the whole point of LowLevelAlloc
+// is to avoid relying on malloc/new.
+alignas(LowLevelAlloc::Arena) unsigned char default_arena_storage[sizeof(
+    LowLevelAlloc::Arena)];
+alignas(LowLevelAlloc::Arena) unsigned char unhooked_arena_storage[sizeof(
+    LowLevelAlloc::Arena)];
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+alignas(
+    LowLevelAlloc::Arena) unsigned char unhooked_async_sig_safe_arena_storage
+    [sizeof(LowLevelAlloc::Arena)];
+#endif
+
+// We must use LowLevelCallOnce here to construct the global arenas, rather than
+// using function-level statics, to avoid recursively invoking the scheduler.
+absl::once_flag create_globals_once;
+
+void CreateGlobalArenas() {
+  new (&default_arena_storage)
+      LowLevelAlloc::Arena(LowLevelAlloc::kCallMallocHook);
+  new (&unhooked_arena_storage) LowLevelAlloc::Arena(0);
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+  new (&unhooked_async_sig_safe_arena_storage)
+      LowLevelAlloc::Arena(LowLevelAlloc::kAsyncSignalSafe);
+#endif
+}
+
+// Returns a global arena that does not call into hooks.  Used by NewArena()
+// when kCallMallocHook is not set.
+LowLevelAlloc::Arena *UnhookedArena() {
+  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
+  return reinterpret_cast<LowLevelAlloc::Arena *>(&unhooked_arena_storage);
+}
+
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+// Returns a global arena that is async-signal safe.  Used by NewArena() when
+// kAsyncSignalSafe is set.
+LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() {
+  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
+  return reinterpret_cast<LowLevelAlloc::Arena *>(
+      &unhooked_async_sig_safe_arena_storage);
+}
+#endif
+
+}  // namespace
+
+// Returns the default arena, as used by LowLevelAlloc::Alloc() and friends.
+LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
+  base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas);
+  return reinterpret_cast<LowLevelAlloc::Arena *>(&default_arena_storage);
+}
+
+// magic numbers to identify allocated and unallocated blocks
+static const uintptr_t kMagicAllocated = 0x4c833e95U;
+static const uintptr_t kMagicUnallocated = ~kMagicAllocated;
+
+namespace {
+class ABSL_SCOPED_LOCKABLE ArenaLock {
+ public:
+  explicit ArenaLock(LowLevelAlloc::Arena *arena)
+      ABSL_EXCLUSIVE_LOCK_FUNCTION(arena->mu)
+      : arena_(arena) {
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+      sigset_t all;
+      sigfillset(&all);
+      mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0;
+    }
+#endif
+    arena_->mu.Lock();
+  }
+  ~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); }
+  void Leave() ABSL_UNLOCK_FUNCTION() {
+    arena_->mu.Unlock();
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    if (mask_valid_) {
+      const int err = pthread_sigmask(SIG_SETMASK, &mask_, nullptr);
+      if (err != 0) {
+        ABSL_RAW_LOG(FATAL, "pthread_sigmask failed: %d", err);
+      }
+    }
+#endif
+    left_ = true;
+  }
+
+ private:
+  bool left_ = false;  // whether left region
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+  bool mask_valid_ = false;
+  sigset_t mask_;  // old mask of blocked signals
+#endif
+  LowLevelAlloc::Arena *arena_;
+  ArenaLock(const ArenaLock &) = delete;
+  ArenaLock &operator=(const ArenaLock &) = delete;
+};
+}  // namespace
+
+// create an appropriate magic number for an object at "ptr"
+// "magic" should be kMagicAllocated or kMagicUnallocated
+inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) {
+  return magic ^ reinterpret_cast<uintptr_t>(ptr);
+}
+
+namespace {
+size_t GetPageSize() {
+#ifdef _WIN32
+  SYSTEM_INFO system_info;
+  GetSystemInfo(&system_info);
+  return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity);
+#elif defined(__wasm__) || defined(__asmjs__) || defined(__hexagon__)
+  return getpagesize();
+#else
+  return static_cast<size_t>(sysconf(_SC_PAGESIZE));
+#endif
+}
+
+size_t RoundedUpBlockSize() {
+  // Round up block sizes to a power of two close to the header size.
+  size_t round_up = 16;
+  while (round_up < sizeof(AllocList::Header)) {
+    round_up += round_up;
+  }
+  return round_up;
+}
+
+}  // namespace
+
+LowLevelAlloc::Arena::Arena(uint32_t flags_value)
+    : mu(base_internal::SCHEDULE_KERNEL_ONLY),
+      allocation_count(0),
+      flags(flags_value),
+      pagesize(GetPageSize()),
+      round_up(RoundedUpBlockSize()),
+      min_size(2 * round_up),
+      random(0) {
+  freelist.header.size = 0;
+  freelist.header.magic = Magic(kMagicUnallocated, &freelist.header);
+  freelist.header.arena = this;
+  freelist.levels = 0;
+  memset(freelist.next, 0, sizeof(freelist.next));
+}
+
+// L < meta_data_arena->mu
+LowLevelAlloc::Arena *LowLevelAlloc::NewArena(uint32_t flags) {
+  Arena *meta_data_arena = DefaultArena();
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+  if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+    meta_data_arena = UnhookedAsyncSigSafeArena();
+  } else  // NOLINT(readability/braces)
+#endif
+      if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
+    meta_data_arena = UnhookedArena();
+  }
+  Arena *result =
+      new (AllocWithArena(sizeof(*result), meta_data_arena)) Arena(flags);
+  return result;
+}
+
+// L < arena->mu, L < arena->arena->mu
+bool LowLevelAlloc::DeleteArena(Arena *arena) {
+  ABSL_RAW_CHECK(
+      arena != nullptr && arena != DefaultArena() && arena != UnhookedArena(),
+      "may not delete default arena");
+  ArenaLock section(arena);
+  if (arena->allocation_count != 0) {
+    section.Leave();
+    return false;
+  }
+  while (arena->freelist.next[0] != nullptr) {
+    AllocList *region = arena->freelist.next[0];
+    size_t size = region->header.size;
+    arena->freelist.next[0] = region->next[0];
+    ABSL_RAW_CHECK(
+        region->header.magic == Magic(kMagicUnallocated, &region->header),
+        "bad magic number in DeleteArena()");
+    ABSL_RAW_CHECK(region->header.arena == arena,
+                   "bad arena pointer in DeleteArena()");
+    ABSL_RAW_CHECK(size % arena->pagesize == 0,
+                   "empty arena has non-page-aligned block size");
+    ABSL_RAW_CHECK(reinterpret_cast<uintptr_t>(region) % arena->pagesize == 0,
+                   "empty arena has non-page-aligned block");
+    int munmap_result;
+#ifdef _WIN32
+    munmap_result = VirtualFree(region, 0, MEM_RELEASE);
+    ABSL_RAW_CHECK(munmap_result != 0,
+                   "LowLevelAlloc::DeleteArena: VirtualFree failed");
+#else
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
+      munmap_result = munmap(region, size);
+    } else {
+      munmap_result = base_internal::DirectMunmap(region, size);
+    }
+#else
+    munmap_result = munmap(region, size);
+#endif  // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    if (munmap_result != 0) {
+      ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d",
+                   errno);
+    }
+#endif  // _WIN32
+  }
+  section.Leave();
+  arena->~Arena();
+  Free(arena);
+  return true;
+}
+
+// ---------------------------------------------------------------------------
+
+// Addition, checking for overflow.  The intent is to die if an external client
+// manages to push through a request that would cause arithmetic to fail.
+static inline uintptr_t CheckedAdd(uintptr_t a, uintptr_t b) {
+  uintptr_t sum = a + b;
+  ABSL_RAW_CHECK(sum >= a, "LowLevelAlloc arithmetic overflow");
+  return sum;
+}
+
+// Return value rounded up to next multiple of align.
+// align must be a power of two.
+static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) {
+  return CheckedAdd(addr, align - 1) & ~(align - 1);
+}
+
+// Equivalent to "return prev->next[i]" but with sanity checking
+// that the freelist is in the correct order, that it
+// consists of regions marked "unallocated", and that no two regions
+// are adjacent in memory (they should have been coalesced).
+// L >= arena->mu
+static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
+  ABSL_RAW_CHECK(i < prev->levels, "too few levels in Next()");
+  AllocList *next = prev->next[i];
+  if (next != nullptr) {
+    ABSL_RAW_CHECK(
+        next->header.magic == Magic(kMagicUnallocated, &next->header),
+        "bad magic number in Next()");
+    ABSL_RAW_CHECK(next->header.arena == arena, "bad arena pointer in Next()");
+    if (prev != &arena->freelist) {
+      ABSL_RAW_CHECK(prev < next, "unordered freelist");
+      ABSL_RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
+                         reinterpret_cast<char *>(next),
+                     "malformed freelist");
+    }
+  }
+  return next;
+}
+
+// Coalesce list item "a" with its successor if they are adjacent.
+static void Coalesce(AllocList *a) {
+  AllocList *n = a->next[0];
+  if (n != nullptr && reinterpret_cast<char *>(a) + a->header.size ==
+                          reinterpret_cast<char *>(n)) {
+    LowLevelAlloc::Arena *arena = a->header.arena;
+    a->header.size += n->header.size;
+    n->header.magic = 0;
+    n->header.arena = nullptr;
+    AllocList *prev[kMaxLevel];
+    LLA_SkiplistDelete(&arena->freelist, n, prev);
+    LLA_SkiplistDelete(&arena->freelist, a, prev);
+    a->levels =
+        LLA_SkiplistLevels(a->header.size, arena->min_size, &arena->random);
+    LLA_SkiplistInsert(&arena->freelist, a, prev);
+  }
+}
+
+// Adds block at location "v" to the free list
+// L >= arena->mu
+static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
+  AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) -
+                                               sizeof(f->header));
+  ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
+                 "bad magic number in AddToFreelist()");
+  ABSL_RAW_CHECK(f->header.arena == arena,
+                 "bad arena pointer in AddToFreelist()");
+  f->levels =
+      LLA_SkiplistLevels(f->header.size, arena->min_size, &arena->random);
+  AllocList *prev[kMaxLevel];
+  LLA_SkiplistInsert(&arena->freelist, f, prev);
+  f->header.magic = Magic(kMagicUnallocated, &f->header);
+  Coalesce(f);        // maybe coalesce with successor
+  Coalesce(prev[0]);  // maybe coalesce with predecessor
+}
+
+// Frees storage allocated by LowLevelAlloc::Alloc().
+// L < arena->mu
+void LowLevelAlloc::Free(void *v) {
+  if (v != nullptr) {
+    AllocList *f = reinterpret_cast<AllocList *>(reinterpret_cast<char *>(v) -
+                                                 sizeof(f->header));
+    LowLevelAlloc::Arena *arena = f->header.arena;
+    ArenaLock section(arena);
+    AddToFreelist(v, arena);
+    ABSL_RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
+    arena->allocation_count--;
+    section.Leave();
+  }
+}
+
+// allocates and returns a block of size bytes, to be freed with Free()
+// L < arena->mu
+static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
+  void *result = nullptr;
+  if (request != 0) {
+    AllocList *s;  // will point to region that satisfies request
+    ArenaLock section(arena);
+    // round up with header
+    size_t req_rnd =
+        RoundUp(CheckedAdd(request, sizeof(s->header)), arena->round_up);
+    for (;;) {  // loop until we find a suitable region
+      // find the minimum levels that a block of this size must have
+      int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1;
+      if (i < arena->freelist.levels) {        // potential blocks exist
+        AllocList *before = &arena->freelist;  // predecessor of s
+        while ((s = Next(i, before, arena)) != nullptr &&
+               s->header.size < req_rnd) {
+          before = s;
+        }
+        if (s != nullptr) {  // we found a region
+          break;
+        }
+      }
+      // we unlock before mmap() both because mmap() may call a callback hook,
+      // and because it may be slow.
+      arena->mu.Unlock();
+      // mmap generous 64K chunks to decrease
+      // the chances/impact of fragmentation:
+      size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
+      void *new_pages;
+#ifdef _WIN32
+      new_pages = VirtualAlloc(nullptr, new_pages_size,
+                               MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+      ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed");
+#else
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
+        new_pages = base_internal::DirectMmap(nullptr, new_pages_size,
+            PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+      } else {
+        new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
+                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+      }
+#else
+      new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ,
+                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+#endif  // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+      if (new_pages == MAP_FAILED) {
+        ABSL_RAW_LOG(FATAL, "mmap error: %d", errno);
+      }
+
+#ifdef __linux__
+#if defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
+      // Attempt to name the allocated address range in /proc/$PID/smaps on
+      // Linux.
+      //
+      // This invocation of prctl() may fail if the Linux kernel was not
+      // configured with the CONFIG_ANON_VMA_NAME option.  This is OK since
+      // the naming of arenas is primarily a debugging aid.
+      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, new_pages, new_pages_size,
+            "absl");
+#endif
+#endif  // __linux__
+#endif  // _WIN32
+      arena->mu.Lock();
+      s = reinterpret_cast<AllocList *>(new_pages);
+      s->header.size = new_pages_size;
+      // Pretend the block is allocated; call AddToFreelist() to free it.
+      s->header.magic = Magic(kMagicAllocated, &s->header);
+      s->header.arena = arena;
+      AddToFreelist(&s->levels, arena);  // insert new region into free list
+    }
+    AllocList *prev[kMaxLevel];
+    LLA_SkiplistDelete(&arena->freelist, s, prev);  // remove from free list
+    // s points to the first free region that's big enough
+    if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) {
+      // big enough to split
+      AllocList *n =
+          reinterpret_cast<AllocList *>(req_rnd + reinterpret_cast<char *>(s));
+      n->header.size = s->header.size - req_rnd;
+      n->header.magic = Magic(kMagicAllocated, &n->header);
+      n->header.arena = arena;
+      s->header.size = req_rnd;
+      AddToFreelist(&n->levels, arena);
+    }
+    s->header.magic = Magic(kMagicAllocated, &s->header);
+    ABSL_RAW_CHECK(s->header.arena == arena, "");
+    arena->allocation_count++;
+    section.Leave();
+    result = &s->levels;
+  }
+  ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request);
+  return result;
+}
+
+void *LowLevelAlloc::Alloc(size_t request) {
+  void *result = DoAllocWithArena(request, DefaultArena());
+  return result;
+}
+
+void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
+  ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena");
+  void *result = DoAllocWithArena(request, arena);
+  return result;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_LOW_LEVEL_ALLOC_MISSING

+ 127 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/low_level_alloc.h

@@ -0,0 +1,127 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
+#define ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
+
+// A simple thread-safe memory allocator that does not depend on
+// mutexes or thread-specific data.  It is intended to be used
+// sparingly, and only when malloc() would introduce an unwanted
+// dependency, such as inside the heap-checker, or the Mutex
+// implementation.
+
+// IWYU pragma: private, include "base/low_level_alloc.h"
+
+#include <sys/types.h>
+
+#include <cstdint>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+
+// LowLevelAlloc requires that the platform support low-level
+// allocation of virtual memory. Platforms lacking this cannot use
+// LowLevelAlloc.
+#ifdef ABSL_LOW_LEVEL_ALLOC_MISSING
+#error ABSL_LOW_LEVEL_ALLOC_MISSING cannot be directly set
+#elif !defined(ABSL_HAVE_MMAP) && !defined(_WIN32)
+#define ABSL_LOW_LEVEL_ALLOC_MISSING 1
+#endif
+
+// Using LowLevelAlloc with kAsyncSignalSafe isn't supported on Windows or
+// asm.js / WebAssembly.
+// See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html
+// for more information.
+#ifdef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+#error ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING cannot be directly set
+#elif defined(_WIN32) || defined(__asmjs__) || defined(__wasm__) || \
+    defined(__hexagon__)
+#define ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING 1
+#endif
+
+#include <cstddef>
+
+#include "absl/base/port.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+class LowLevelAlloc {
+ public:
+  struct Arena;       // an arena from which memory may be allocated
+
+  // Returns a pointer to a block of at least "request" bytes
+  // that have been newly allocated from the specified arena. For Alloc(),
+  // the DefaultArena() is used.
+  // Returns 0 if passed request==0.
+  // Does not return 0 under other circumstances; it crashes if memory
+  // is not available.
+  static void *Alloc(size_t request) ABSL_ATTRIBUTE_SECTION(malloc_hook);
+  static void *AllocWithArena(size_t request, Arena *arena)
+      ABSL_ATTRIBUTE_SECTION(malloc_hook);
+
+  // Deallocates a region of memory that was previously allocated with
+  // Alloc().   Does nothing if passed 0.   "s" must be either 0,
+  // or must have been returned from a call to Alloc() and not yet passed to
+  // Free() since that call to Alloc().  The space is returned to the arena
+  // from which it was allocated.
+  static void Free(void *s) ABSL_ATTRIBUTE_SECTION(malloc_hook);
+
+  // The ABSL_ATTRIBUTE_SECTION(malloc_hook) annotations on Alloc* and Free
+  // place all callers of MallocHook::Invoke* in this module into a special
+  // section, so that MallocHook::GetCallerStackTrace can function accurately.
+
+  // Create a new arena.
+  // The root metadata for the new arena is allocated in an internal
+  // metadata arena chosen to match the provided flags.
+  // These values may be ORed into flags:
+  enum {
+    // Report calls to Alloc() and Free() via the MallocHook interface.
+    // Set in the DefaultArena.
+    kCallMallocHook = 0x0001,
+
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    // Make calls to Alloc(), Free() be async-signal-safe. Not set in
+    // DefaultArena(). Not supported on all platforms.
+    kAsyncSignalSafe = 0x0002,
+#endif
+  };
+  // Construct a new arena.  The allocation of the underlying metadata honors
+  // the provided flags.  For example, the call NewArena(kAsyncSignalSafe)
+  // is itself async-signal-safe, as well as generating an arena that provides
+  // async-signal-safe Alloc/Free.
+  static Arena *NewArena(uint32_t flags);
+
+  // Destroys an arena allocated by NewArena and returns true,
+  // provided no allocated blocks remain in the arena.
+  // If allocated blocks remain in the arena, does nothing and
+  // returns false.
+  // It is illegal to attempt to destroy the DefaultArena().
+  static bool DeleteArena(Arena *arena);
+
+  // The default arena that always exists.
+  static Arena *DefaultArena();
+
+ private:
+  LowLevelAlloc();      // no instances
+};
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
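
A short sketch of the arena lifecycle the header documents: create an arena, allocate from it, free every block, then delete it. Illustrative only and not part of this commit; the flags value 0 simply requests a plain arena.

#include "absl/base/internal/low_level_alloc.h"
#include "absl/base/internal/raw_logging.h"

#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING
void ArenaLifecycleExample() {
  using absl::base_internal::LowLevelAlloc;
  LowLevelAlloc::Arena* arena = LowLevelAlloc::NewArena(/*flags=*/0);
  void* block = LowLevelAlloc::AllocWithArena(256, arena);
  // ... use the 256-byte block ...
  LowLevelAlloc::Free(block);  // returned to `arena`
  // DeleteArena() succeeds only once every block has been freed.
  ABSL_RAW_CHECK(LowLevelAlloc::DeleteArena(arena), "arena still has blocks");
}
#endif  // !ABSL_LOW_LEVEL_ALLOC_MISSING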

+ 180 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/low_level_alloc_test.cc

@@ -0,0 +1,180 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/low_level_alloc.h"
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <thread>  // NOLINT(build/c++11)
+#include <unordered_map>
+#include <utility>
+
+#ifdef __EMSCRIPTEN__
+#include <emscripten.h>
+#endif
+
+#include "absl/container/node_hash_map.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+namespace {
+
+// This test doesn't use gtest since it needs to test that everything
+// works before main().
+#define TEST_ASSERT(x)                                           \
+  if (!(x)) {                                                    \
+    printf("TEST_ASSERT(%s) FAILED ON LINE %d\n", #x, __LINE__); \
+    abort();                                                     \
+  }
+
+// a block of memory obtained from the allocator
+struct BlockDesc {
+  char *ptr;      // pointer to memory
+  int len;        // number of bytes
+  int fill;       // filled with data starting with this
+};
+
+// Check that the pattern placed in the block d
+// by RandomizeBlockDesc is still there.
+static void CheckBlockDesc(const BlockDesc &d) {
+  for (int i = 0; i != d.len; i++) {
+    TEST_ASSERT((d.ptr[i] & 0xff) == ((d.fill + i) & 0xff));
+  }
+}
+
+// Fill the block "*d" with a pattern
+// starting with a random byte.
+static void RandomizeBlockDesc(BlockDesc *d) {
+  d->fill = rand() & 0xff;
+  for (int i = 0; i != d->len; i++) {
+    d->ptr[i] = (d->fill + i) & 0xff;
+  }
+}
+
+// Used to indicate to the malloc hooks that
+// this call is from LowLevelAlloc.
+static bool using_low_level_alloc = false;
+
+// n times, toss a coin, and based on the outcome
+// either allocate a new block or deallocate an old block.
+// New blocks are placed in a std::unordered_map with a random key
+// and initialized with RandomizeBlockDesc().
+// If keys conflict, the older block is freed.
+// Old blocks are always checked with CheckBlockDesc()
+// before being freed.  At the end of the run,
+// all remaining allocated blocks are freed.
+// If use_new_arena is true, use a fresh arena, and then delete it.
+// If call_malloc_hook is true and use_new_arena is true,
+// allocations and deallocations are reported via the MallocHook
+// interface.
+static void Test(bool use_new_arena, bool call_malloc_hook, int n) {
+  typedef absl::node_hash_map<int, BlockDesc> AllocMap;
+  AllocMap allocated;
+  AllocMap::iterator it;
+  BlockDesc block_desc;
+  int rnd;
+  LowLevelAlloc::Arena *arena = nullptr;
+  if (use_new_arena) {
+    int32_t flags = call_malloc_hook ? LowLevelAlloc::kCallMallocHook : 0;
+    arena = LowLevelAlloc::NewArena(flags);
+  }
+  for (int i = 0; i != n; i++) {
+    if (i != 0 && i % 10000 == 0) {
+      printf(".");
+      fflush(stdout);
+    }
+
+    switch (rand() & 1) {      // toss a coin
+    case 0:     // coin came up heads: add a block
+      using_low_level_alloc = true;
+      block_desc.len = rand() & 0x3fff;
+      block_desc.ptr = reinterpret_cast<char *>(
+          arena == nullptr
+              ? LowLevelAlloc::Alloc(block_desc.len)
+              : LowLevelAlloc::AllocWithArena(block_desc.len, arena));
+      using_low_level_alloc = false;
+      RandomizeBlockDesc(&block_desc);
+      rnd = rand();
+      it = allocated.find(rnd);
+      if (it != allocated.end()) {
+        CheckBlockDesc(it->second);
+        using_low_level_alloc = true;
+        LowLevelAlloc::Free(it->second.ptr);
+        using_low_level_alloc = false;
+        it->second = block_desc;
+      } else {
+        allocated[rnd] = block_desc;
+      }
+      break;
+    case 1:     // coin came up tails: remove a block
+      it = allocated.begin();
+      if (it != allocated.end()) {
+        CheckBlockDesc(it->second);
+        using_low_level_alloc = true;
+        LowLevelAlloc::Free(it->second.ptr);
+        using_low_level_alloc = false;
+        allocated.erase(it);
+      }
+      break;
+    }
+  }
+  // remove all remaining blocks
+  while ((it = allocated.begin()) != allocated.end()) {
+    CheckBlockDesc(it->second);
+    using_low_level_alloc = true;
+    LowLevelAlloc::Free(it->second.ptr);
+    using_low_level_alloc = false;
+    allocated.erase(it);
+  }
+  if (use_new_arena) {
+    TEST_ASSERT(LowLevelAlloc::DeleteArena(arena));
+  }
+}
+
+// LowLevelAlloc is designed to be safe to call before main().
+static struct BeforeMain {
+  BeforeMain() {
+    Test(false, false, 50000);
+    Test(true, false, 50000);
+    Test(true, true, 50000);
+  }
+} before_main;
+
+}  // namespace
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+int main(int argc, char *argv[]) {
+  // The actual test runs in the global constructor of `before_main`.
+  printf("PASS\n");
+#ifdef __EMSCRIPTEN__
+  // clang-format off
+// This is JS here. Don't try to format it.
+    MAIN_THREAD_EM_ASM({
+      if (ENVIRONMENT_IS_WEB) {
+        if (typeof TEST_FINISH === 'function') {
+          TEST_FINISH($0);
+        } else {
+          console.error('Attempted to exit with status ' + $0);
+          console.error('But TEST_FINISH is not a function.');
+        }
+      }
+    }, 0);
+// clang-format on
+#endif
+  return 0;
+}

+ 134 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/low_level_scheduling.h

@@ -0,0 +1,134 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Core interfaces and definitions used by low-level interfaces such as
+// SpinLock.
+
+#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
+#define ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/macros.h"
+
+// The following two declarations exist so SchedulingGuard may friend them with
+// the appropriate language linkage.  These callbacks allow libc internals, such
+// as function level statics, to schedule cooperatively when locking.
+extern "C" bool __google_disable_rescheduling(void);
+extern "C" void __google_enable_rescheduling(bool disable_result);
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+class CondVar;
+class Mutex;
+
+namespace synchronization_internal {
+int MutexDelay(int32_t c, int mode);
+}  // namespace synchronization_internal
+
+namespace base_internal {
+
+class SchedulingHelper;  // To allow use of SchedulingGuard.
+class SpinLock;          // To allow use of SchedulingGuard.
+
+// SchedulingGuard
+// Provides guard semantics that may be used to disable cooperative rescheduling
+// of the calling thread within specific program blocks.  This is used to
+// protect resources (e.g. low-level SpinLocks or Domain code) that cooperative
+// scheduling depends on.
+//
+// Domain implementations capable of rescheduling in reaction to involuntary
+// kernel thread actions (e.g. blocking due to a page fault or syscall) must
+// guarantee that an annotated thread is not allowed to (cooperatively)
+// reschedule until the annotated region is complete.
+//
+// It is an error to attempt to use a cooperatively scheduled resource (e.g.
+// Mutex) within a rescheduling-disabled region.
+//
+// All methods are async-signal safe.
+class SchedulingGuard {
+ public:
+  // Returns true iff the calling thread may be cooperatively rescheduled.
+  static bool ReschedulingIsAllowed();
+  SchedulingGuard(const SchedulingGuard&) = delete;
+  SchedulingGuard& operator=(const SchedulingGuard&) = delete;
+
+ private:
+  // Disable cooperative rescheduling of the calling thread.  It may still
+  // initiate scheduling operations (e.g. wake-ups), however, it may not itself
+  // reschedule.  Nestable.  The returned result is opaque, clients should not
+  // attempt to interpret it.
+  // REQUIRES: Result must be passed to a pairing EnableScheduling().
+  static bool DisableRescheduling();
+
+  // Marks the end of a rescheduling disabled region, previously started by
+  // DisableRescheduling().
+  // REQUIRES: Pairs with innermost call (and result) of DisableRescheduling().
+  static void EnableRescheduling(bool disable_result);
+
+  // A scoped helper for {Disable, Enable}Rescheduling().
+  // REQUIRES: destructor must run in same thread as constructor.
+  struct ScopedDisable {
+    ScopedDisable() { disabled = SchedulingGuard::DisableRescheduling(); }
+    ~ScopedDisable() { SchedulingGuard::EnableRescheduling(disabled); }
+
+    bool disabled;
+  };
+
+  // A scoped helper to enable rescheduling temporarily.
+  // REQUIRES: destructor must run in same thread as constructor.
+  class ScopedEnable {
+   public:
+    ScopedEnable();
+    ~ScopedEnable();
+
+   private:
+    int scheduling_disabled_depth_;
+  };
+
+  // Access to SchedulingGuard is explicitly permitted.
+  friend class absl::CondVar;
+  friend class absl::Mutex;
+  friend class SchedulingHelper;
+  friend class SpinLock;
+  friend int absl::synchronization_internal::MutexDelay(int32_t c, int mode);
+};
+
+//------------------------------------------------------------------------------
+// End of public interfaces.
+//------------------------------------------------------------------------------
+
+inline bool SchedulingGuard::ReschedulingIsAllowed() {
+  return false;
+}
+
+inline bool SchedulingGuard::DisableRescheduling() {
+  return false;
+}
+
+inline void SchedulingGuard::EnableRescheduling(bool /* disable_result */) {
+  return;
+}
+
+inline SchedulingGuard::ScopedEnable::ScopedEnable()
+    : scheduling_disabled_depth_(0) {}
+inline SchedulingGuard::ScopedEnable::~ScopedEnable() {
+  ABSL_RAW_CHECK(scheduling_disabled_depth_ == 0, "disable unused warning");
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_
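
Because every member of SchedulingGuard is private, only the friended classes listed above can use it. A hypothetical sketch of what that usage looks like from inside such a class (illustrative only; the real SchedulingHelper lives elsewhere in Abseil, so this definition exists purely to show the pattern):

#include "absl/base/internal/low_level_scheduling.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

// Illustrative only: a friended helper disabling cooperative rescheduling
// around a critical region. In this stub build the guard is a no-op
// (DisableRescheduling() always returns false).
class SchedulingHelper {
 public:
  static void CriticalRegion() {
    SchedulingGuard::ScopedDisable no_reschedule;
    // ... touch state that a cooperative reschedule must not interrupt ...
  }  // rescheduling re-enabled when `no_reschedule` is destroyed
};

}  // namespace base_internal
ABSL_NAMESPACE_END
}  // namespace absl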

+ 106 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/nullability_impl.h

@@ -0,0 +1,106 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_NULLABILITY_IMPL_H_
+#define ABSL_BASE_INTERNAL_NULLABILITY_IMPL_H_
+
+#include <memory>
+#include <type_traits>
+
+#include "absl/base/attributes.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+
+namespace nullability_internal {
+
+// `IsNullabilityCompatible` checks whether its first argument is a class
+// explicitly tagged as supporting nullability annotations. The tag is the type
+// declaration `absl_nullability_compatible`.
+template <typename, typename = void>
+struct IsNullabilityCompatible : std::false_type {};
+
+template <typename T>
+struct IsNullabilityCompatible<
+    T, absl::void_t<typename T::absl_nullability_compatible>> : std::true_type {
+};
+
+template <typename T>
+constexpr bool IsSupportedType = IsNullabilityCompatible<T>::value;
+
+template <typename T>
+constexpr bool IsSupportedType<T*> = true;
+
+template <typename T, typename U>
+constexpr bool IsSupportedType<T U::*> = true;
+
+template <typename T, typename... Deleter>
+constexpr bool IsSupportedType<std::unique_ptr<T, Deleter...>> = true;
+
+template <typename T>
+constexpr bool IsSupportedType<std::shared_ptr<T>> = true;
+
+template <typename T>
+struct EnableNullable {
+  static_assert(nullability_internal::IsSupportedType<std::remove_cv_t<T>>,
+                "Template argument must be a raw or supported smart pointer "
+                "type. See absl/base/nullability.h.");
+  using type = T;
+};
+
+template <typename T>
+struct EnableNonnull {
+  static_assert(nullability_internal::IsSupportedType<std::remove_cv_t<T>>,
+                "Template argument must be a raw or supported smart pointer "
+                "type. See absl/base/nullability.h.");
+  using type = T;
+};
+
+template <typename T>
+struct EnableNullabilityUnknown {
+  static_assert(nullability_internal::IsSupportedType<std::remove_cv_t<T>>,
+                "Template argument must be a raw or supported smart pointer "
+                "type. See absl/base/nullability.h.");
+  using type = T;
+};
+
+// Note: we do not apply Clang nullability attributes (e.g. _Nullable).  These
+// only support raw pointers, and conditionally enabling them only for raw
+// pointers inhibits template arg deduction.  Ideally, they would support all
+// pointer-like types.
+template <typename T, typename = typename EnableNullable<T>::type>
+using NullableImpl
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate)
+    [[clang::annotate("Nullable")]]
+#endif
+    = T;
+
+template <typename T, typename = typename EnableNonnull<T>::type>
+using NonnullImpl
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate)
+    [[clang::annotate("Nonnull")]]
+#endif
+    = T;
+
+template <typename T, typename = typename EnableNullabilityUnknown<T>::type>
+using NullabilityUnknownImpl
+#if ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate)
+    [[clang::annotate("Nullability_Unspecified")]]
+#endif
+    = T;
+
+}  // namespace nullability_internal
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_NULLABILITY_IMPL_H_
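
This impl header is what the public aliases in absl/base/nullability.h resolve to. A hedged sketch of the opt-in mechanism: a pointer-like type advertises support by declaring the `absl_nullability_compatible` member type, which IsNullabilityCompatible detects (MyPtr below is hypothetical):

#include <memory>

#include "absl/base/internal/nullability_impl.h"

// A hypothetical smart pointer opting in to nullability annotations.
template <typename T>
class MyPtr {
 public:
  using absl_nullability_compatible = void;  // the opt-in tag
  T* get() const { return raw_; }

 private:
  T* raw_ = nullptr;
};

// Raw pointers and the standard smart pointers are supported out of the box;
// MyPtr qualifies via its tag. A plain `int` would trip the static_assert.
using A = absl::nullability_internal::NullableImpl<int*>;
using B = absl::nullability_internal::NonnullImpl<std::shared_ptr<int>>;
using C = absl::nullability_internal::NullabilityUnknownImpl<MyPtr<int>>;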

+ 52 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/per_thread_tls.h

@@ -0,0 +1,52 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
+#define ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
+
+// This header defines two macros:
+//
+// If the platform supports thread-local storage:
+//
+// * ABSL_PER_THREAD_TLS_KEYWORD is the C keyword needed to declare a
+//   thread-local variable
+// * ABSL_PER_THREAD_TLS is 1
+//
+// Otherwise:
+//
+// * ABSL_PER_THREAD_TLS_KEYWORD is empty
+// * ABSL_PER_THREAD_TLS is 0
+//
+// Microsoft C supports thread-local storage.
+// GCC supports it if the appropriate version of glibc is available,
+// which the programmer can indicate by defining ABSL_HAVE_TLS.
+
+#include "absl/base/port.h"  // For ABSL_HAVE_TLS
+
+#if defined(ABSL_PER_THREAD_TLS)
+#error ABSL_PER_THREAD_TLS cannot be directly set
+#elif defined(ABSL_PER_THREAD_TLS_KEYWORD)
+#error ABSL_PER_THREAD_TLS_KEYWORD cannot be directly set
+#elif defined(ABSL_HAVE_TLS)
+#define ABSL_PER_THREAD_TLS_KEYWORD __thread
+#define ABSL_PER_THREAD_TLS 1
+#elif defined(_MSC_VER)
+#define ABSL_PER_THREAD_TLS_KEYWORD __declspec(thread)
+#define ABSL_PER_THREAD_TLS 1
+#else
+#define ABSL_PER_THREAD_TLS_KEYWORD
+#define ABSL_PER_THREAD_TLS 0
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_
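
A minimal sketch of the two macros in use (illustrative, not part of the commit): declare a per-thread variable when TLS is available and fall back gracefully when it is not.

#include "absl/base/internal/per_thread_tls.h"

#if ABSL_PER_THREAD_TLS
// Each thread gets its own copy of the counter.
ABSL_PER_THREAD_TLS_KEYWORD int per_thread_counter = 0;
inline void Bump() { ++per_thread_counter; }
#else
// No TLS keyword on this platform: callers would need pthread_getspecific
// (or similar) instead of a plain thread-local variable.
#endif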

+ 33 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/pretty_function.h

@@ -0,0 +1,33 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
+#define ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
+
+// ABSL_PRETTY_FUNCTION
+//
+// In C++11, __func__ gives the undecorated name of the current function.  That
+// is, "main", not "int main()".  Various compilers give extra macros to get the
+// decorated function name, including return type and arguments, to
+// differentiate between overload sets.  ABSL_PRETTY_FUNCTION is a portable
+// version of these macros which forwards to the correct macro on each compiler.
+#if defined(_MSC_VER)
+#define ABSL_PRETTY_FUNCTION __FUNCSIG__
+#elif defined(__GNUC__)
+#define ABSL_PRETTY_FUNCTION __PRETTY_FUNCTION__
+#else
+#error "Unsupported compiler"
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
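
A small sketch of the macro in use (illustrative): the decorated name distinguishes template instantiations and overloads, which plain __func__ cannot.

#include <cstdio>

#include "absl/base/internal/pretty_function.h"

template <typename T>
void TraceEntry(const T& /*value*/) {
  // GCC/Clang print something like "void TraceEntry(const T&) [with T = int]";
  // MSVC prints the __FUNCSIG__ form.
  std::printf("entering %s\n", ABSL_PRETTY_FUNCTION);
}

// TraceEntry(42) and TraceEntry(3.5) produce distinct output.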

+ 280 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/raw_logging.cc

@@ -0,0 +1,280 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/raw_logging.h"
+
+#include <cstdarg>
+#include <cstddef>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <string>
+
+#ifdef __EMSCRIPTEN__
+#include <emscripten/console.h>
+#endif
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/internal/errno_saver.h"
+#include "absl/base/log_severity.h"
+
+// We know how to perform low-level writes to stderr in POSIX and Windows.  For
+// these platforms, we define the token ABSL_LOW_LEVEL_WRITE_SUPPORTED.
+// Much of raw_logging.cc becomes a no-op when we can't output messages,
+// although a FATAL ABSL_RAW_LOG message will still abort the process.
+
+// ABSL_HAVE_POSIX_WRITE is defined when the platform provides posix write()
+// (as from unistd.h)
+//
+// This preprocessor token is also defined in raw_io.cc.  If you need to copy
+// this, consider moving both to config.h instead.
+#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
+    defined(__hexagon__) || defined(__Fuchsia__) ||                     \
+    defined(__native_client__) || defined(__OpenBSD__) ||               \
+    defined(__EMSCRIPTEN__) || defined(__ASYLO__)
+
+#include <unistd.h>
+
+#define ABSL_HAVE_POSIX_WRITE 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_POSIX_WRITE
+#endif
+
+// ABSL_HAVE_SYSCALL_WRITE is defined when the platform provides the syscall
+//   syscall(SYS_write, /*int*/ fd, /*char* */ buf, /*size_t*/ len);
+// for low level operations that want to avoid libc.
+#if (defined(__linux__) || defined(__FreeBSD__)) && !defined(__ANDROID__)
+#include <sys/syscall.h>
+#define ABSL_HAVE_SYSCALL_WRITE 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_SYSCALL_WRITE
+#endif
+
+#ifdef _WIN32
+#include <io.h>
+
+#define ABSL_HAVE_RAW_IO 1
+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
+#else
+#undef ABSL_HAVE_RAW_IO
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace raw_log_internal {
+namespace {
+
+// TODO(gfalcon): We want raw-logging to work on as many platforms as possible.
+// Explicitly `#error` out when not `ABSL_LOW_LEVEL_WRITE_SUPPORTED`, except for
+// a selected set of platforms for which we expect not to be able to raw log.
+
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+constexpr char kTruncated[] = " ... (message truncated)\n";
+
+// sprintf the format to the buffer, adjusting *buf and *size to reflect the
+// consumed bytes, and return whether the message fit without truncation.  If
+// truncation occurred, if possible leave room in the buffer for the message
+// kTruncated[].
+bool VADoRawLog(char** buf, int* size, const char* format, va_list ap)
+    ABSL_PRINTF_ATTRIBUTE(3, 0);
+bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) {
+  if (*size < 0) return false;
+  int n = vsnprintf(*buf, static_cast<size_t>(*size), format, ap);
+  bool result = true;
+  if (n < 0 || n > *size) {
+    result = false;
+    if (static_cast<size_t>(*size) > sizeof(kTruncated)) {
+      n = *size - static_cast<int>(sizeof(kTruncated));
+    } else {
+      n = 0;  // no room for truncation message
+    }
+  }
+  *size -= n;
+  *buf += n;
+  return result;
+}
+#endif  // ABSL_LOW_LEVEL_WRITE_SUPPORTED
+
+constexpr int kLogBufSize = 3000;
+
+// CAVEAT: vsnprintf called from *DoRawLog below has some (exotic) code paths
+// that invoke malloc() and getenv() that might acquire some locks.
+
+// Helper for RawLog below.
+// *DoRawLog formats into *buf (at most *size bytes) and moves both past the
+// written portion.
+// It returns true iff there was no overflow or error.
+bool DoRawLog(char** buf, int* size, const char* format, ...)
+    ABSL_PRINTF_ATTRIBUTE(3, 4);
+bool DoRawLog(char** buf, int* size, const char* format, ...) {
+  if (*size < 0) return false;
+  va_list ap;
+  va_start(ap, format);
+  int n = vsnprintf(*buf, static_cast<size_t>(*size), format, ap);
+  va_end(ap);
+  if (n < 0 || n > *size) return false;
+  *size -= n;
+  *buf += n;
+  return true;
+}
+
+bool DefaultLogFilterAndPrefix(absl::LogSeverity, const char* file, int line,
+                               char** buf, int* buf_size) {
+  DoRawLog(buf, buf_size, "[%s : %d] RAW: ", file, line);
+  return true;
+}
+
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+absl::base_internal::AtomicHook<LogFilterAndPrefixHook>
+    log_filter_and_prefix_hook(DefaultLogFilterAndPrefix);
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES
+absl::base_internal::AtomicHook<AbortHook> abort_hook;
+
+void RawLogVA(absl::LogSeverity severity, const char* file, int line,
+              const char* format, va_list ap) ABSL_PRINTF_ATTRIBUTE(4, 0);
+void RawLogVA(absl::LogSeverity severity, const char* file, int line,
+              const char* format, va_list ap) {
+  char buffer[kLogBufSize];
+  char* buf = buffer;
+  int size = sizeof(buffer);
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+  bool enabled = true;
+#else
+  bool enabled = false;
+#endif
+
+#ifdef ABSL_MIN_LOG_LEVEL
+  if (severity < static_cast<absl::LogSeverity>(ABSL_MIN_LOG_LEVEL) &&
+      severity < absl::LogSeverity::kFatal) {
+    enabled = false;
+  }
+#endif
+
+  enabled = log_filter_and_prefix_hook(severity, file, line, &buf, &size);
+  const char* const prefix_end = buf;
+
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+  if (enabled) {
+    bool no_chop = VADoRawLog(&buf, &size, format, ap);
+    if (no_chop) {
+      DoRawLog(&buf, &size, "\n");
+    } else {
+      DoRawLog(&buf, &size, "%s", kTruncated);
+    }
+    AsyncSignalSafeWriteError(buffer, strlen(buffer));
+  }
+#else
+  static_cast<void>(format);
+  static_cast<void>(ap);
+  static_cast<void>(enabled);
+#endif
+
+  // Abort the process after logging a FATAL message, even if the output itself
+  // was suppressed.
+  if (severity == absl::LogSeverity::kFatal) {
+    abort_hook(file, line, buffer, prefix_end, buffer + kLogBufSize);
+    abort();
+  }
+}
+
+// Non-formatting version of RawLog().
+//
+// TODO(gfalcon): When string_view no longer depends on base, change this
+// interface to take its message as a string_view instead.
+void DefaultInternalLog(absl::LogSeverity severity, const char* file, int line,
+                        const std::string& message) {
+  RawLog(severity, file, line, "%.*s", static_cast<int>(message.size()),
+         message.data());
+}
+
+}  // namespace
+
+void AsyncSignalSafeWriteError(const char* s, size_t len) {
+  if (!len) return;
+  absl::base_internal::ErrnoSaver errno_saver;
+#if defined(__EMSCRIPTEN__)
+  // In WebAssembly, bypass filesystem emulation via fwrite.
+  if (s[len - 1] == '\n') {
+    // Skip a trailing newline character as emscripten_errn adds one itself.
+    len--;
+  }
+  // emscripten_errn was introduced in 3.1.41 but broken in standalone mode
+  // until 3.1.43.
+#if ABSL_INTERNAL_EMSCRIPTEN_VERSION >= 3001043
+  emscripten_errn(s, len);
+#else
+  char buf[kLogBufSize];
+  if (len >= kLogBufSize) {
+    len = kLogBufSize - 1;
+    constexpr size_t trunc_len = sizeof(kTruncated) - 2;
+    memcpy(buf + len - trunc_len, kTruncated, trunc_len);
+    buf[len] = '\0';
+    len -= trunc_len;
+  } else {
+    buf[len] = '\0';
+  }
+  memcpy(buf, s, len);
+  _emscripten_err(buf);
+#endif
+#elif defined(ABSL_HAVE_SYSCALL_WRITE)
+  // We prefer calling write via `syscall` to minimize the risk of libc doing
+  // something "helpful".
+  syscall(SYS_write, STDERR_FILENO, s, len);
+#elif defined(ABSL_HAVE_POSIX_WRITE)
+  write(STDERR_FILENO, s, len);
+#elif defined(ABSL_HAVE_RAW_IO)
+  _write(/* stderr */ 2, s, static_cast<unsigned>(len));
+#else
+  // stderr logging unsupported on this platform
+  (void)s;
+  (void)len;
+#endif
+}
+
+void RawLog(absl::LogSeverity severity, const char* file, int line,
+            const char* format, ...) {
+  va_list ap;
+  va_start(ap, format);
+  RawLogVA(severity, file, line, format, ap);
+  va_end(ap);
+}
+
+bool RawLoggingFullySupported() {
+#ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED
+  return true;
+#else   // !ABSL_LOW_LEVEL_WRITE_SUPPORTED
+  return false;
+#endif  // !ABSL_LOW_LEVEL_WRITE_SUPPORTED
+}
+
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL
+    absl::base_internal::AtomicHook<InternalLogFunction>
+        internal_log_function(DefaultInternalLog);
+
+void RegisterLogFilterAndPrefixHook(LogFilterAndPrefixHook func) {
+  log_filter_and_prefix_hook.Store(func);
+}
+
+void RegisterAbortHook(AbortHook func) { abort_hook.Store(func); }
+
+void RegisterInternalLogFunction(InternalLogFunction func) {
+  internal_log_function.Store(func);
+}
+
+}  // namespace raw_log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
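
The Register* functions defined at the end of this file are the supported way to customize raw logging. A hedged sketch of installing both hooks (the names MyFilterAndPrefix, MyAbortHook, and InstallRawLogHooks are hypothetical; the prefix hook must advance *buf and shrink *buf_size by whatever it writes, and returning false suppresses output without suppressing the FATAL abort):

#include <cstdio>
#include <cstring>

#include "absl/base/internal/raw_logging.h"
#include "absl/base/log_severity.h"

namespace {

bool MyFilterAndPrefix(absl::LogSeverity severity, const char* file, int line,
                       char** buf, int* buf_size) {
  int n = std::snprintf(*buf, static_cast<std::size_t>(*buf_size),
                        "[myapp %s:%d] ", file, line);
  if (n > 0 && n < *buf_size) {
    *buf += n;        // advance past the prefix we wrote
    *buf_size -= n;
  }
  return severity >= absl::LogSeverity::kWarning;  // drop INFO output
}

void MyAbortHook(const char* /*file*/, int /*line*/, const char* /*buf_start*/,
                 const char* prefix_end, const char* /*buf_end*/) {
  // Re-emit just the NUL-terminated message portion; raw_logging calls
  // abort() once this hook returns.
  absl::raw_log_internal::AsyncSignalSafeWriteError(prefix_end,
                                                    std::strlen(prefix_end));
}

}  // namespace

void InstallRawLogHooks() {
  absl::raw_log_internal::RegisterLogFilterAndPrefixHook(MyFilterAndPrefix);
  absl::raw_log_internal::RegisterAbortHook(MyAbortHook);
}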

+ 217 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/raw_logging.h

@@ -0,0 +1,217 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Thread-safe logging routines that do not allocate any memory or
+// acquire any locks, and can therefore be used by low-level memory
+// allocation, synchronization, and signal-handling code.
+
+#ifndef ABSL_BASE_INTERNAL_RAW_LOGGING_H_
+#define ABSL_BASE_INTERNAL_RAW_LOGGING_H_
+
+#include <string>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/log_severity.h"
+#include "absl/base/macros.h"
+#include "absl/base/optimization.h"
+#include "absl/base/port.h"
+
+// This is similar to LOG(severity) << format..., but
+// * it is to be used ONLY by low-level modules that can't use normal LOG()
+// * it is designed to be a low-level logger that does not allocate any
+//   memory and does not need any locks, hence:
+// * it logs straight and ONLY to STDERR w/o buffering
+// * it uses an explicit printf-format and arguments list
+// * it will silently chop off really long message strings
+// Usage example:
+//   ABSL_RAW_LOG(ERROR, "Failed foo with %i: %s", status, error);
+// This will print an almost standard log line like this to stderr only:
+//   E0821 211317 file.cc:123] RAW: Failed foo with 22: bad_file
+
+#define ABSL_RAW_LOG(severity, ...)                                            \
+  do {                                                                         \
+    constexpr const char* absl_raw_log_internal_basename =                     \
+        ::absl::raw_log_internal::Basename(__FILE__, sizeof(__FILE__) - 1);    \
+    ::absl::raw_log_internal::RawLog(ABSL_RAW_LOG_INTERNAL_##severity,         \
+                                     absl_raw_log_internal_basename, __LINE__, \
+                                     __VA_ARGS__);                             \
+    ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_##severity;                        \
+  } while (0)
+
+// Similar to CHECK(condition) << message, but for low-level modules:
+// we use only ABSL_RAW_LOG that does not allocate memory.
+// We do not want to provide args list here to encourage this usage:
+//   if (!cond)  ABSL_RAW_LOG(FATAL, "foo ...", hard_to_compute_args);
+// so that the args are not computed when not needed.
+#define ABSL_RAW_CHECK(condition, message)                             \
+  do {                                                                 \
+    if (ABSL_PREDICT_FALSE(!(condition))) {                            \
+      ABSL_RAW_LOG(FATAL, "Check %s failed: %s", #condition, message); \
+    }                                                                  \
+  } while (0)
+
+// ABSL_INTERNAL_LOG and ABSL_INTERNAL_CHECK work like the RAW variants above,
+// except that if the richer log library is linked into the binary, we dispatch
+// to that instead.  This is potentially useful for internal logging and
+// assertions, where we are using RAW_LOG neither for its async-signal-safety
+// nor for its non-allocating nature, but rather because raw logging has very
+// few other dependencies.
+//
+// The API is a subset of the above: each macro only takes two arguments.  Use
+// StrCat if you need to build a richer message.
+#define ABSL_INTERNAL_LOG(severity, message)                              \
+  do {                                                                    \
+    constexpr const char* absl_raw_log_internal_filename = __FILE__;      \
+    ::absl::raw_log_internal::internal_log_function(                      \
+        ABSL_RAW_LOG_INTERNAL_##severity, absl_raw_log_internal_filename, \
+        __LINE__, message);                                               \
+    ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_##severity;                   \
+  } while (0)
+
+#define ABSL_INTERNAL_CHECK(condition, message)                    \
+  do {                                                             \
+    if (ABSL_PREDICT_FALSE(!(condition))) {                        \
+      std::string death_message = "Check " #condition " failed: "; \
+      death_message += std::string(message);                       \
+      ABSL_INTERNAL_LOG(FATAL, death_message);                     \
+    }                                                              \
+  } while (0)
+
+#ifndef NDEBUG
+
+#define ABSL_RAW_DLOG(severity, ...) ABSL_RAW_LOG(severity, __VA_ARGS__)
+#define ABSL_RAW_DCHECK(condition, message) ABSL_RAW_CHECK(condition, message)
+
+#else  // NDEBUG
+
+#define ABSL_RAW_DLOG(severity, ...)                   \
+  while (false) ABSL_RAW_LOG(severity, __VA_ARGS__)
+#define ABSL_RAW_DCHECK(condition, message) \
+  while (false) ABSL_RAW_CHECK(condition, message)
+
+#endif  // NDEBUG
+
+#define ABSL_RAW_LOG_INTERNAL_INFO ::absl::LogSeverity::kInfo
+#define ABSL_RAW_LOG_INTERNAL_WARNING ::absl::LogSeverity::kWarning
+#define ABSL_RAW_LOG_INTERNAL_ERROR ::absl::LogSeverity::kError
+#define ABSL_RAW_LOG_INTERNAL_FATAL ::absl::LogSeverity::kFatal
+#define ABSL_RAW_LOG_INTERNAL_DFATAL ::absl::kLogDebugFatal
+#define ABSL_RAW_LOG_INTERNAL_LEVEL(severity) \
+  ::absl::NormalizeLogSeverity(severity)
+
+#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_INFO
+#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_WARNING
+#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_ERROR
+#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_FATAL ABSL_UNREACHABLE()
+#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_DFATAL
+#define ABSL_RAW_LOG_INTERNAL_MAYBE_UNREACHABLE_LEVEL(severity)
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace raw_log_internal {
+
+// Helper function to implement ABSL_RAW_LOG
+// Logs format... at "severity" level, reporting it
+// as called from file:line.
+// This does not allocate memory or acquire locks.
+void RawLog(absl::LogSeverity severity, const char* file, int line,
+            const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5);
+
+// Writes the provided buffer directly to stderr, in a signal-safe, low-level
+// manner.  Preserves errno.
+void AsyncSignalSafeWriteError(const char* s, size_t len);
+
+// compile-time function to get the "base" filename, that is, the part of
+// a filename after the last "/" or "\" path separator.  The search starts at
+// the end of the string; the second parameter is the length of the string.
+constexpr const char* Basename(const char* fname, int offset) {
+  return offset == 0 || fname[offset - 1] == '/' || fname[offset - 1] == '\\'
+             ? fname + offset
+             : Basename(fname, offset - 1);
+}
+
+// For testing only.
+// Returns true if raw logging is fully supported. When it is not
+// fully supported, no messages will be emitted, but a log at FATAL
+// severity will cause an abort.
+//
+// TODO(gfalcon): Come up with a better name for this method.
+bool RawLoggingFullySupported();
+
+// Function type for a raw_log customization hook for suppressing messages
+// by severity, and for writing custom prefixes on non-suppressed messages.
+//
+// The installed hook is called for every raw log invocation.  The message will
+// be logged to stderr only if the hook returns true.  FATAL errors will cause
+// the process to abort, even if writing to stderr is suppressed.  The hook is
+// also provided with an output buffer, where it can write a custom log message
+// prefix.
+//
+// The raw_log system does not allocate memory or grab locks.  User-provided
+// hooks must avoid these operations, and must not throw exceptions.
+//
+// 'severity' is the severity level of the message being written.
+// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
+// was located.
+// 'buf' and 'buf_size' are pointers to the buffer and buffer size.  If the
+// hook writes a prefix, it must increment *buf and decrement *buf_size
+// accordingly.
+using LogFilterAndPrefixHook = bool (*)(absl::LogSeverity severity,
+                                        const char* file, int line, char** buf,
+                                        int* buf_size);
+
+// Function type for a raw_log customization hook called to abort a process
+// when a FATAL message is logged.  If the provided AbortHook() returns, the
+// logging system will call abort().
+//
+// 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro
+// was located.
+// The NUL-terminated logged message lives in the buffer between 'buf_start'
+// and 'buf_end'.  'prefix_end' points to the first non-prefix character of the
+// buffer (as written by the LogFilterAndPrefixHook.)
+//
+// The lifetime of the filename and message buffers will not end while the
+// process remains alive.
+using AbortHook = void (*)(const char* file, int line, const char* buf_start,
+                           const char* prefix_end, const char* buf_end);
+
+// Internal logging function for ABSL_INTERNAL_LOG to dispatch to.
+//
+// TODO(gfalcon): When string_view no longer depends on base, change this
+// interface to take its message as a string_view instead.
+using InternalLogFunction = void (*)(absl::LogSeverity severity,
+                                     const char* file, int line,
+                                     const std::string& message);
+
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL extern base_internal::AtomicHook<
+    InternalLogFunction>
+    internal_log_function;
+
+// Registers hooks of the above types.  Only a single hook of each type may be
+// registered.  It is an error to call these functions multiple times with
+// different input arguments.
+//
+// These functions are safe to call at any point during initialization; they do
+// not block or malloc, and are async-signal safe.
+void RegisterLogFilterAndPrefixHook(LogFilterAndPrefixHook func);
+void RegisterAbortHook(AbortHook func);
+void RegisterInternalLogFunction(InternalLogFunction func);
+
+}  // namespace raw_log_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_RAW_LOGGING_H_
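
A minimal sketch of the macros declared above (illustrative, not part of the commit). ABSL_RAW_CHECK takes a single message string, while ABSL_RAW_LOG takes a printf-style format and arguments; neither allocates memory or acquires locks.

#include "absl/base/internal/raw_logging.h"

void ReportOpenFailure(const char* path, int fd, int saved_errno) {
  ABSL_RAW_CHECK(path != nullptr, "path must not be null");
  if (fd < 0) {
    // Written straight to stderr with the "[file : line] RAW: " prefix.
    ABSL_RAW_LOG(ERROR, "open(%s) failed, errno=%d", path, saved_errno);
  }
  // Debug-only variant; compiled out under NDEBUG.
  ABSL_RAW_DCHECK(saved_errno >= 0, "errno should be non-negative");
}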

+ 58 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/scheduling_mode.h

@@ -0,0 +1,58 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Core interfaces and definitions used by low-level interfaces such as
+// SpinLock.
+
+#ifndef ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
+#define ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Used to describe how a thread may be scheduled.  Typically associated with
+// the declaration of a resource supporting synchronized access.
+//
+// SCHEDULE_COOPERATIVE_AND_KERNEL:
+// Specifies that when waiting, a cooperative thread (e.g. a Fiber) may
+// reschedule (using base::scheduling semantics); allowing other cooperative
+// threads to proceed.
+//
+// SCHEDULE_KERNEL_ONLY: (Also described as "non-cooperative")
+// Specifies that no cooperative scheduling semantics may be used, even if the
+// current thread is itself cooperatively scheduled.  This means that
+// cooperative threads will NOT allow other cooperative threads to execute in
+// their place while waiting for a resource of this type.  Host operating system
+// semantics (e.g. a futex) may still be used.
+//
+// When optional, clients should strongly prefer SCHEDULE_COOPERATIVE_AND_KERNEL
+// by default.  SCHEDULE_KERNEL_ONLY should only be used for resources on which
+// base::scheduling (e.g. the implementation of a Scheduler) may depend.
+//
+// NOTE: Cooperative resources may not be nested below non-cooperative ones.
+// This means that it is invalid to acquire a SCHEDULE_COOPERATIVE_AND_KERNEL
+// resource if a SCHEDULE_KERNEL_ONLY resource is already held.
+enum SchedulingMode {
+  SCHEDULE_KERNEL_ONLY = 0,         // Allow scheduling only the host OS.
+  SCHEDULE_COOPERATIVE_AND_KERNEL,  // Also allow cooperative scheduling.
+};
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_
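
The enum is consumed by the low-level primitives added later in this commit; for instance, spinlock.h/.cc accept a SchedulingMode at construction. A short sketch (illustrative) of requesting a non-cooperative lock, guarded with the SpinLockHolder RAII type declared in spinlock.h:

#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/internal/spinlock.h"

void GuardedWork() {
  // SCHEDULE_KERNEL_ONLY: waiters never yield to cooperatively scheduled
  // threads, which also makes the lock usable from async-signal contexts.
  static absl::base_internal::SpinLock lock(
      absl::base_internal::SCHEDULE_KERNEL_ONLY);
  absl::base_internal::SpinLockHolder holder(&lock);
  // ... protected region ...
}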

+ 81 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/scoped_set_env.cc

@@ -0,0 +1,81 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/scoped_set_env.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+#include <cstdlib>
+
+#include "absl/base/internal/raw_logging.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+namespace {
+
+#ifdef _WIN32
+const int kMaxEnvVarValueSize = 1024;
+#endif
+
+void SetEnvVar(const char* name, const char* value) {
+#ifdef _WIN32
+  SetEnvironmentVariableA(name, value);
+#else
+  if (value == nullptr) {
+    ::unsetenv(name);
+  } else {
+    ::setenv(name, value, 1);
+  }
+#endif
+}
+
+}  // namespace
+
+ScopedSetEnv::ScopedSetEnv(const char* var_name, const char* new_value)
+    : var_name_(var_name), was_unset_(false) {
+#ifdef _WIN32
+  char buf[kMaxEnvVarValueSize];
+  auto get_res = GetEnvironmentVariableA(var_name_.c_str(), buf, sizeof(buf));
+  ABSL_INTERNAL_CHECK(get_res < sizeof(buf), "value exceeds buffer size");
+
+  if (get_res == 0) {
+    was_unset_ = (GetLastError() == ERROR_ENVVAR_NOT_FOUND);
+  } else {
+    old_value_.assign(buf, get_res);
+  }
+
+  SetEnvironmentVariableA(var_name_.c_str(), new_value);
+#else
+  const char* val = ::getenv(var_name_.c_str());
+  if (val == nullptr) {
+    was_unset_ = true;
+  } else {
+    old_value_ = val;
+  }
+#endif
+
+  SetEnvVar(var_name_.c_str(), new_value);
+}
+
+ScopedSetEnv::~ScopedSetEnv() {
+  SetEnvVar(var_name_.c_str(), was_unset_ ? nullptr : old_value_.c_str());
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl

+ 45 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/scoped_set_env.h

@@ -0,0 +1,45 @@
+//
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
+#define ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_
+
+#include <string>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+class ScopedSetEnv {
+ public:
+  ScopedSetEnv(const char* var_name, const char* new_value);
+  ~ScopedSetEnv();
+
+ private:
+  std::string var_name_;
+  std::string old_value_;
+
+  // True if the environment variable was initially not set.
+  bool was_unset_;
+};
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_

+ 99 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/scoped_set_env_test.cc

@@ -0,0 +1,99 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifdef _WIN32
+#include <windows.h>
+#endif
+
+#include "gtest/gtest.h"
+#include "absl/base/internal/scoped_set_env.h"
+
+namespace {
+
+using absl::base_internal::ScopedSetEnv;
+
+std::string GetEnvVar(const char* name) {
+#ifdef _WIN32
+  char buf[1024];
+  auto get_res = GetEnvironmentVariableA(name, buf, sizeof(buf));
+  if (get_res >= sizeof(buf)) {
+    return "TOO_BIG";
+  }
+
+  if (get_res == 0) {
+    return "UNSET";
+  }
+
+  return std::string(buf, get_res);
+#else
+  const char* val = ::getenv(name);
+  if (val == nullptr) {
+    return "UNSET";
+  }
+
+  return val;
+#endif
+}
+
+TEST(ScopedSetEnvTest, SetNonExistingVarToString) {
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
+
+  {
+    ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value");
+
+    EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
+  }
+
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
+}
+
+TEST(ScopedSetEnvTest, SetNonExistingVarToNull) {
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
+
+  {
+    ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", nullptr);
+
+    EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
+  }
+
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
+}
+
+TEST(ScopedSetEnvTest, SetExistingVarToString) {
+  ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value");
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
+
+  {
+    ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "new_value");
+
+    EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "new_value");
+  }
+
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
+}
+
+TEST(ScopedSetEnvTest, SetExistingVarToNull) {
+  ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value");
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
+
+  {
+    ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", nullptr);
+
+    EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET");
+  }
+
+  EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value");
+}
+
+}  // namespace

+ 232 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/spinlock.cc

@@ -0,0 +1,232 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/spinlock.h"
+
+#include <algorithm>
+#include <atomic>
+#include <limits>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/atomic_hook.h"
+#include "absl/base/internal/cycleclock.h"
+#include "absl/base/internal/spinlock_wait.h"
+#include "absl/base/internal/sysinfo.h" /* For NumCPUs() */
+#include "absl/base/call_once.h"
+
+// Description of lock-word:
+//  31..00: [............................3][2][1][0]
+//
+//     [0]: kSpinLockHeld
+//     [1]: kSpinLockCooperative
+//     [2]: kSpinLockDisabledScheduling
+// [31..3]: ONLY kSpinLockSleeper OR
+//          Wait time in cycles >> PROFILE_TIMESTAMP_SHIFT
+//
+// Detailed descriptions:
+//
+// Bit [0]: The lock is considered held iff kSpinLockHeld is set.
+//
+// Bit [1]: Eligible waiters (e.g. Fibers) may co-operatively reschedule when
+//          contended iff kSpinLockCooperative is set.
+//
+// Bit [2]: This bit is exclusive from bit [1].  It is used only by a
+//          non-cooperative lock.  When set, indicates that scheduling was
+//          successfully disabled when the lock was acquired.  May be unset,
+//          even if non-cooperative, if a ThreadIdentity did not yet exist at
+//          time of acquisition.
+//
+// Bit [3]: If this is the only upper bit ([31..3]) set then this lock was
+//          acquired without contention; however, at least one waiter exists.
+//
+//          Otherwise, bits [31..3] represent the time spent by the current lock
+//          holder to acquire the lock.  There may be outstanding waiter(s).
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static base_internal::AtomicHook<void (*)(
+    const void *lock, int64_t wait_cycles)>
+    submit_profile_data;
+
+void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock,
+                                         int64_t wait_cycles)) {
+  submit_profile_data.Store(fn);
+}
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+// Static member variable definitions.
+constexpr uint32_t SpinLock::kSpinLockHeld;
+constexpr uint32_t SpinLock::kSpinLockCooperative;
+constexpr uint32_t SpinLock::kSpinLockDisabledScheduling;
+constexpr uint32_t SpinLock::kSpinLockSleeper;
+constexpr uint32_t SpinLock::kWaitTimeMask;
+#endif
+
+// Uncommon constructors.
+SpinLock::SpinLock(base_internal::SchedulingMode mode)
+    : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {
+  ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
+}
+
+// Monitor the lock to see if its value changes within some time period
+// (adaptive_spin_count loop iterations). The last value read from the lock
+// is returned from the method.
+uint32_t SpinLock::SpinLoop() {
+  // We are already in the slow path of SpinLock, initialize the
+  // adaptive_spin_count here.
+  ABSL_CONST_INIT static absl::once_flag init_adaptive_spin_count;
+  ABSL_CONST_INIT static int adaptive_spin_count = 0;
+  base_internal::LowLevelCallOnce(&init_adaptive_spin_count, []() {
+    adaptive_spin_count = base_internal::NumCPUs() > 1 ? 1000 : 1;
+  });
+
+  int c = adaptive_spin_count;
+  uint32_t lock_value;
+  do {
+    lock_value = lockword_.load(std::memory_order_relaxed);
+  } while ((lock_value & kSpinLockHeld) != 0 && --c > 0);
+  return lock_value;
+}
+
+void SpinLock::SlowLock() {
+  uint32_t lock_value = SpinLoop();
+  lock_value = TryLockInternal(lock_value, 0);
+  if ((lock_value & kSpinLockHeld) == 0) {
+    return;
+  }
+
+  base_internal::SchedulingMode scheduling_mode;
+  if ((lock_value & kSpinLockCooperative) != 0) {
+    scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
+  } else {
+    scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY;
+  }
+
+  // The lock was not obtained initially, so this thread needs to wait for
+  // it.  Record the current timestamp in the local variable wait_start_time
+  // so the total wait time can be stored in the lockword once this thread
+  // obtains the lock.
+  int64_t wait_start_time = CycleClock::Now();
+  uint32_t wait_cycles = 0;
+  int lock_wait_call_count = 0;
+  while ((lock_value & kSpinLockHeld) != 0) {
+    // If the lock is currently held, but not marked as having a sleeper, mark
+    // it as having a sleeper.
+    if ((lock_value & kWaitTimeMask) == 0) {
+      // Here, just "mark" that the thread is going to sleep.  Don't store the
+      // lock wait time in the lock -- the lock word stores the amount of time
+      // that the current holder waited before acquiring the lock, not the wait
+      // time of any thread currently waiting to acquire it.
+      if (lockword_.compare_exchange_strong(
+              lock_value, lock_value | kSpinLockSleeper,
+              std::memory_order_relaxed, std::memory_order_relaxed)) {
+        // Successfully transitioned to kSpinLockSleeper.  Pass
+        // kSpinLockSleeper to the SpinLockWait routine to properly indicate
+        // the last lock_value observed.
+        lock_value |= kSpinLockSleeper;
+      } else if ((lock_value & kSpinLockHeld) == 0) {
+        // Lock is free again, so try and acquire it before sleeping.  The
+        // new lock state will be the number of cycles this thread waited if
+        // this thread obtains the lock.
+        lock_value = TryLockInternal(lock_value, wait_cycles);
+        continue;   // Skip the delay at the end of the loop.
+      } else if ((lock_value & kWaitTimeMask) == 0) {
+        // The lock is still held, without a waiter being marked, but something
+        // else about the lock word changed, causing our CAS to fail. For
+        // example, a new lock holder may have acquired the lock with
+        // kSpinLockDisabledScheduling set, whereas the previous holder had not
+        // set that flag. In this case, attempt again to mark ourselves as a
+        // waiter.
+        continue;
+      }
+    }
+
+    // SpinLockDelay() calls into the fiber scheduler; we need to see
+    // synchronization there to avoid false positives.
+    ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
+    // Wait for an OS specific delay.
+    base_internal::SpinLockDelay(&lockword_, lock_value, ++lock_wait_call_count,
+                                 scheduling_mode);
+    ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
+    // Spin again after returning from the wait routine to give this thread
+    // some chance of obtaining the lock.
+    lock_value = SpinLoop();
+    wait_cycles = EncodeWaitCycles(wait_start_time, CycleClock::Now());
+    lock_value = TryLockInternal(lock_value, wait_cycles);
+  }
+}
+
+void SpinLock::SlowUnlock(uint32_t lock_value) {
+  base_internal::SpinLockWake(&lockword_,
+                              false);  // wake waiter if necessary
+
+  // If our acquisition was contended, collect contentionz profile info.  We
+  // reserve a unitary wait time to represent that a waiter exists without our
+  // own acquisition having been contended.
+  if ((lock_value & kWaitTimeMask) != kSpinLockSleeper) {
+    const int64_t wait_cycles = DecodeWaitCycles(lock_value);
+    ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0);
+    submit_profile_data(this, wait_cycles);
+    ABSL_TSAN_MUTEX_POST_DIVERT(this, 0);
+  }
+}
+
+// We use the upper 29 bits of the lock word to store the time spent waiting to
+// acquire this lock.  This is reported by contentionz profiling.  Since the
+// lower bits of the cycle counter wrap very quickly on high-frequency
+// processors, we divide to reduce the granularity to 2^kProfileTimestampShift
+// sized units.  On a 4GHz machine this will lose track of wait times greater
+// than (2^29/4GHz)*128 =~ 17.2 seconds.  Such waits should be extremely rare.
+static constexpr int kProfileTimestampShift = 7;
+
+// We currently reserve the lower 3 bits.
+static constexpr int kLockwordReservedShift = 3;
+
+uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time,
+                                    int64_t wait_end_time) {
+  static const int64_t kMaxWaitTime =
+      std::numeric_limits<uint32_t>::max() >> kLockwordReservedShift;
+  int64_t scaled_wait_time =
+      (wait_end_time - wait_start_time) >> kProfileTimestampShift;
+
+  // Return a representation of the time spent waiting that can be stored in
+  // the lock word's upper bits.
+  uint32_t clamped = static_cast<uint32_t>(
+      std::min(scaled_wait_time, kMaxWaitTime) << kLockwordReservedShift);
+
+  if (clamped == 0) {
+    return kSpinLockSleeper;  // Just wake waiters, but don't record contention.
+  }
+  // Bump up value if necessary to avoid returning kSpinLockSleeper.
+  const uint32_t kMinWaitTime =
+      kSpinLockSleeper + (1 << kLockwordReservedShift);
+  if (clamped == kSpinLockSleeper) {
+    return kMinWaitTime;
+  }
+  return clamped;
+}
+
+int64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) {
+  // Cast to uint32_t first to ensure bits [63:32] are cleared.
+  const int64_t scaled_wait_time =
+      static_cast<uint32_t>(lock_value & kWaitTimeMask);
+  return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift);
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
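
To make the wait-cycle encoding concrete, here is a standalone restatement of the encode/decode arithmetic above (the real members are protected, so this sketch simply repeats the same constants and shifts; it is illustrative only and not part of the vendored sources):

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <limits>

// Same constants as spinlock.cc above.
constexpr int kProfileTimestampShift = 7;   // store time in 2^7-cycle units
constexpr int kLockwordReservedShift = 3;   // low 3 bits hold the lock flags
constexpr uint32_t kSpinLockSleeper = 8;

uint32_t Encode(int64_t wait_start_time, int64_t wait_end_time) {
  static const int64_t kMaxWaitTime =
      std::numeric_limits<uint32_t>::max() >> kLockwordReservedShift;
  const int64_t scaled_wait_time =
      (wait_end_time - wait_start_time) >> kProfileTimestampShift;
  const uint32_t clamped = static_cast<uint32_t>(
      std::min(scaled_wait_time, kMaxWaitTime) << kLockwordReservedShift);
  if (clamped == 0) return kSpinLockSleeper;  // waiter exists, no contention
  const uint32_t kMinWaitTime =
      kSpinLockSleeper + (1 << kLockwordReservedShift);
  return clamped == kSpinLockSleeper ? kMinWaitTime : clamped;
}

int64_t Decode(uint32_t lock_value) {
  const int64_t scaled_wait_time = lock_value & 0xFFFFFFF8u;  // drop flag bits
  return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift);
}

int main() {
  const int64_t waited = 1000000;  // cycles spent waiting
  // Prints "1000000 -> 999936": wait times survive the round trip only to a
  // granularity of 2^7 = 128 cycles, matching the comment above.
  std::printf("%lld -> %lld\n", static_cast<long long>(waited),
              static_cast<long long>(Decode(Encode(0, waited))));
  return 0;
}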

+ 275 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/spinlock.h

@@ -0,0 +1,275 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+//  Most users requiring mutual exclusion should use Mutex.
+//  SpinLock is provided for use in two situations:
+//   - for use by Abseil internal code that Mutex itself depends on
+//   - for async signal safety (see below)
+
+// SpinLock with a base_internal::SchedulingMode::SCHEDULE_KERNEL_ONLY is async
+// signal safe. If a spinlock is used within a signal handler, all code that
+// acquires the lock must ensure that the signal cannot arrive while they are
+// holding the lock. Typically, this is done by blocking the signal.
+//
+// Threads waiting on a SpinLock may be woken in an arbitrary order.
+
+#ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_
+#define ABSL_BASE_INTERNAL_SPINLOCK_H_
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/attributes.h"
+#include "absl/base/const_init.h"
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/low_level_scheduling.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/internal/tsan_mutex_interface.h"
+#include "absl/base/thread_annotations.h"
+
+namespace tcmalloc {
+namespace tcmalloc_internal {
+
+class AllocationGuardSpinLockHolder;
+
+}  // namespace tcmalloc_internal
+}  // namespace tcmalloc
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock {
+ public:
+  SpinLock() : lockword_(kSpinLockCooperative) {
+    ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
+  }
+
+  // Constructors that allow non-cooperative spinlocks to be created for use
+  // inside thread schedulers.  Normal clients should not use these.
+  explicit SpinLock(base_internal::SchedulingMode mode);
+
+  // Constructor for global SpinLock instances.  See absl/base/const_init.h.
+  constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode)
+      : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {}
+
+  // For global SpinLock instances, prefer a trivial destructor when possible.
+  // A defaulted but non-trivial destructor in some build configurations causes
+  // an extra static initializer.
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+  ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }
+#else
+  ~SpinLock() = default;
+#endif
+
+  // Acquire this SpinLock.
+  inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() {
+    ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
+    if (!TryLockImpl()) {
+      SlowLock();
+    }
+    ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
+  }
+
+  // Try to acquire this SpinLock without blocking and return true if the
+  // acquisition was successful.  If the lock was not acquired, false is
+  // returned.  If this SpinLock is free at the time of the call, TryLock
+  // will return true with high probability.
+  ABSL_MUST_USE_RESULT inline bool TryLock()
+      ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) {
+    ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
+    bool res = TryLockImpl();
+    ABSL_TSAN_MUTEX_POST_LOCK(
+        this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed),
+        0);
+    return res;
+  }
+
+  // Release this SpinLock, which must be held by the calling thread.
+  inline void Unlock() ABSL_UNLOCK_FUNCTION() {
+    ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
+    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
+    lock_value = lockword_.exchange(lock_value & kSpinLockCooperative,
+                                    std::memory_order_release);
+
+    if ((lock_value & kSpinLockDisabledScheduling) != 0) {
+      base_internal::SchedulingGuard::EnableRescheduling(true);
+    }
+    if ((lock_value & kWaitTimeMask) != 0) {
+      // Collect contentionz profile info, and speed the wakeup of any waiter.
+      // The wait_cycles value indicates how long this thread spent waiting
+      // for the lock.
+      SlowUnlock(lock_value);
+    }
+    ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
+  }
+
+  // Determine if the lock is held.  When the lock is held by the invoking
+  // thread, true will always be returned. Intended to be used as
+  // CHECK(lock.IsHeld()).
+  ABSL_MUST_USE_RESULT inline bool IsHeld() const {
+    return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
+  }
+
+  // Return immediately if this thread holds the SpinLock exclusively.
+  // Otherwise, report an error by crashing with a diagnostic.
+  inline void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK() {
+    if (!IsHeld()) {
+      ABSL_RAW_LOG(FATAL, "thread should hold the lock on SpinLock");
+    }
+  }
+
+ protected:
+  // These should not be exported except for testing.
+
+  // Store number of cycles between wait_start_time and wait_end_time in a
+  // lock value.
+  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
+                                   int64_t wait_end_time);
+
+  // Extract number of wait cycles in a lock value.
+  static int64_t DecodeWaitCycles(uint32_t lock_value);
+
+  // Provide access to protected method above.  Use for testing only.
+  friend struct SpinLockTest;
+  friend class tcmalloc::tcmalloc_internal::AllocationGuardSpinLockHolder;
+
+ private:
+  // lockword_ is used to store the following:
+  //
+  // bit[0] encodes whether a lock is being held.
+  // bit[1] encodes whether a lock uses cooperative scheduling.
+  // bit[2] encodes whether the current lock holder disabled scheduling when
+  //        acquiring the lock. Only set when kSpinLockHeld is also set.
+  // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
+  //        This is set by the lock holder to indicate how long it waited on
+  //        the lock before eventually acquiring it. The number of cycles is
+  //        encoded as a 29-bit unsigned int, or in the case that the current
+  //        holder did not wait but another waiter is queued, the LSB
+  //        (kSpinLockSleeper) is set. The implementation does not explicitly
+  //        track the number of queued waiters beyond this. It must always be
+  //        assumed that waiters may exist if the current holder was required to
+  //        queue.
+  //
+  // Invariant: if the lock is not held, the value is either 0 or
+  // kSpinLockCooperative.
+  static constexpr uint32_t kSpinLockHeld = 1;
+  static constexpr uint32_t kSpinLockCooperative = 2;
+  static constexpr uint32_t kSpinLockDisabledScheduling = 4;
+  static constexpr uint32_t kSpinLockSleeper = 8;
+  // Includes kSpinLockSleeper.
+  static constexpr uint32_t kWaitTimeMask =
+      ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling);
+
+  // Returns true if the provided scheduling mode is cooperative.
+  static constexpr bool IsCooperative(
+      base_internal::SchedulingMode scheduling_mode) {
+    return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
+  }
+
+  bool IsCooperative() const {
+    return lockword_.load(std::memory_order_relaxed) & kSpinLockCooperative;
+  }
+
+  uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
+  void SlowLock() ABSL_ATTRIBUTE_COLD;
+  void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
+  uint32_t SpinLoop();
+
+  inline bool TryLockImpl() {
+    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
+    return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
+  }
+
+  std::atomic<uint32_t> lockword_;
+
+  SpinLock(const SpinLock&) = delete;
+  SpinLock& operator=(const SpinLock&) = delete;
+};
+
+// Corresponding locker object that arranges to acquire a spinlock for
+// the duration of a C++ scope.
+//
+// TODO(b/176172494): Use only [[nodiscard]] when baseline is raised.
+// TODO(b/6695610): Remove forward declaration when #ifdef is no longer needed.
+#if ABSL_HAVE_CPP_ATTRIBUTE(nodiscard)
+class [[nodiscard]] SpinLockHolder;
+#else
+class ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_TRIVIAL_ABI SpinLockHolder;
+#endif
+
+class ABSL_SCOPED_LOCKABLE SpinLockHolder {
+ public:
+  inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l)
+      : lock_(l) {
+    l->Lock();
+  }
+  inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_->Unlock(); }
+
+  SpinLockHolder(const SpinLockHolder&) = delete;
+  SpinLockHolder& operator=(const SpinLockHolder&) = delete;
+
+ private:
+  SpinLock* lock_;
+};
+
+// Register a hook for profiling support.
+//
+// The function pointer registered here will be called whenever a spinlock is
+// contended.  The callback is given an opaque handle to the contended spinlock
+// and the number of wait cycles.  This is thread-safe, but only a single
+// profiler can be registered.  It is an error to call this function multiple
+// times with different arguments.
+void RegisterSpinLockProfiler(void (*fn)(const void* lock,
+                                         int64_t wait_cycles));
+
+//------------------------------------------------------------------------------
+// Public interface ends here.
+//------------------------------------------------------------------------------
+
+// If (result & kSpinLockHeld) == 0, then *this was successfully locked.
+// Otherwise, returns last observed value for lockword_.
+inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
+                                          uint32_t wait_cycles) {
+  if ((lock_value & kSpinLockHeld) != 0) {
+    return lock_value;
+  }
+
+  uint32_t sched_disabled_bit = 0;
+  if ((lock_value & kSpinLockCooperative) == 0) {
+    // For non-cooperative locks we must make sure we mark ourselves as
+    // non-reschedulable before we attempt to CompareAndSwap.
+    if (base_internal::SchedulingGuard::DisableRescheduling()) {
+      sched_disabled_bit = kSpinLockDisabledScheduling;
+    }
+  }
+
+  if (!lockword_.compare_exchange_strong(
+          lock_value,
+          kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
+          std::memory_order_acquire, std::memory_order_relaxed)) {
+    base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
+  }
+
+  return lock_value;
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SPINLOCK_H_
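
A brief usage sketch of this internal API (normal code should prefer absl::Mutex, as the header notes). The names counter_lock, counter, and IncrementCounter are illustrative; the constant-initialized global mirrors the constexpr constructor documented above:

#include <cstdint>

#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/internal/spinlock.h"
#include "absl/base/thread_annotations.h"

// Constant initialization: the constexpr constructor plus ABSL_CONST_INIT
// means no dynamic static initializer is emitted for this global.
ABSL_CONST_INIT static absl::base_internal::SpinLock counter_lock(
    absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY);
static int64_t counter ABSL_GUARDED_BY(counter_lock) = 0;

int64_t IncrementCounter() {
  // SpinLockHolder releases the lock when it goes out of scope.
  absl::base_internal::SpinLockHolder l(&counter_lock);
  return ++counter;
}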

+ 35 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/spinlock_akaros.inc

@@ -0,0 +1,35 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is an Akaros-specific part of spinlock_wait.cc
+
+#include <atomic>
+
+#include "absl/base/internal/scheduling_mode.h"
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+    std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */,
+    int /* loop */, absl::base_internal::SchedulingMode /* mode */) {
+  // In Akaros, one must take care not to call anything that could cause a
+  // malloc(), a blocking system call, or a uthread_yield() while holding a
+  // spinlock. Our callers assume we will not call into libraries or other
+  // arbitrary code.
+}
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+    std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+}  // extern "C"

+ 80 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/spinlock_benchmark.cc

@@ -0,0 +1,80 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// See also //absl/synchronization:mutex_benchmark for a comparison of SpinLock
+// and Mutex performance under varying levels of contention.
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/base/no_destructor.h"
+#include "absl/synchronization/internal/create_thread_identity.h"
+#include "benchmark/benchmark.h"
+
+namespace {
+
+template <absl::base_internal::SchedulingMode scheduling_mode>
+static void BM_TryLock(benchmark::State& state) {
+  // Ensure a ThreadIdentity is installed so that KERNEL_ONLY has an effect.
+  ABSL_INTERNAL_CHECK(
+      absl::synchronization_internal::GetOrCreateCurrentThreadIdentity() !=
+          nullptr,
+      "GetOrCreateCurrentThreadIdentity() failed");
+
+  static absl::NoDestructor<absl::base_internal::SpinLock> spinlock(
+      scheduling_mode);
+  for (auto _ : state) {
+    if (spinlock->TryLock()) spinlock->Unlock();
+  }
+}
+
+template <absl::base_internal::SchedulingMode scheduling_mode>
+static void BM_SpinLock(benchmark::State& state) {
+  // Ensure a ThreadIdentity is installed so that KERNEL_ONLY has an effect.
+  ABSL_INTERNAL_CHECK(
+      absl::synchronization_internal::GetOrCreateCurrentThreadIdentity() !=
+          nullptr,
+      "GetOrCreateCurrentThreadIdentity() failed");
+
+  static absl::NoDestructor<absl::base_internal::SpinLock> spinlock(
+      scheduling_mode);
+  for (auto _ : state) {
+    absl::base_internal::SpinLockHolder holder(spinlock.get());
+  }
+}
+
+BENCHMARK_TEMPLATE(BM_SpinLock,
+                   absl::base_internal::SCHEDULE_KERNEL_ONLY)
+    ->UseRealTime()
+    ->Threads(1)
+    ->ThreadPerCpu();
+
+BENCHMARK_TEMPLATE(BM_SpinLock,
+                   absl::base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL)
+    ->UseRealTime()
+    ->Threads(1)
+    ->ThreadPerCpu();
+
+BENCHMARK_TEMPLATE(BM_TryLock, absl::base_internal::SCHEDULE_KERNEL_ONLY)
+    ->UseRealTime()
+    ->Threads(1)
+    ->ThreadPerCpu();
+
+BENCHMARK_TEMPLATE(BM_TryLock,
+                   absl::base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL)
+    ->UseRealTime()
+    ->Threads(1)
+    ->ThreadPerCpu();
+
+}  // namespace

+ 71 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/spinlock_linux.inc

@@ -0,0 +1,71 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Linux-specific part of spinlock_wait.cc
+
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include <atomic>
+#include <climits>
+#include <cstdint>
+#include <ctime>
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/errno_saver.h"
+
+// The SpinLock lockword is `std::atomic<uint32_t>`. Here we assert that
+// `std::atomic<uint32_t>` is bitwise equivalent to the `int` expected
+// by SYS_futex. We also assume that reads/writes done to the lockword
+// by SYS_futex have rational semantics with regard to the
+// std::atomic<> API. C++ provides no guarantees of these assumptions,
+// but they are believed to hold in practice.
+static_assert(sizeof(std::atomic<uint32_t>) == sizeof(int),
+              "SpinLock lockword has the wrong size for a futex");
+
+// Some Android headers are missing these definitions even though they
+// support these futex operations.
+#ifdef __BIONIC__
+#ifndef SYS_futex
+#define SYS_futex __NR_futex
+#endif
+#ifndef FUTEX_PRIVATE_FLAG
+#define FUTEX_PRIVATE_FLAG 128
+#endif
+#endif
+
+#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
+#define SYS_futex_time64 __NR_futex_time64
+#endif
+
+#if defined(SYS_futex_time64) && !defined(SYS_futex)
+#define SYS_futex SYS_futex_time64
+#endif
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+    std::atomic<uint32_t> *w, uint32_t value, int,
+    absl::base_internal::SchedulingMode) {
+  absl::base_internal::ErrnoSaver errno_saver;
+  syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, nullptr);
+}
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+    std::atomic<uint32_t> *w, bool all) {
+  syscall(SYS_futex, w, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, all ? INT_MAX : 1, 0);
+}
+
+}  // extern "C"

+ 46 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/spinlock_posix.inc

@@ -0,0 +1,46 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Posix-specific part of spinlock_wait.cc
+
+#include <sched.h>
+
+#include <atomic>
+#include <ctime>
+
+#include "absl/base/internal/errno_saver.h"
+#include "absl/base/internal/scheduling_mode.h"
+#include "absl/base/port.h"
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+    std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
+    absl::base_internal::SchedulingMode /* mode */) {
+  absl::base_internal::ErrnoSaver errno_saver;
+  if (loop == 0) {
+  } else if (loop == 1) {
+    sched_yield();
+  } else {
+    struct timespec tm;
+    tm.tv_sec = 0;
+    tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop);
+    nanosleep(&tm, nullptr);
+  }
+}
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+    std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+}  // extern "C"

+ 81 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/spinlock_wait.cc

@@ -0,0 +1,81 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The OS-specific header included below must provide two calls:
+// AbslInternalSpinLockDelay() and AbslInternalSpinLockWake().
+// See spinlock_wait.h for the specs.
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/internal/spinlock_wait.h"
+
+#if defined(_WIN32)
+#include "absl/base/internal/spinlock_win32.inc"
+#elif defined(__linux__)
+#include "absl/base/internal/spinlock_linux.inc"
+#elif defined(__akaros__)
+#include "absl/base/internal/spinlock_akaros.inc"
+#else
+#include "absl/base/internal/spinlock_posix.inc"
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// See spinlock_wait.h for spec.
+uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
+                      const SpinLockWaitTransition trans[],
+                      base_internal::SchedulingMode scheduling_mode) {
+  int loop = 0;
+  for (;;) {
+    uint32_t v = w->load(std::memory_order_acquire);
+    int i;
+    for (i = 0; i != n && v != trans[i].from; i++) {
+    }
+    if (i == n) {
+      SpinLockDelay(w, v, ++loop, scheduling_mode);  // no matching transition
+    } else if (trans[i].to == v ||                   // null transition
+               w->compare_exchange_strong(v, trans[i].to,
+                                          std::memory_order_acquire,
+                                          std::memory_order_relaxed)) {
+      if (trans[i].done) return v;
+    }
+  }
+}
+
+static std::atomic<uint64_t> delay_rand;
+
+// Return a suggested delay in nanoseconds for iteration number "loop"
+int SpinLockSuggestedDelayNS(int loop) {
+  // Weak pseudo-random number generator to get some spread between threads
+  // when many are spinning.
+  uint64_t r = delay_rand.load(std::memory_order_relaxed);
+  r = 0x5deece66dLL * r + 0xb;   // numbers from nrand48()
+  delay_rand.store(r, std::memory_order_relaxed);
+
+  if (loop < 0 || loop > 32) {   // limit loop to 0..32
+    loop = 32;
+  }
+  const int kMinDelay = 128 << 10;  // 128us
+  // Double delay every 8 iterations, up to 16x (2ms).
+  int delay = kMinDelay << (loop / 8);
+  // Randomize in delay..2*delay range, for resulting 128us..4ms range.
+  return delay | ((delay - 1) & static_cast<int>(r));
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
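
The backoff schedule implied by SpinLockSuggestedDelayNS() can be tabulated directly; the sketch below prints the deterministic bounds, while the actual return value is jittered within each range by the weak PRNG above:

#include <cstdio>

int main() {
  const int kMinDelay = 128 << 10;  // ~128us, expressed in nanoseconds
  // The delay floor doubles every 8 iterations; "loop" is clamped to [0, 32].
  for (int loop = 0; loop <= 32; loop += 8) {
    const int floor_ns = kMinDelay << (loop / 8);
    // The jittered result is floor_ns | ((floor_ns - 1) & r), i.e. somewhere
    // in [floor_ns, 2 * floor_ns - 1].
    std::printf("loop %2d+: %7d .. %7d ns\n", loop, floor_ns,
                2 * floor_ns - 1);
  }
  return 0;
}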

+ 95 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/spinlock_wait.h

@@ -0,0 +1,95 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
+#define ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
+
+// Operations to make atomic transitions on a word, and to allow
+// waiting for those transitions to become possible.
+
+#include <stdint.h>
+#include <atomic>
+
+#include "absl/base/internal/scheduling_mode.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// SpinLockWait() waits until it can perform one of several transitions from
+// "from" to "to".  It returns when it performs a transition where done==true.
+struct SpinLockWaitTransition {
+  uint32_t from;
+  uint32_t to;
+  bool done;
+};
+
+// Wait until *w can transition from trans[i].from to trans[i].to for some i
+// satisfying 0<=i<n && trans[i].done, atomically make the transition,
+// then return the old value of *w.   Make any other atomic transitions
+// where !trans[i].done, but continue waiting.
+//
+// Wakeups for threads blocked on SpinLockWait do not respect priorities.
+uint32_t SpinLockWait(std::atomic<uint32_t> *w, int n,
+                      const SpinLockWaitTransition trans[],
+                      SchedulingMode scheduling_mode);
+
+// If possible, wake some thread that has called SpinLockDelay(w, ...). If `all`
+// is true, wake all such threads. On some systems, this may be a no-op; on
+// those systems, threads calling SpinLockDelay() will always wake eventually
+// even if SpinLockWake() is never called.
+void SpinLockWake(std::atomic<uint32_t> *w, bool all);
+
+// Wait for an appropriate spin delay on iteration "loop" of a
+// spin loop on location *w, whose previously observed value was "value".
+// SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick,
+// or may wait for a call to SpinLockWake(w).
+void SpinLockDelay(std::atomic<uint32_t> *w, uint32_t value, int loop,
+                   base_internal::SchedulingMode scheduling_mode);
+
+// Helper used by AbslInternalSpinLockDelay.
+// Returns a suggested delay in nanoseconds for iteration number "loop".
+int SpinLockSuggestedDelayNS(int loop);
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+// In some build configurations we pass --detect-odr-violations to the
+// gold linker.  This causes it to flag weak symbol overrides as ODR
+// violations.  Because ODR only applies to C++ and not C,
+// --detect-odr-violations ignores symbols not mangled with C++ names.
+// By changing our extension points to be extern "C", we dodge this
+// check.
+extern "C" {
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(std::atomic<uint32_t> *w,
+                                                      bool all);
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+    std::atomic<uint32_t> *w, uint32_t value, int loop,
+    absl::base_internal::SchedulingMode scheduling_mode);
+}
+
+inline void absl::base_internal::SpinLockWake(std::atomic<uint32_t> *w,
+                                              bool all) {
+  ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(w, all);
+}
+
+inline void absl::base_internal::SpinLockDelay(
+    std::atomic<uint32_t> *w, uint32_t value, int loop,
+    absl::base_internal::SchedulingMode scheduling_mode) {
+  ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)
+  (w, value, loop, scheduling_mode);
+}
+
+#endif  // ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_
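
A hypothetical example of how the transition table is meant to be used (not an Abseil type): a one-shot event whose word is 0 until Set() publishes 1. The single {1, 1, true} entry is a "null" transition that completes the wait as soon as the word reads 1; any other observed value has no matching entry, so SpinLockWait() delays and retries:

#include <atomic>
#include <cstdint>

#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/internal/spinlock_wait.h"

class OneShotEvent {
 public:
  void Set() {
    word_.store(1, std::memory_order_release);
    // Wake every thread currently parked in SpinLockDelay().
    absl::base_internal::SpinLockWake(&word_, /*all=*/true);
  }

  void Wait() {
    static constexpr absl::base_internal::SpinLockWaitTransition kTrans[] = {
        {1, 1, true}};
    absl::base_internal::SpinLockWait(
        &word_, 1, kTrans, absl::base_internal::SCHEDULE_KERNEL_ONLY);
  }

 private:
  std::atomic<uint32_t> word_{0};
};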

+ 40 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/spinlock_win32.inc

@@ -0,0 +1,40 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Win32-specific part of spinlock_wait.cc
+
+#include <windows.h>
+#include <atomic>
+#include "absl/base/internal/scheduling_mode.h"
+
+extern "C" {
+
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+    std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */, int loop,
+    absl::base_internal::SchedulingMode /* mode */) {
+  if (loop == 0) {
+  } else if (loop == 1) {
+    Sleep(0);
+  } else {
+    // SpinLockSuggestedDelayNS() always returns a positive integer, so this
+    // static_cast is safe.
+    Sleep(static_cast<DWORD>(
+        absl::base_internal::SpinLockSuggestedDelayNS(loop) / 1000000));
+  }
+}
+
+void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+    std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+}  // extern "C"

+ 88 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/strerror.cc

@@ -0,0 +1,88 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/strerror.h"
+
+#include <array>
+#include <cerrno>
+#include <cstddef>
+#include <cstdio>
+#include <cstring>
+#include <string>
+#include <type_traits>
+
+#include "absl/base/internal/errno_saver.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+namespace {
+
+const char* StrErrorAdaptor(int errnum, char* buf, size_t buflen) {
+#if defined(_WIN32)
+  int rc = strerror_s(buf, buflen, errnum);
+  buf[buflen - 1] = '\0';  // guarantee NUL termination
+  if (rc == 0 && strncmp(buf, "Unknown error", buflen) == 0) *buf = '\0';
+  return buf;
+#else
+  // The type of `ret` is platform-specific; both of these branches must compile
+  // either way but only one will execute on any given platform:
+  auto ret = strerror_r(errnum, buf, buflen);
+  if (std::is_same<decltype(ret), int>::value) {
+    // XSI `strerror_r`; `ret` is `int`:
+    if (ret) *buf = '\0';
+    return buf;
+  } else {
+    // GNU `strerror_r`; `ret` is `char *`:
+    return reinterpret_cast<const char*>(ret);
+  }
+#endif
+}
+
+std::string StrErrorInternal(int errnum) {
+  char buf[100];
+  const char* str = StrErrorAdaptor(errnum, buf, sizeof buf);
+  if (*str == '\0') {
+    snprintf(buf, sizeof buf, "Unknown error %d", errnum);
+    str = buf;
+  }
+  return str;
+}
+
+// kSysNerr is the number of errors from a recent glibc. `StrError()` falls back
+// to `StrErrorAdaptor()` if the value is larger than this.
+constexpr int kSysNerr = 135;
+
+std::array<std::string, kSysNerr>* NewStrErrorTable() {
+  auto* table = new std::array<std::string, kSysNerr>;
+  for (size_t i = 0; i < table->size(); ++i) {
+    (*table)[i] = StrErrorInternal(static_cast<int>(i));
+  }
+  return table;
+}
+
+}  // namespace
+
+std::string StrError(int errnum) {
+  absl::base_internal::ErrnoSaver errno_saver;
+  static const auto* table = NewStrErrorTable();
+  if (errnum >= 0 && static_cast<size_t>(errnum) < table->size()) {
+    return (*table)[static_cast<size_t>(errnum)];
+  }
+  return StrErrorInternal(errnum);
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl

+ 39 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/strerror.h

@@ -0,0 +1,39 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_STRERROR_H_
+#define ABSL_BASE_INTERNAL_STRERROR_H_
+
+#include <string>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// A portable and thread-safe alternative to C89's `strerror`.
+//
+// The C89 specification of `strerror` is not suitable for use in a
+// multi-threaded application as the returned string may be changed by calls to
+// `strerror` from another thread.  The many non-stdlib alternatives differ
+// enough in their names, availability, and semantics to justify this wrapper
+// around them.  `errno` will not be modified by a call to `absl::StrError`.
+std::string StrError(int errnum);
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_STRERROR_H_
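
A minimal usage sketch, assuming only the declaration above; the path is a deliberately bogus placeholder:

#include <cerrno>
#include <cstdio>

#include "absl/base/internal/strerror.h"

int main() {
  std::FILE* f = std::fopen("/no/such/path/for/this/sketch", "r");
  if (f == nullptr) {
    // Unlike strerror(), this call is safe to use from multiple threads and
    // leaves errno untouched.
    std::printf("fopen failed: %s\n",
                absl::base_internal::StrError(errno).c_str());
  } else {
    std::fclose(f);
  }
  return 0;
}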

+ 29 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/strerror_benchmark.cc

@@ -0,0 +1,29 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cerrno>
+#include <cstdio>
+#include <string>
+
+#include "absl/base/internal/strerror.h"
+#include "benchmark/benchmark.h"
+
+namespace {
+void BM_AbslStrError(benchmark::State& state) {
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(absl::base_internal::StrError(ERANGE));
+  }
+}
+BENCHMARK(BM_AbslStrError);
+}  // namespace

+ 88 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/strerror_test.cc

@@ -0,0 +1,88 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/strerror.h"
+
+#include <atomic>
+#include <cerrno>
+#include <cstdio>
+#include <cstring>
+#include <string>
+#include <thread>  // NOLINT(build/c++11)
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/strings/match.h"
+
+namespace {
+using ::testing::AnyOf;
+using ::testing::Eq;
+
+TEST(StrErrorTest, ValidErrorCode) {
+  errno = ERANGE;
+  EXPECT_THAT(absl::base_internal::StrError(EDOM), Eq(strerror(EDOM)));
+  EXPECT_THAT(errno, Eq(ERANGE));
+}
+
+TEST(StrErrorTest, InvalidErrorCode) {
+  errno = ERANGE;
+  EXPECT_THAT(absl::base_internal::StrError(-1),
+              AnyOf(Eq("No error information"), Eq("Unknown error -1")));
+  EXPECT_THAT(errno, Eq(ERANGE));
+}
+
+TEST(StrErrorTest, MultipleThreads) {
+  // In this test, we will start up 2 threads and have each one call
+  // StrError 1000 times, each time with a different errnum.  We
+  // expect that StrError(errnum) will return a string equal to the
+  // one returned by strerror(errnum), if the code is known.  Since
+  // strerror is known to be thread-hostile, collect all the expected
+  // strings up front.
+  const int kNumCodes = 1000;
+  std::vector<std::string> expected_strings(kNumCodes);
+  for (int i = 0; i < kNumCodes; ++i) {
+    expected_strings[i] = strerror(i);
+  }
+
+  std::atomic_int counter(0);
+  auto thread_fun = [&]() {
+    for (int i = 0; i < kNumCodes; ++i) {
+      ++counter;
+      errno = ERANGE;
+      const std::string value = absl::base_internal::StrError(i);
+      // EXPECT_* could change errno. Stash it first.
+      int check_err = errno;
+      EXPECT_THAT(check_err, Eq(ERANGE));
+      // Only the GNU implementation is guaranteed to provide the
+      // string "Unknown error nnn". POSIX doesn't say anything.
+      if (!absl::StartsWith(value, "Unknown error ")) {
+        EXPECT_THAT(value, Eq(expected_strings[i]));
+      }
+    }
+  };
+
+  const int kNumThreads = 100;
+  std::vector<std::thread> threads;
+  for (int i = 0; i < kNumThreads; ++i) {
+    threads.push_back(std::thread(thread_fun));
+  }
+  for (auto& thread : threads) {
+    thread.join();
+  }
+
+  EXPECT_THAT(counter, Eq(kNumThreads * kNumCodes));
+}
+
+}  // namespace

+ 489 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/sysinfo.cc

@@ -0,0 +1,489 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/sysinfo.h"
+
+#include "absl/base/attributes.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <fcntl.h>
+#include <pthread.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#ifdef __linux__
+#include <sys/syscall.h>
+#endif
+
+#if defined(__APPLE__) || defined(__FreeBSD__)
+#include <sys/sysctl.h>
+#endif
+
+#ifdef __FreeBSD__
+#include <pthread_np.h>
+#endif
+
+#ifdef __NetBSD__
+#include <lwp.h>
+#endif
+
+#if defined(__myriad2__)
+#include <rtems.h>
+#endif
+
+#include <string.h>
+
+#include <cassert>
+#include <cerrno>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <ctime>
+#include <limits>
+#include <thread>  // NOLINT(build/c++11)
+#include <utility>
+#include <vector>
+
+#include "absl/base/call_once.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/base/internal/unscaledcycleclock.h"
+#include "absl/base/thread_annotations.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+namespace {
+
+#if defined(_WIN32)
+
+// Returns number of bits set in `bitMask`
+DWORD Win32CountSetBits(ULONG_PTR bitMask) {
+  for (DWORD bitSetCount = 0; ; ++bitSetCount) {
+    if (bitMask == 0) return bitSetCount;
+    bitMask &= bitMask - 1;
+  }
+}
+
+// Returns the number of logical CPUs using GetLogicalProcessorInformation(), or
+// 0 if the number of processors is not available or cannot be computed.
+// https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getlogicalprocessorinformation
+int Win32NumCPUs() {
+#pragma comment(lib, "kernel32.lib")
+  using Info = SYSTEM_LOGICAL_PROCESSOR_INFORMATION;
+
+  DWORD info_size = sizeof(Info);
+  Info* info(static_cast<Info*>(malloc(info_size)));
+  if (info == nullptr) return 0;
+
+  bool success = GetLogicalProcessorInformation(info, &info_size);
+  if (!success && GetLastError() == ERROR_INSUFFICIENT_BUFFER) {
+    free(info);
+    info = static_cast<Info*>(malloc(info_size));
+    if (info == nullptr) return 0;
+    success = GetLogicalProcessorInformation(info, &info_size);
+  }
+
+  DWORD logicalProcessorCount = 0;
+  if (success) {
+    Info* ptr = info;
+    DWORD byteOffset = 0;
+    while (byteOffset + sizeof(Info) <= info_size) {
+      switch (ptr->Relationship) {
+        case RelationProcessorCore:
+          logicalProcessorCount += Win32CountSetBits(ptr->ProcessorMask);
+          break;
+
+        case RelationNumaNode:
+        case RelationCache:
+        case RelationProcessorPackage:
+          // Ignore other entries
+          break;
+
+        default:
+          // Ignore unknown entries
+          break;
+      }
+      byteOffset += sizeof(Info);
+      ptr++;
+    }
+  }
+  free(info);
+  return static_cast<int>(logicalProcessorCount);
+}
+
+#endif
+
+}  // namespace
+
+static int GetNumCPUs() {
+#if defined(__myriad2__)
+  return 1;
+#elif defined(_WIN32)
+  const int hardware_concurrency = Win32NumCPUs();
+  return hardware_concurrency ? hardware_concurrency : 1;
+#elif defined(_AIX)
+  return sysconf(_SC_NPROCESSORS_ONLN);
+#else
+  // Other possibilities:
+  //  - Read /sys/devices/system/cpu/online and use cpumask_parse()
+  //  - sysconf(_SC_NPROCESSORS_ONLN)
+  return static_cast<int>(std::thread::hardware_concurrency());
+#endif
+}
+
+#if defined(_WIN32)
+
+static double GetNominalCPUFrequency() {
+#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \
+    !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
+  // UWP apps don't have access to the registry, and there is currently no
+  // API that reports the nominal CPU frequency.
+  return 1.0;
+#else
+#pragma comment(lib, "advapi32.lib")  // For Reg* functions.
+  HKEY key;
+  // Use the Reg* functions rather than the SH functions because shlwapi.dll
+  // pulls in gdi32.dll which makes process destruction much more costly.
+  if (RegOpenKeyExA(HKEY_LOCAL_MACHINE,
+                    "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", 0,
+                    KEY_READ, &key) == ERROR_SUCCESS) {
+    DWORD type = 0;
+    DWORD data = 0;
+    DWORD data_size = sizeof(data);
+    auto result = RegQueryValueExA(key, "~MHz", nullptr, &type,
+                                   reinterpret_cast<LPBYTE>(&data), &data_size);
+    RegCloseKey(key);
+    if (result == ERROR_SUCCESS && type == REG_DWORD &&
+        data_size == sizeof(data)) {
+      return data * 1e6;  // Value is MHz.
+    }
+  }
+  return 1.0;
+#endif  // WINAPI_PARTITION_APP && !WINAPI_PARTITION_DESKTOP
+}
+
+#elif defined(CTL_HW) && defined(HW_CPU_FREQ)
+
+static double GetNominalCPUFrequency() {
+  unsigned freq;
+  size_t size = sizeof(freq);
+  int mib[2] = {CTL_HW, HW_CPU_FREQ};
+  if (sysctl(mib, 2, &freq, &size, nullptr, 0) == 0) {
+    return static_cast<double>(freq);
+  }
+  return 1.0;
+}
+
+#else
+
+// Helper function for reading a long from a file. Returns true if successful
+// and the memory location pointed to by value is set to the value read.
+static bool ReadLongFromFile(const char *file, long *value) {
+  bool ret = false;
+#if defined(_POSIX_C_SOURCE)
+  const int file_mode = (O_RDONLY | O_CLOEXEC);
+#else
+  const int file_mode = O_RDONLY;
+#endif
+
+  int fd = open(file, file_mode);
+  if (fd != -1) {
+    char line[1024];
+    char *err;
+    memset(line, '\0', sizeof(line));
+    ssize_t len;
+    do {
+      len = read(fd, line, sizeof(line) - 1);
+    } while (len < 0 && errno == EINTR);
+    if (len <= 0) {
+      ret = false;
+    } else {
+      const long temp_value = strtol(line, &err, 10);
+      if (line[0] != '\0' && (*err == '\n' || *err == '\0')) {
+        *value = temp_value;
+        ret = true;
+      }
+    }
+    close(fd);
+  }
+  return ret;
+}
+
+#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
+
+// Reads a monotonic time source and returns a value in
+// nanoseconds. The returned value uses an arbitrary epoch, not the
+// Unix epoch.
+static int64_t ReadMonotonicClockNanos() {
+  struct timespec t;
+#ifdef CLOCK_MONOTONIC_RAW
+  int rc = clock_gettime(CLOCK_MONOTONIC_RAW, &t);
+#else
+  int rc = clock_gettime(CLOCK_MONOTONIC, &t);
+#endif
+  if (rc != 0) {
+    ABSL_INTERNAL_LOG(
+        FATAL, "clock_gettime() failed: (" + std::to_string(errno) + ")");
+  }
+  return int64_t{t.tv_sec} * 1000000000 + t.tv_nsec;
+}
+
+class UnscaledCycleClockWrapperForInitializeFrequency {
+ public:
+  static int64_t Now() { return base_internal::UnscaledCycleClock::Now(); }
+};
+
+struct TimeTscPair {
+  int64_t time;  // From ReadMonotonicClockNanos().
+  int64_t tsc;   // From UnscaledCycleClock::Now().
+};
+
+// Returns a pair of values (monotonic kernel time, TSC ticks) that
+// approximately correspond to each other.  This is accomplished by
+// doing several reads and picking the reading with the lowest
+// latency.  This approach is used to minimize the probability that
+// our thread was preempted between clock reads.
+static TimeTscPair GetTimeTscPair() {
+  int64_t best_latency = std::numeric_limits<int64_t>::max();
+  TimeTscPair best;
+  for (int i = 0; i < 10; ++i) {
+    int64_t t0 = ReadMonotonicClockNanos();
+    int64_t tsc = UnscaledCycleClockWrapperForInitializeFrequency::Now();
+    int64_t t1 = ReadMonotonicClockNanos();
+    int64_t latency = t1 - t0;
+    if (latency < best_latency) {
+      best_latency = latency;
+      best.time = t0;
+      best.tsc = tsc;
+    }
+  }
+  return best;
+}
+
+// Measures and returns the TSC frequency by taking a pair of
+// measurements approximately `sleep_nanoseconds` apart.
+static double MeasureTscFrequencyWithSleep(int sleep_nanoseconds) {
+  auto t0 = GetTimeTscPair();
+  struct timespec ts;
+  ts.tv_sec = 0;
+  ts.tv_nsec = sleep_nanoseconds;
+  while (nanosleep(&ts, &ts) != 0 && errno == EINTR) {}
+  auto t1 = GetTimeTscPair();
+  double elapsed_ticks = t1.tsc - t0.tsc;
+  double elapsed_time = (t1.time - t0.time) * 1e-9;
+  return elapsed_ticks / elapsed_time;
+}
+
+// Measures and returns the TSC frequency by calling
+// MeasureTscFrequencyWithSleep(), doubling the sleep interval until the
+// frequency measurement stabilizes.
+static double MeasureTscFrequency() {
+  double last_measurement = -1.0;
+  int sleep_nanoseconds = 1000000;  // 1 millisecond.
+  for (int i = 0; i < 8; ++i) {
+    double measurement = MeasureTscFrequencyWithSleep(sleep_nanoseconds);
+    if (measurement * 0.99 < last_measurement &&
+        last_measurement < measurement * 1.01) {
+      // Use the current measurement if it is within 1% of the
+      // previous measurement.
+      return measurement;
+    }
+    last_measurement = measurement;
+    sleep_nanoseconds *= 2;
+  }
+  return last_measurement;
+}
+
+#endif  // ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+
+static double GetNominalCPUFrequency() {
+  long freq = 0;
+
+  // Google's production kernel has a patch to export the TSC
+  // frequency through sysfs. If the kernel is exporting the TSC
+  // frequency, use that. There are issues where cpuinfo_max_freq
+  // cannot be relied on because the BIOS may be exporting an invalid
+  // p-state (on x86) or p-states may be used to put the processor in
+  // a new mode (turbo mode). Essentially, those frequencies cannot
+  // always be relied upon. The same reasons apply to /proc/cpuinfo as
+  // well.
+  if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)) {
+    return freq * 1e3;  // Value is kHz.
+  }
+
+#if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY)
+  // On these platforms, the TSC frequency is the nominal CPU
+  // frequency.  But without having the kernel export it directly
+  // though /sys/devices/system/cpu/cpu0/tsc_freq_khz, there is no
+  // other way to reliably get the TSC frequency, so we have to
+  // measure it ourselves.  Some CPUs abuse cpuinfo_max_freq by
+  // exporting "fake" frequencies for implementing new features. For
+  // example, Intel's turbo mode is enabled by exposing a p-state
+  // value with a higher frequency than that of the real TSC
+  // rate. Because of this, we prefer to measure the TSC rate
+  // ourselves on i386 and x86-64.
+  return MeasureTscFrequency();
+#else
+
+  // If CPU scaling is in effect, we want to use the *maximum*
+  // frequency, not whatever CPU speed some random processor happens
+  // to be using now.
+  if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
+                       &freq)) {
+    return freq * 1e3;  // Value is kHz.
+  }
+
+  return 1.0;
+#endif  // !ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+}
+
+#endif
+
+ABSL_CONST_INIT static once_flag init_num_cpus_once;
+ABSL_CONST_INIT static int num_cpus = 0;
+
+// NumCPUs() may be called before main() and before malloc is properly
+// initialized; therefore, this must not allocate memory.
+int NumCPUs() {
+  base_internal::LowLevelCallOnce(
+      &init_num_cpus_once, []() { num_cpus = GetNumCPUs(); });
+  return num_cpus;
+}
+
+// A default frequency of 0.0 might be dangerous if it is used in division.
+ABSL_CONST_INIT static once_flag init_nominal_cpu_frequency_once;
+ABSL_CONST_INIT static double nominal_cpu_frequency = 1.0;
+
+// NominalCPUFrequency() may be called before main() and before malloc is
+// properly initialized; therefore, this must not allocate memory.
+double NominalCPUFrequency() {
+  base_internal::LowLevelCallOnce(
+      &init_nominal_cpu_frequency_once,
+      []() { nominal_cpu_frequency = GetNominalCPUFrequency(); });
+  return nominal_cpu_frequency;
+}
+
+#if defined(_WIN32)
+
+pid_t GetTID() {
+  return pid_t{GetCurrentThreadId()};
+}
+
+#elif defined(__linux__)
+
+#ifndef SYS_gettid
+#define SYS_gettid __NR_gettid
+#endif
+
+pid_t GetTID() {
+  return static_cast<pid_t>(syscall(SYS_gettid));
+}
+
+#elif defined(__akaros__)
+
+pid_t GetTID() {
+  // Akaros has a concept of "vcore context", which is the state the program
+  // is forced into when we need to make a user-level scheduling decision, or
+  // run a signal handler.  This is analogous to the interrupt context that a
+  // CPU might enter if it encounters some kind of exception.
+  //
+  // There is no current thread context in vcore context, but we need to give
+  // a reasonable answer if asked for a thread ID (e.g., in a signal handler).
+  // Thread 0 always exists, so if we are in vcore context, we return that.
+  //
+  // Otherwise, we know (since we are using pthreads) that the uthread struct
+  // current_uthread is pointing to is the first element of a
+  // struct pthread_tcb, so we extract and return the thread ID from that.
+  //
+  // TODO(dcross): Akaros anticipates moving the thread ID to the uthread
+  // structure at some point. We should modify this code to remove the cast
+  // when that happens.
+  if (in_vcore_context())
+    return 0;
+  return reinterpret_cast<struct pthread_tcb *>(current_uthread)->id;
+}
+
+#elif defined(__myriad2__)
+
+pid_t GetTID() {
+  uint32_t tid;
+  rtems_task_ident(RTEMS_SELF, 0, &tid);
+  return tid;
+}
+
+#elif defined(__APPLE__)
+
+pid_t GetTID() {
+  uint64_t tid;
+  // `nullptr` here implies this thread.  This only fails if the specified
+  // thread is invalid or the pointer-to-tid is null, so we needn't worry about
+  // it.
+  pthread_threadid_np(nullptr, &tid);
+  return static_cast<pid_t>(tid);
+}
+
+#elif defined(__FreeBSD__)
+
+pid_t GetTID() { return static_cast<pid_t>(pthread_getthreadid_np()); }
+
+#elif defined(__OpenBSD__)
+
+pid_t GetTID() { return getthrid(); }
+
+#elif defined(__NetBSD__)
+
+pid_t GetTID() { return static_cast<pid_t>(_lwp_self()); }
+
+#elif defined(__native_client__)
+
+pid_t GetTID() {
+  auto* thread = pthread_self();
+  static_assert(sizeof(pid_t) == sizeof(thread),
+                "In NaCL int expected to be the same size as a pointer");
+  return reinterpret_cast<pid_t>(thread);
+}
+
+#else
+
+// Fallback implementation of `GetTID` using `pthread_self`.
+pid_t GetTID() {
+  // `pthread_t` need not be arithmetic per POSIX; platforms where it isn't
+  // should be handled above.
+  return static_cast<pid_t>(pthread_self());
+}
+
+#endif
+
+// GetCachedTID() caches the thread ID in thread-local storage (which is a
+// userspace construct) to avoid unnecessary system calls. Without this caching,
+// it can take roughly 98ns, while it takes roughly 1ns with this caching.
+pid_t GetCachedTID() {
+#ifdef ABSL_HAVE_THREAD_LOCAL
+  static thread_local pid_t thread_id = GetTID();
+  return thread_id;
+#else
+  return GetTID();
+#endif  // ABSL_HAVE_THREAD_LOCAL
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
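
The measurement trick in GetTimeTscPair() above (sandwich the counter read between two clock reads and keep the pair whose surrounding reads were cheapest) generalizes to any counter. A standalone sketch using only std::chrono and a caller-supplied counter, with no Abseil dependencies; the name BestPairedReading is illustrative:

#include <chrono>
#include <cstdint>
#include <limits>
#include <utility>

// Returns (steady_clock nanoseconds, counter value) sampled as close together
// as we could manage over ten attempts.
template <typename CounterFn>
std::pair<int64_t, int64_t> BestPairedReading(CounterFn read_counter) {
  using std::chrono::nanoseconds;
  using std::chrono::steady_clock;
  int64_t best_latency = std::numeric_limits<int64_t>::max();
  std::pair<int64_t, int64_t> best{0, 0};
  for (int i = 0; i < 10; ++i) {
    const auto t0 = steady_clock::now();
    const int64_t counter = read_counter();
    const auto t1 = steady_clock::now();
    const int64_t latency =
        std::chrono::duration_cast<nanoseconds>(t1 - t0).count();
    if (latency < best_latency) {
      best_latency = latency;
      best = {std::chrono::duration_cast<nanoseconds>(t0.time_since_epoch())
                  .count(),
              counter};
    }
  }
  return best;
}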

+ 74 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/sysinfo.h

@@ -0,0 +1,74 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file includes routines to find out characteristics
+// of the machine a program is running on.  It is undoubtedly
+// system-dependent.
+
+// Functions listed here that accept a pid_t as an argument act on the
+// current process if the pid_t argument is 0
+// All functions here are thread-hostile due to file caching unless
+// commented otherwise.
+
+#ifndef ABSL_BASE_INTERNAL_SYSINFO_H_
+#define ABSL_BASE_INTERNAL_SYSINFO_H_
+
+#ifndef _WIN32
+#include <sys/types.h>
+#endif
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/base/port.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Nominal core processor cycles per second of each processor.   This is _not_
+// necessarily the frequency of the CycleClock counter (see cycleclock.h).
+// Thread-safe.
+double NominalCPUFrequency();
+
+// Number of logical processors (hyperthreads) in system. Thread-safe.
+int NumCPUs();
+
+// Return the thread id of the current thread, as told by the system.
+// No two currently-live threads implemented by the OS shall have the same ID.
+// Thread ids of exited threads may be reused.   Multiple user-level threads
+// may have the same thread ID if multiplexed on the same OS thread.
+//
+// On Linux, you may send a signal to the resulting ID with kill().  However,
+// it is recommended for portability that you use pthread_kill() instead.
+#ifdef _WIN32
+// On Windows, process ids and thread ids are of the same type: the return
+// types of GetProcessId() and GetThreadId() are both DWORD, an unsigned
+// 32-bit type.
+using pid_t = uint32_t;
+#endif
+pid_t GetTID();
+
+// Like GetTID(), but caches the result in thread-local storage in order
+// to avoid unnecessary system calls. Note that there are some cases where
+// one must call through to GetTID directly, which is why this exists as a
+// separate function. For example, GetCachedTID() is not safe to call in
+// an asynchronous signal-handling context nor right after a call to fork().
+pid_t GetCachedTID();
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_SYSINFO_H_
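
A minimal sketch exercising the declarations above (internal API, shown for illustration only):

#include <cstdio>

#include "absl/base/internal/sysinfo.h"

int main() {
  std::printf("logical CPUs:      %d\n", absl::base_internal::NumCPUs());
  std::printf("nominal CPU clock: %.0f Hz\n",
              absl::base_internal::NominalCPUFrequency());
  std::printf("thread id:         %ld\n",
              static_cast<long>(absl::base_internal::GetCachedTID()));
  return 0;
}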

+ 88 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/sysinfo_test.cc

@@ -0,0 +1,88 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/sysinfo.h"
+
+#ifndef _WIN32
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#include <thread>  // NOLINT(build/c++11)
+#include <unordered_set>
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/synchronization/barrier.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+namespace {
+
+TEST(SysinfoTest, NumCPUs) {
+  EXPECT_NE(NumCPUs(), 0)
+      << "NumCPUs() should not have the default value of 0";
+}
+
+TEST(SysinfoTest, GetTID) {
+  EXPECT_EQ(GetTID(), GetTID());  // Basic compile and equality test.
+#ifdef __native_client__
+  // Native Client has a race condition bug that leads to memory
+  // exhaustion when repeatedly creating and joining threads.
+  // https://bugs.chromium.org/p/nativeclient/issues/detail?id=1027
+  return;
+#endif
+  // Test that TIDs are unique to each thread.
+  // Uses a few loops to exercise implementations that reallocate IDs.
+  for (int i = 0; i < 10; ++i) {
+    constexpr int kNumThreads = 10;
+    Barrier all_threads_done(kNumThreads);
+    std::vector<std::thread> threads;
+
+    Mutex mutex;
+    std::unordered_set<pid_t> tids;
+
+    for (int j = 0; j < kNumThreads; ++j) {
+      threads.push_back(std::thread([&]() {
+        pid_t id = GetTID();
+        {
+          MutexLock lock(&mutex);
+          ASSERT_TRUE(tids.find(id) == tids.end());
+          tids.insert(id);
+        }
+        // We can't simply join the threads here. The threads need to
+        // be alive otherwise the TID might have been reallocated to
+        // another live thread.
+        all_threads_done.Block();
+      }));
+    }
+    for (auto& thread : threads) {
+      thread.join();
+    }
+  }
+}
+
+#ifdef __linux__
+TEST(SysinfoTest, LinuxGetTID) {
+  // On Linux, for the main thread, GetTID()==getpid() is guaranteed by the API.
+  EXPECT_EQ(GetTID(), getpid());
+}
+#endif
+
+}  // namespace
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl

+ 163 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/thread_identity.cc

@@ -0,0 +1,163 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/thread_identity.h"
+
+#if !defined(_WIN32) || defined(__MINGW32__)
+#include <pthread.h>
+#ifndef __wasi__
+// WASI does not provide this header, either way we disable use
+// of signals with it below.
+#include <signal.h>
+#endif
+#endif
+
+#include <atomic>
+#include <cassert>
+#include <memory>
+
+#include "absl/base/attributes.h"
+#include "absl/base/call_once.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/internal/spinlock.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+#if ABSL_THREAD_IDENTITY_MODE != ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+namespace {
+// Used to co-ordinate one-time creation of our pthread_key
+absl::once_flag init_thread_identity_key_once;
+pthread_key_t thread_identity_pthread_key;
+std::atomic<bool> pthread_key_initialized(false);
+
+void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) {
+  pthread_key_create(&thread_identity_pthread_key, reclaimer);
+  pthread_key_initialized.store(true, std::memory_order_release);
+}
+}  // namespace
+#endif
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+    ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+// The actual TLS storage for a thread's currently associated ThreadIdentity.
+// This is referenced by inline accessors in the header.
+// "protected" visibility ensures that if multiple instances of Abseil code
+// exist within a process (via dlopen() or similar), references to
+// thread_identity_ptr from each instance of the code will refer to
+// *different* instances of this ptr.
+// Apple platforms have the visibility attribute, but issue a compile warning
+// that protected visibility is unsupported.
+ABSL_CONST_INIT  // Must come before __attribute__((visibility("protected")))
+#if ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
+    __attribute__((visibility("protected")))
+#endif  // ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__)
+#if ABSL_PER_THREAD_TLS
+    // Prefer __thread to thread_local as benchmarks indicate it is a bit
+    // faster.
+    ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr;
+#elif defined(ABSL_HAVE_THREAD_LOCAL)
+    thread_local ThreadIdentity* thread_identity_ptr = nullptr;
+#endif  // ABSL_PER_THREAD_TLS
+#endif  // TLS or CPP11
+
+void SetCurrentThreadIdentity(ThreadIdentity* identity,
+                              ThreadIdentityReclaimerFunction reclaimer) {
+  assert(CurrentThreadIdentityIfPresent() == nullptr);
+  // Associate our destructor.
+  // NOTE: This call to pthread_setspecific is currently the only immovable
+  // barrier to CurrentThreadIdentity() always being async signal safe.
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+  // NOTE: Not async-safe.  But can be open-coded.
+  absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey,
+                  reclaimer);
+
+#if defined(__wasi__) || defined(__EMSCRIPTEN__) || defined(__MINGW32__) || \
+    defined(__hexagon__)
+  // The Emscripten, WASI, and MinGW pthread implementations do not support
+  // signals. See
+  // https://kripken.github.io/emscripten-site/docs/porting/pthreads.html for
+  // more information.
+  pthread_setspecific(thread_identity_pthread_key,
+                      reinterpret_cast<void*>(identity));
+#else
+  // We must mask signals around the call to setspecific as with current glibc,
+  // a concurrent getspecific (needed for GetCurrentThreadIdentityIfPresent())
+  // may zero our value.
+  //
+  // While not officially async-signal safe, getspecific within a signal handler
+  // is otherwise OK.
+  sigset_t all_signals;
+  sigset_t curr_signals;
+  sigfillset(&all_signals);
+  pthread_sigmask(SIG_SETMASK, &all_signals, &curr_signals);
+  pthread_setspecific(thread_identity_pthread_key,
+                      reinterpret_cast<void*>(identity));
+  pthread_sigmask(SIG_SETMASK, &curr_signals, nullptr);
+#endif  // !__EMSCRIPTEN__ && !__MINGW32__
+
+#elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS
+  // NOTE: Not async-safe.  But can be open-coded.
+  absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey,
+                  reclaimer);
+  pthread_setspecific(thread_identity_pthread_key,
+                      reinterpret_cast<void*>(identity));
+  thread_identity_ptr = identity;
+#elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+  thread_local std::unique_ptr<ThreadIdentity, ThreadIdentityReclaimerFunction>
+      holder(identity, reclaimer);
+  thread_identity_ptr = identity;
+#else
+#error Unimplemented ABSL_THREAD_IDENTITY_MODE
+#endif
+}
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+    ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+
+// Please see the comment on `CurrentThreadIdentityIfPresent` in
+// thread_identity.h. When we cannot expose thread_local variables in
+// headers, we opt for the correct-but-slower option of not inlining this
+// function.
+#ifndef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT
+ThreadIdentity* CurrentThreadIdentityIfPresent() { return thread_identity_ptr; }
+#endif
+#endif
+
+void ClearCurrentThreadIdentity() {
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+    ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+  thread_identity_ptr = nullptr;
+#elif ABSL_THREAD_IDENTITY_MODE == \
+    ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+  // pthread_setspecific expected to clear value on destruction
+  assert(CurrentThreadIdentityIfPresent() == nullptr);
+#endif
+}
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+ThreadIdentity* CurrentThreadIdentityIfPresent() {
+  bool initialized = pthread_key_initialized.load(std::memory_order_acquire);
+  if (!initialized) {
+    return nullptr;
+  }
+  return reinterpret_cast<ThreadIdentity*>(
+      pthread_getspecific(thread_identity_pthread_key));
+}
+#endif
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
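
The signal masking around pthread_setspecific() above is the subtle part. A standalone sketch of the same mask-and-restore pattern, under the assumption that it wraps some short critical region; the names below are illustrative and are not code from this diff:

#include <pthread.h>
#include <signal.h>

// Runs `critical(arg)` with every blockable signal masked on the calling
// thread, then restores the previous mask. Mirrors the pattern used around
// pthread_setspecific() in thread_identity.cc.
void WithSignalsBlocked(void (*critical)(void*), void* arg) {
  sigset_t all_signals;
  sigset_t prev_signals;
  sigfillset(&all_signals);
  pthread_sigmask(SIG_SETMASK, &all_signals, &prev_signals);
  critical(arg);
  pthread_sigmask(SIG_SETMASK, &prev_signals, nullptr);
}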

+ 269 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/thread_identity.h

@@ -0,0 +1,269 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Each active thread has a ThreadIdentity that may represent the thread in
+// interfaces at various levels.  ThreadIdentity objects are never deallocated.
+// When a thread terminates, its ThreadIdentity object may be reused for a
+// thread created later.
+
+#ifndef ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
+#define ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
+
+#ifndef _WIN32
+#include <pthread.h>
+// Defines __GOOGLE_GRTE_VERSION__ (via glibc-specific features.h) when
+// supported.
+#include <unistd.h>
+#endif
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/per_thread_tls.h"
+#include "absl/base/optimization.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+struct SynchLocksHeld;
+struct SynchWaitParams;
+
+namespace base_internal {
+
+class SpinLock;
+struct ThreadIdentity;
+
+// Used by the implementation of absl::Mutex and absl::CondVar.
+struct PerThreadSynch {
+  // The internal representations of absl::Mutex and absl::CondVar rely
+  // on the alignment of PerThreadSynch. Both store the address of the
+  // PerThreadSynch in the high-order bits of their internal state,
+  // which means the low kLowZeroBits of the address of PerThreadSynch
+  // must be zero.
+  static constexpr int kLowZeroBits = 8;
+  static constexpr int kAlignment = 1 << kLowZeroBits;
+
+  // Returns the associated ThreadIdentity.
+  // This can be implemented as a cast because we guarantee
+  // PerThreadSynch is the first element of ThreadIdentity.
+  ThreadIdentity* thread_identity() {
+    return reinterpret_cast<ThreadIdentity*>(this);
+  }
+
+  PerThreadSynch* next;  // Circular waiter queue; initialized to 0.
+  PerThreadSynch* skip;  // If non-zero, all entries in Mutex queue
+                         // up to and including "skip" have same
+                         // condition as this, and will be woken later
+  bool may_skip;         // if false while on mutex queue, a mutex unlocker
+                         // is using this PerThreadSynch as a terminator.  Its
+                         // skip field must not be filled in because the loop
+                         // might then skip over the terminator.
+  bool wake;             // This thread is to be woken from a Mutex.
+  // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
+  // waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
+  //
+  // The value of "x->cond_waiter" is meaningless if "x" is not on a
+  // Mutex waiter list.
+  bool cond_waiter;
+  bool maybe_unlocking;  // Valid at head of Mutex waiter queue;
+                         // true if UnlockSlow could be searching
+                         // for a waiter to wake.  Used for an optimization
+                         // in Enqueue().  true is always a valid value.
+                         // Can be reset to false when the unlocker or any
+                         // writer releases the lock, or a reader fully
+                         // releases the lock.  It may not be set to false
+                         // by a reader that decrements the count to
+                         // non-zero.  Protected by the Mutex spinlock.
+  bool suppress_fatal_errors;  // If true, try to proceed even in the face
+                               // of broken invariants.  This is used within
+                               // fatal signal handlers to improve the
+                               // chances of debug logging information being
+                               // output successfully.
+  int priority;                // Priority of thread (updated every so often).
+
+  // State values:
+  //   kAvailable: This PerThreadSynch is available.
+  //   kQueued: This PerThreadSynch is unavailable; it's currently queued on a
+  //            Mutex or CondVar waitlist.
+  //
+  // Transitions from kQueued to kAvailable require a release
+  // barrier. This is needed as a waiter may use "state" to
+  // independently observe that it's no longer queued.
+  //
+  // Transitions from kAvailable to kQueued require no barrier, they
+  // are externally ordered by the Mutex.
+  enum State { kAvailable, kQueued };
+  std::atomic<State> state;
+
+  // The wait parameters of the current wait.  waitp is null if the
+  // thread is not waiting. Transitions from null to non-null must
+  // occur before the enqueue commit point (state = kQueued in
+  // Enqueue() and CondVarEnqueue()). Transitions from non-null to
+  // null must occur after the wait is finished (state = kAvailable in
+  // Mutex::Block() and CondVar::WaitCommon()). This field may be
+  // changed only by the thread that describes this PerThreadSynch.  A
+  // special case is Fer(), which calls Enqueue() on another thread,
+  // but with an identical SynchWaitParams pointer, thus leaving the
+  // pointer unchanged.
+  SynchWaitParams* waitp;
+
+  intptr_t readers;  // Number of readers in mutex.
+
+  // When priority will next be read (cycles).
+  int64_t next_priority_read_cycles;
+
+  // Locks held; used during deadlock detection.
+  // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity().
+  SynchLocksHeld* all_locks;
+};
+
+// The instances of this class are allocated in NewThreadIdentity() with an
+// alignment of PerThreadSynch::kAlignment.
+//
+// NOTE: The layout of fields in this structure is critical; please do not
+//       add, remove, or modify the field placements without fully auditing the
+//       layout.
+struct ThreadIdentity {
+  // Must be the first member.  The Mutex implementation requires that
+  // the PerThreadSynch object associated with each thread is
+  // PerThreadSynch::kAlignment aligned.  We provide this alignment on
+  // ThreadIdentity itself.
+  PerThreadSynch per_thread_synch;
+
+  // Private: Reserved for absl::synchronization_internal::Waiter.
+  struct WaiterState {
+    alignas(void*) char data[256];
+  } waiter_state;
+
+  // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter().
+  std::atomic<int>* blocked_count_ptr;
+
+  // The following variables are mostly read/written just by the
+  // thread itself.  The only exception is that these are read by
+  // a ticker thread as a hint.
+  std::atomic<int> ticker;      // Tick counter, incremented once per second.
+  std::atomic<int> wait_start;  // Ticker value when thread started waiting.
+  std::atomic<bool> is_idle;    // Has thread become idle yet?
+
+  ThreadIdentity* next;
+};
+
+// Returns the ThreadIdentity object representing the calling thread; guaranteed
+// to be unique for its lifetime.  The returned object will remain valid for the
+// program's lifetime, although it may be re-assigned to a subsequent thread.
+// If one does not exist, returns nullptr instead.
+//
+// Does not malloc(*), and is async-signal safe.
+// [*] Technically pthread_setspecific() does malloc on first use; however this
+// is handled internally within tcmalloc's initialization already. Note that
+// darwin does *not* use tcmalloc, so this can catch you if using MallocHooks
+// on Apple platforms. Whatever function is calling your MallocHooks will need
+// to watch for recursion on Apple platforms.
+//
+// New ThreadIdentity objects can be constructed and associated with a thread
+// by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h.
+ThreadIdentity* CurrentThreadIdentityIfPresent();
+
+using ThreadIdentityReclaimerFunction = void (*)(void*);
+
+// Sets the current thread identity to the given value.  'reclaimer' is a
+// pointer to the global function for cleaning up instances on thread
+// destruction.
+void SetCurrentThreadIdentity(ThreadIdentity* identity,
+                              ThreadIdentityReclaimerFunction reclaimer);
+
+// Removes the currently associated ThreadIdentity from the running thread.
+// This must be called from inside the ThreadIdentityReclaimerFunction, and only
+// from that function.
+void ClearCurrentThreadIdentity();
+
+// May be chosen at compile time via: -DABSL_FORCE_THREAD_IDENTITY_MODE=<mode
+// index>
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+#error ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC cannot be directly set
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC 0
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_TLS
+#error ABSL_THREAD_IDENTITY_MODE_USE_TLS cannot be directly set
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_TLS 1
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#error ABSL_THREAD_IDENTITY_MODE_USE_CPP11 cannot be directly set
+#else
+#define ABSL_THREAD_IDENTITY_MODE_USE_CPP11 2
+#endif
+
+#ifdef ABSL_THREAD_IDENTITY_MODE
+#error ABSL_THREAD_IDENTITY_MODE cannot be directly set
+#elif defined(ABSL_FORCE_THREAD_IDENTITY_MODE)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE
+#elif defined(_WIN32) && !defined(__MINGW32__)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL)
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \
+    (__GOOGLE_GRTE_VERSION__ >= 20140228L)
+// Support for async-safe TLS was specifically added in GRTEv4.  It's not
+// present in the upstream eglibc.
+// Note:  Current default for production systems.
+#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_TLS
+#else
+#define ABSL_THREAD_IDENTITY_MODE \
+  ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+#endif
+
+#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \
+    ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11
+
+#if ABSL_PER_THREAD_TLS
+ABSL_CONST_INIT extern ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity*
+    thread_identity_ptr;
+#elif defined(ABSL_HAVE_THREAD_LOCAL)
+ABSL_CONST_INIT extern thread_local ThreadIdentity* thread_identity_ptr;
+#else
+#error Thread-local storage not detected on this platform
+#endif
+
+// thread_local variables cannot be in headers exposed by DLLs or in certain
+// build configurations on Apple platforms. However, it is important for
+// performance reasons in general that `CurrentThreadIdentityIfPresent` be
+// inlined. In the other cases we opt to have the function not be inlined. Note
+// that `CurrentThreadIdentityIfPresent` is declared above so we can exclude
+// this entire inline definition.
+#if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL) && \
+    !defined(ABSL_CONSUME_DLL)
+#define ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT 1
+#endif
+
+#ifdef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT
+inline ThreadIdentity* CurrentThreadIdentityIfPresent() {
+  return thread_identity_ptr;
+}
+#endif
+
+#elif ABSL_THREAD_IDENTITY_MODE != \
+    ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC
+#error Unknown ABSL_THREAD_IDENTITY_MODE
+#endif
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
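
The kLowZeroBits / kAlignment contract documented above exists so that Mutex and CondVar can pack a PerThreadSynch address together with flag bits in a single word. A minimal illustration of that packing idea (this is not Abseil's Mutex code; all names below are made up):

#include <cassert>
#include <cstdint>

constexpr int kLowZeroBits = 8;  // as in PerThreadSynch above
constexpr uintptr_t kFlagMask = (uintptr_t{1} << kLowZeroBits) - 1;

// Packs a 256-byte-aligned pointer together with up to 8 flag bits.
uintptr_t Pack(void* aligned_ptr, uintptr_t flags) {
  auto bits = reinterpret_cast<uintptr_t>(aligned_ptr);
  assert((bits & kFlagMask) == 0);    // alignment guarantees the low bits are 0
  assert((flags & ~kFlagMask) == 0);  // flags must fit below the alignment
  return bits | flags;
}

void* UnpackPointer(uintptr_t word) {
  return reinterpret_cast<void*>(word & ~kFlagMask);
}

uintptr_t UnpackFlags(uintptr_t word) { return word & kFlagMask; }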

+ 38 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/thread_identity_benchmark.cc

@@ -0,0 +1,38 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "benchmark/benchmark.h"
+#include "absl/base/internal/thread_identity.h"
+#include "absl/synchronization/internal/create_thread_identity.h"
+#include "absl/synchronization/internal/per_thread_sem.h"
+
+namespace {
+
+void BM_SafeCurrentThreadIdentity(benchmark::State& state) {
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(
+        absl::synchronization_internal::GetOrCreateCurrentThreadIdentity());
+  }
+}
+BENCHMARK(BM_SafeCurrentThreadIdentity);
+
+void BM_UnsafeCurrentThreadIdentity(benchmark::State& state) {
+  for (auto _ : state) {
+    benchmark::DoNotOptimize(
+        absl::base_internal::CurrentThreadIdentityIfPresent());
+  }
+}
+BENCHMARK(BM_UnsafeCurrentThreadIdentity);
+
+}  // namespace

+ 129 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/thread_identity_test.cc

@@ -0,0 +1,129 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/thread_identity.h"
+
+#include <thread>  // NOLINT(build/c++11)
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/internal/spinlock.h"
+#include "absl/base/macros.h"
+#include "absl/base/thread_annotations.h"
+#include "absl/synchronization/internal/per_thread_sem.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+namespace {
+
+ABSL_CONST_INIT static absl::base_internal::SpinLock map_lock(
+    absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY);
+ABSL_CONST_INIT static int num_identities_reused ABSL_GUARDED_BY(map_lock);
+
+static const void* const kCheckNoIdentity = reinterpret_cast<void*>(1);
+
+static void TestThreadIdentityCurrent(const void* assert_no_identity) {
+  ThreadIdentity* identity;
+
+  // We have to test this conditionally, because if the test framework relies
+  // on Abseil, then some previous action may have already allocated an
+  // identity.
+  if (assert_no_identity == kCheckNoIdentity) {
+    identity = CurrentThreadIdentityIfPresent();
+    EXPECT_TRUE(identity == nullptr);
+  }
+
+  identity = synchronization_internal::GetOrCreateCurrentThreadIdentity();
+  EXPECT_TRUE(identity != nullptr);
+  ThreadIdentity* identity_no_init;
+  identity_no_init = CurrentThreadIdentityIfPresent();
+  EXPECT_TRUE(identity == identity_no_init);
+
+  // Check that per_thread_synch is correctly aligned.
+  EXPECT_EQ(0, reinterpret_cast<intptr_t>(&identity->per_thread_synch) %
+                   PerThreadSynch::kAlignment);
+  EXPECT_EQ(identity, identity->per_thread_synch.thread_identity());
+
+  absl::base_internal::SpinLockHolder l(&map_lock);
+  num_identities_reused++;
+}
+
+TEST(ThreadIdentityTest, BasicIdentityWorks) {
+  // This tests for the main() thread.
+  TestThreadIdentityCurrent(nullptr);
+}
+
+TEST(ThreadIdentityTest, BasicIdentityWorksThreaded) {
+  // Now try the same basic test with multiple threads being created and
+  // destroyed.  This makes sure that:
+  // - New threads are created without a ThreadIdentity.
+  // - We re-allocate ThreadIdentity objects from the free-list.
+  // - If a thread implementation chooses to recycle threads, correct
+  //   re-initialization occurs.
+  static const int kNumLoops = 3;
+  static const int kNumThreads = 32;
+  for (int iter = 0; iter < kNumLoops; iter++) {
+    std::vector<std::thread> threads;
+    for (int i = 0; i < kNumThreads; ++i) {
+      threads.push_back(
+          std::thread(TestThreadIdentityCurrent, kCheckNoIdentity));
+    }
+    for (auto& thread : threads) {
+      thread.join();
+    }
+  }
+
+  // We should have recycled ThreadIdentity objects above; while (external)
+  // library threads allocating their own identities may preclude some
+  // reuse, we should have sufficient repetitions to exclude this.
+  absl::base_internal::SpinLockHolder l(&map_lock);
+  EXPECT_LT(kNumThreads, num_identities_reused);
+}
+
+TEST(ThreadIdentityTest, ReusedThreadIdentityMutexTest) {
+  // This test repeatedly creates and joins a series of threads, each of
+  // which acquires and releases shared Mutex locks. This verifies
+  // Mutex operations work correctly under a reused
+  // ThreadIdentity. Note that the most likely failure mode of this
+  // test is a crash or deadlock.
+  static const int kNumLoops = 10;
+  static const int kNumThreads = 12;
+  static const int kNumMutexes = 3;
+  static const int kNumLockLoops = 5;
+
+  Mutex mutexes[kNumMutexes];
+  for (int iter = 0; iter < kNumLoops; ++iter) {
+    std::vector<std::thread> threads;
+    for (int thread = 0; thread < kNumThreads; ++thread) {
+      threads.push_back(std::thread([&]() {
+        for (int l = 0; l < kNumLockLoops; ++l) {
+          for (int m = 0; m < kNumMutexes; ++m) {
+            MutexLock lock(&mutexes[m]);
+          }
+        }
+      }));
+    }
+    for (auto& thread : threads) {
+      thread.join();
+    }
+  }
+}
+
+}  // namespace
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl

+ 203 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/throw_delegate.cc

@@ -0,0 +1,203 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/throw_delegate.h"
+
+#include <cstdlib>
+#include <functional>
+#include <new>
+#include <stdexcept>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// NOTE: The exception types, like `std::logic_error`, do not exist on all
+// platforms. (For example, the Android NDK does not have them.)
+// Therefore, their use must be guarded by `#ifdef` or equivalent.
+
+void ThrowStdLogicError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::logic_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
+}
+void ThrowStdLogicError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::logic_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
+}
+void ThrowStdInvalidArgument(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::invalid_argument(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
+}
+void ThrowStdInvalidArgument(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::invalid_argument(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
+}
+
+void ThrowStdDomainError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::domain_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
+}
+void ThrowStdDomainError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::domain_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
+}
+
+void ThrowStdLengthError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::length_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
+}
+void ThrowStdLengthError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::length_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
+}
+
+void ThrowStdOutOfRange(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::out_of_range(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
+}
+void ThrowStdOutOfRange(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::out_of_range(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
+}
+
+void ThrowStdRuntimeError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::runtime_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
+}
+void ThrowStdRuntimeError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::runtime_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
+}
+
+void ThrowStdRangeError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::range_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
+}
+void ThrowStdRangeError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::range_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
+}
+
+void ThrowStdOverflowError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::overflow_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
+}
+void ThrowStdOverflowError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::overflow_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
+}
+
+void ThrowStdUnderflowError(const std::string& what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::underflow_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str());
+  std::abort();
+#endif
+}
+void ThrowStdUnderflowError(const char* what_arg) {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::underflow_error(what_arg);
+#else
+  ABSL_RAW_LOG(FATAL, "%s", what_arg);
+  std::abort();
+#endif
+}
+
+void ThrowStdBadFunctionCall() {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::bad_function_call();
+#else
+  std::abort();
+#endif
+}
+
+void ThrowStdBadAlloc() {
+#ifdef ABSL_HAVE_EXCEPTIONS
+  throw std::bad_alloc();
+#else
+  std::abort();
+#endif
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl

+ 75 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/throw_delegate.h

@@ -0,0 +1,75 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
+#define ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
+
+#include <string>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Helper functions that allow throwing exceptions consistently from anywhere.
+// The main use case is for header-based libraries (e.g. templates), as they will
+// be built by many different targets with their own compiler options.
+// In particular, this will allow a safe way to throw exceptions even if the
+// caller is compiled with -fno-exceptions.  This is intended for implementing
+// things like map<>::at(), which the standard documents as throwing an
+// exception on error.
+//
+// Using other techniques like #if tricks could lead to ODR violations.
+//
+// You shouldn't use it unless you're writing code that you know will be built
+// both with and without exceptions and you need to conform to an interface
+// that uses exceptions.
+
+[[noreturn]] void ThrowStdLogicError(const std::string& what_arg);
+[[noreturn]] void ThrowStdLogicError(const char* what_arg);
+[[noreturn]] void ThrowStdInvalidArgument(const std::string& what_arg);
+[[noreturn]] void ThrowStdInvalidArgument(const char* what_arg);
+[[noreturn]] void ThrowStdDomainError(const std::string& what_arg);
+[[noreturn]] void ThrowStdDomainError(const char* what_arg);
+[[noreturn]] void ThrowStdLengthError(const std::string& what_arg);
+[[noreturn]] void ThrowStdLengthError(const char* what_arg);
+[[noreturn]] void ThrowStdOutOfRange(const std::string& what_arg);
+[[noreturn]] void ThrowStdOutOfRange(const char* what_arg);
+[[noreturn]] void ThrowStdRuntimeError(const std::string& what_arg);
+[[noreturn]] void ThrowStdRuntimeError(const char* what_arg);
+[[noreturn]] void ThrowStdRangeError(const std::string& what_arg);
+[[noreturn]] void ThrowStdRangeError(const char* what_arg);
+[[noreturn]] void ThrowStdOverflowError(const std::string& what_arg);
+[[noreturn]] void ThrowStdOverflowError(const char* what_arg);
+[[noreturn]] void ThrowStdUnderflowError(const std::string& what_arg);
+[[noreturn]] void ThrowStdUnderflowError(const char* what_arg);
+
+[[noreturn]] void ThrowStdBadFunctionCall();
+[[noreturn]] void ThrowStdBadAlloc();
+
+// ThrowStdBadArrayNewLength() cannot be consistently supported because
+// std::bad_array_new_length is missing in libstdc++ until 4.9.0.
+// https://gcc.gnu.org/onlinedocs/gcc-4.8.3/libstdc++/api/a01379_source.html
+// https://gcc.gnu.org/onlinedocs/gcc-4.9.0/libstdc++/api/a01327_source.html
+// libcxx (as of 3.2) and msvc (as of 2015) both have it.
+// [[noreturn]] void ThrowStdBadArrayNewLength();
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_INTERNAL_THROW_DELEGATE_H_
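
As a hypothetical illustration of the intended use, a header-only bounds-checked accessor can route all error reporting through the delegate so the throw-or-abort decision is made once, in throw_delegate.cc, regardless of how callers are compiled:

#include <cstddef>
#include <vector>

#include "absl/base/internal/throw_delegate.h"

// Behaves like std::vector::at(): throws std::out_of_range where exceptions
// are enabled, otherwise logs fatally and aborts (see throw_delegate.cc).
template <typename T>
const T& CheckedAt(const std::vector<T>& v, std::size_t i) {
  if (i >= v.size()) {
    absl::base_internal::ThrowStdOutOfRange("CheckedAt: index out of range");
  }
  return v[i];
}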

+ 68 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/tsan_mutex_interface.h

@@ -0,0 +1,68 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is intended solely for spinlock.h.
+// It provides ThreadSanitizer annotations for custom mutexes.
+// See <sanitizer/tsan_interface.h> for meaning of these annotations.
+
+#ifndef ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
+#define ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
+
+#include "absl/base/config.h"
+
+// ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+// Macro intended only for internal use.
+//
+// Checks whether LLVM Thread Sanitizer interfaces are available.
+// First made available in LLVM 5.0 (Sep 2017).
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+#error "ABSL_INTERNAL_HAVE_TSAN_INTERFACE cannot be directly set."
+#endif
+
+#if defined(ABSL_HAVE_THREAD_SANITIZER) && defined(__has_include)
+#if __has_include(<sanitizer/tsan_interface.h>)
+#define ABSL_INTERNAL_HAVE_TSAN_INTERFACE 1
+#endif
+#endif
+
+#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE
+#include <sanitizer/tsan_interface.h>
+
+#define ABSL_TSAN_MUTEX_CREATE __tsan_mutex_create
+#define ABSL_TSAN_MUTEX_DESTROY __tsan_mutex_destroy
+#define ABSL_TSAN_MUTEX_PRE_LOCK __tsan_mutex_pre_lock
+#define ABSL_TSAN_MUTEX_POST_LOCK __tsan_mutex_post_lock
+#define ABSL_TSAN_MUTEX_PRE_UNLOCK __tsan_mutex_pre_unlock
+#define ABSL_TSAN_MUTEX_POST_UNLOCK __tsan_mutex_post_unlock
+#define ABSL_TSAN_MUTEX_PRE_SIGNAL __tsan_mutex_pre_signal
+#define ABSL_TSAN_MUTEX_POST_SIGNAL __tsan_mutex_post_signal
+#define ABSL_TSAN_MUTEX_PRE_DIVERT __tsan_mutex_pre_divert
+#define ABSL_TSAN_MUTEX_POST_DIVERT __tsan_mutex_post_divert
+
+#else
+
+#define ABSL_TSAN_MUTEX_CREATE(...)
+#define ABSL_TSAN_MUTEX_DESTROY(...)
+#define ABSL_TSAN_MUTEX_PRE_LOCK(...)
+#define ABSL_TSAN_MUTEX_POST_LOCK(...)
+#define ABSL_TSAN_MUTEX_PRE_UNLOCK(...)
+#define ABSL_TSAN_MUTEX_POST_UNLOCK(...)
+#define ABSL_TSAN_MUTEX_PRE_SIGNAL(...)
+#define ABSL_TSAN_MUTEX_POST_SIGNAL(...)
+#define ABSL_TSAN_MUTEX_PRE_DIVERT(...)
+#define ABSL_TSAN_MUTEX_POST_DIVERT(...)
+
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_
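
A sketch of how a custom lock might apply these annotations so ThreadSanitizer can model its synchronization; the spinlock itself is illustrative only, and the macros compile away when the TSAN interface is absent:

#include <atomic>

#include "absl/base/internal/tsan_mutex_interface.h"

class TinySpinLock {
 public:
  TinySpinLock() { ABSL_TSAN_MUTEX_CREATE(this, 0); }
  ~TinySpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, 0); }

  void Lock() {
    ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
    while (locked_.exchange(true, std::memory_order_acquire)) {
      // spin
    }
    ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
  }

  void Unlock() {
    ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
    locked_.store(false, std::memory_order_release);
    ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
  }

 private:
  std::atomic<bool> locked_{false};
};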

+ 89 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/unaligned_access.h

@@ -0,0 +1,89 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
+#define ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
+
+#include <string.h>
+
+#include <cstdint>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/nullability.h"
+
+// unaligned APIs
+
+// Portable handling of unaligned loads, stores, and copies.
+
+// The unaligned API is C++ only.  The declarations use C++ features
+// (namespaces, inline) which are absent or incompatible in C.
+#if defined(__cplusplus)
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+inline uint16_t UnalignedLoad16(absl::Nonnull<const void *> p) {
+  uint16_t t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline uint32_t UnalignedLoad32(absl::Nonnull<const void *> p) {
+  uint32_t t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline uint64_t UnalignedLoad64(absl::Nonnull<const void *> p) {
+  uint64_t t;
+  memcpy(&t, p, sizeof t);
+  return t;
+}
+
+inline void UnalignedStore16(absl::Nonnull<void *> p, uint16_t v) {
+  memcpy(p, &v, sizeof v);
+}
+
+inline void UnalignedStore32(absl::Nonnull<void *> p, uint32_t v) {
+  memcpy(p, &v, sizeof v);
+}
+
+inline void UnalignedStore64(absl::Nonnull<void *> p, uint64_t v) {
+  memcpy(p, &v, sizeof v);
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \
+  (absl::base_internal::UnalignedLoad16(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \
+  (absl::base_internal::UnalignedLoad32(_p))
+#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \
+  (absl::base_internal::UnalignedLoad64(_p))
+
+#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \
+  (absl::base_internal::UnalignedStore16(_p, _val))
+#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \
+  (absl::base_internal::UnalignedStore32(_p, _val))
+#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \
+  (absl::base_internal::UnalignedStore64(_p, _val))
+
+#endif  // defined(__cplusplus), end of unaligned API
+
+#endif  // ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_
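
For example (a sketch, not part of the diff), a helper that decodes a 32-bit field at an arbitrary byte offset without assuming 4-byte alignment; the buffer layout is hypothetical and the result is in whatever byte order the writer used:

#include <cstddef>
#include <cstdint>

#include "absl/base/internal/unaligned_access.h"

// Reads the 32-bit value stored at buf + offset, even if that address is not
// 4-byte aligned. Equivalent to a memcpy into a local uint32_t.
uint32_t FieldAt(const unsigned char* buf, std::size_t offset) {
  return ABSL_INTERNAL_UNALIGNED_LOAD32(buf + offset);
}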

+ 77 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/unique_small_name_test.cc

@@ -0,0 +1,77 @@
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "gtest/gtest.h"
+#include "absl/base/optimization.h"
+#include "absl/strings/string_view.h"
+
+// This test by itself does not do anything fancy, but it serves as a binary
+// that can be queried in a shell test.
+
+namespace {
+
+template <class T>
+void DoNotOptimize(const T& var) {
+#ifdef __GNUC__
+  asm volatile("" : "+m"(const_cast<T&>(var)));
+#else
+  std::cout << (void*)&var;
+#endif
+}
+
+int very_long_int_variable_name ABSL_INTERNAL_UNIQUE_SMALL_NAME() = 0;
+char very_long_str_variable_name[] ABSL_INTERNAL_UNIQUE_SMALL_NAME() = "abc";
+
+TEST(UniqueSmallName, NonAutomaticVar) {
+  EXPECT_EQ(very_long_int_variable_name, 0);
+  EXPECT_EQ(absl::string_view(very_long_str_variable_name), "abc");
+}
+
+int VeryLongFreeFunctionName() ABSL_INTERNAL_UNIQUE_SMALL_NAME();
+
+TEST(UniqueSmallName, FreeFunction) {
+  DoNotOptimize(&VeryLongFreeFunctionName);
+
+  EXPECT_EQ(VeryLongFreeFunctionName(), 456);
+}
+
+int VeryLongFreeFunctionName() { return 456; }
+
+struct VeryLongStructName {
+  explicit VeryLongStructName(int i);
+
+  int VeryLongMethodName() ABSL_INTERNAL_UNIQUE_SMALL_NAME();
+
+  static int VeryLongStaticMethodName() ABSL_INTERNAL_UNIQUE_SMALL_NAME();
+
+ private:
+  int fld;
+};
+
+TEST(UniqueSmallName, Struct) {
+  VeryLongStructName var(10);
+
+  DoNotOptimize(var);
+  DoNotOptimize(&VeryLongStructName::VeryLongMethodName);
+  DoNotOptimize(&VeryLongStructName::VeryLongStaticMethodName);
+
+  EXPECT_EQ(var.VeryLongMethodName(), 10);
+  EXPECT_EQ(VeryLongStructName::VeryLongStaticMethodName(), 123);
+}
+
+VeryLongStructName::VeryLongStructName(int i) : fld(i) {}
+int VeryLongStructName::VeryLongMethodName() { return fld; }
+int VeryLongStructName::VeryLongStaticMethodName() { return 123; }
+
+}  // namespace

+ 140 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/unscaledcycleclock.cc

@@ -0,0 +1,140 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/unscaledcycleclock.h"
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+#if defined(_WIN32)
+#include <intrin.h>
+#endif
+
+#if defined(__powerpc__) || defined(__ppc__)
+#ifdef __GLIBC__
+#include <sys/platform/ppc.h>
+#elif defined(__FreeBSD__)
+// clang-format off
+// This order does actually matter =(.
+#include <sys/types.h>
+#include <sys/sysctl.h>
+// clang-format on
+
+#include "absl/base/call_once.h"
+#endif
+#endif
+
+#include "absl/base/internal/sysinfo.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+#if defined(__i386__)
+
+int64_t UnscaledCycleClock::Now() {
+  int64_t ret;
+  __asm__ volatile("rdtsc" : "=A"(ret));
+  return ret;
+}
+
+double UnscaledCycleClock::Frequency() {
+  return base_internal::NominalCPUFrequency();
+}
+
+#elif defined(__x86_64__)
+
+double UnscaledCycleClock::Frequency() {
+  return base_internal::NominalCPUFrequency();
+}
+
+#elif defined(__powerpc__) || defined(__ppc__)
+
+int64_t UnscaledCycleClock::Now() {
+#ifdef __GLIBC__
+  return __ppc_get_timebase();
+#else
+#ifdef __powerpc64__
+  int64_t tbr;
+  asm volatile("mfspr %0, 268" : "=r"(tbr));
+  return tbr;
+#else
+  int32_t tbu, tbl, tmp;
+  asm volatile(
+      "mftbu %[hi32]\n"
+      "mftb %[lo32]\n"
+      "mftbu %[tmp]\n"
+      "cmpw %[tmp],%[hi32]\n"
+      "bne $-16\n"  // Retry on failure.
+      : [hi32] "=r"(tbu), [lo32] "=r"(tbl), [tmp] "=r"(tmp));
+  return (static_cast<int64_t>(tbu) << 32) | tbl;
+#endif
+#endif
+}
+
+double UnscaledCycleClock::Frequency() {
+#ifdef __GLIBC__
+  return __ppc_get_timebase_freq();
+#elif defined(_AIX)
+  // This is the same constant value as returned by
+  // __ppc_get_timebase_freq().
+  return static_cast<double>(512000000);
+#elif defined(__FreeBSD__)
+  static once_flag init_timebase_frequency_once;
+  static double timebase_frequency = 0.0;
+  base_internal::LowLevelCallOnce(&init_timebase_frequency_once, [&]() {
+    size_t length = sizeof(timebase_frequency);
+    sysctlbyname("kern.timecounter.tc.timebase.frequency", &timebase_frequency,
+                 &length, nullptr, 0);
+  });
+  return timebase_frequency;
+#else
+#error Must implement UnscaledCycleClock::Frequency()
+#endif
+}
+
+#elif defined(__aarch64__)
+
+// The system timer of ARMv8 runs at a different frequency than the CPU's.
+// The frequency is fixed, typically in the range 1-50 MHz.  It can be
+// read from the CNTFRQ special register.  We assume the OS has set up
+// the virtual timer properly.
+int64_t UnscaledCycleClock::Now() {
+  int64_t virtual_timer_value;
+  asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
+  return virtual_timer_value;
+}
+
+double UnscaledCycleClock::Frequency() {
+  uint64_t aarch64_timer_frequency;
+  asm volatile("mrs %0, cntfrq_el0" : "=r"(aarch64_timer_frequency));
+  return aarch64_timer_frequency;
+}
+
+#elif defined(_M_IX86) || defined(_M_X64)
+
+#pragma intrinsic(__rdtsc)
+
+int64_t UnscaledCycleClock::Now() { return __rdtsc(); }
+
+double UnscaledCycleClock::Frequency() {
+  return base_internal::NominalCPUFrequency();
+}
+
+#endif
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_USE_UNSCALED_CYCLECLOCK

+ 96 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/unscaledcycleclock.h

@@ -0,0 +1,96 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// UnscaledCycleClock
+//    An UnscaledCycleClock yields the value and frequency of a cycle counter
+//    that increments at a rate that is approximately constant.
+//    This class is for internal use only; you should consider using CycleClock
+//    instead.
+//
+// Notes:
+// The cycle counter frequency is not necessarily the core clock frequency.
+// That is, CycleCounter cycles are not necessarily "CPU cycles".
+//
+// An arbitrary offset may have been added to the counter at power on.
+//
+// On some platforms, the rate and offset of the counter may differ
+// slightly when read from different CPUs of a multiprocessor.  Usually,
+// we try to ensure that the operating system adjusts values periodically
+// so that values agree approximately.   If you need stronger guarantees,
+// consider using alternate interfaces.
+//
+// The CPU is not required to maintain the ordering of a cycle counter read
+// with respect to surrounding instructions.
+
+#ifndef ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
+#define ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
+
+#include <cstdint>
+
+#if defined(__APPLE__)
+#include <TargetConditionals.h>
+#endif
+
+#include "absl/base/config.h"
+#include "absl/base/internal/unscaledcycleclock_config.h"
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace time_internal {
+class UnscaledCycleClockWrapperForGetCurrentTime;
+}  // namespace time_internal
+
+namespace base_internal {
+class CycleClock;
+class UnscaledCycleClockWrapperForInitializeFrequency;
+
+class UnscaledCycleClock {
+ private:
+  UnscaledCycleClock() = delete;
+
+  // Return the value of a cycle counter that counts at a rate that is
+  // approximately constant.
+  static int64_t Now();
+
+  // Returns how much UnscaledCycleClock::Now() increases per second.
+  // This is not necessarily the core CPU clock frequency.
+  // It may be the nominal value reported by the kernel, rather than a measured
+  // value.
+  static double Frequency();
+
+  // Allowed users
+  friend class base_internal::CycleClock;
+  friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime;
+  friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency;
+};
+
+#if defined(__x86_64__)
+
+inline int64_t UnscaledCycleClock::Now() {
+  uint64_t low, high;
+  __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
+  return static_cast<int64_t>((high << 32) | low);
+}
+
+#endif
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_USE_UNSCALED_CYCLECLOCK
+
+#endif  // ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_
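
Because every member of UnscaledCycleClock is private to the listed friend classes, it cannot be called from ordinary code; the standalone sketch below merely restates the x86-64 rdtsc pattern shown above for readers who want to experiment with it outside Abseil:

#include <cstdint>

// Reads the x86-64 time-stamp counter, mirroring the inline assembly in the
// header; returns 0 on other targets in this illustration.
inline int64_t ReadTimestampCounter() {
#if defined(__x86_64__)
  uint64_t low, high;
  __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
  return static_cast<int64_t>((high << 32) | low);
#else
  return 0;
#endif
}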

+ 62 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/internal/unscaledcycleclock_config.h

@@ -0,0 +1,62 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_CONFIG_H_
+#define ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_CONFIG_H_
+
+#if defined(__APPLE__)
+#include <TargetConditionals.h>
+#endif
+
+// The following platforms have an implementation of a hardware counter.
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \
+    defined(__powerpc__) || defined(__ppc__) || defined(_M_IX86) ||     \
+    (defined(_M_X64) && !defined(_M_ARM64EC))
+#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1
+#else
+#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0
+#endif
+
+// The following platforms often disable access to the hardware
+// counter (through a sandbox) even if the underlying hardware has a
+// usable counter. The CycleTimer interface also requires a *scaled*
+// CycleClock that runs at at least 1 MHz. We've found some Android
+// ARM64 devices where this is not the case, so we disable it by
+// default on Android ARM64.
+#if defined(__native_client__) || (defined(__APPLE__)) || \
+    (defined(__ANDROID__) && defined(__aarch64__))
+#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 0
+#else
+#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 1
+#endif
+
+// UnscaledCycleClock is an optional internal feature.
+// Use "#if ABSL_USE_UNSCALED_CYCLECLOCK" to test for its presence.
+// Can be overridden at compile-time via -DABSL_USE_UNSCALED_CYCLECLOCK=0|1
+#if !defined(ABSL_USE_UNSCALED_CYCLECLOCK)
+#define ABSL_USE_UNSCALED_CYCLECLOCK               \
+  (ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION && \
+   ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT)
+#endif
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+// This macro can be used to test if UnscaledCycleClock::Frequency()
+// is NominalCPUFrequency() on a particular platform.
+#if (defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || \
+     defined(_M_X64))
+#define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+#endif
+#endif
+
+#endif  // ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_CONFIG_H_

+ 331 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/invoke_test.cc

@@ -0,0 +1,331 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/internal/invoke.h"
+
+#include <functional>
+#include <memory>
+#include <string>
+#include <utility>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/memory/memory.h"
+#include "absl/strings/str_cat.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+namespace {
+
+int Function(int a, int b) { return a - b; }
+
+void VoidFunction(int& a, int& b) {
+  a += b;
+  b = a - b;
+  a -= b;
+}
+
+int ZeroArgFunction() { return -1937; }
+
+int Sink(std::unique_ptr<int> p) {
+  return *p;
+}
+
+std::unique_ptr<int> Factory(int n) {
+  return make_unique<int>(n);
+}
+
+void NoOp() {}
+
+struct ConstFunctor {
+  int operator()(int a, int b) const { return a - b; }
+};
+
+struct MutableFunctor {
+  int operator()(int a, int b) { return a - b; }
+};
+
+struct EphemeralFunctor {
+  int operator()(int a, int b) && { return a - b; }
+};
+
+struct OverloadedFunctor {
+  template <typename... Args>
+  std::string operator()(const Args&... args) & {
+    return StrCat("&", args...);
+  }
+  template <typename... Args>
+  std::string operator()(const Args&... args) const& {
+    return StrCat("const&", args...);
+  }
+  template <typename... Args>
+  std::string operator()(const Args&... args) && {
+    return StrCat("&&", args...);
+  }
+};
+
+struct Class {
+  int Method(int a, int b) { return a - b; }
+  int ConstMethod(int a, int b) const { return a - b; }
+  int RefMethod(int a, int b) & { return a - b; }
+  int RefRefMethod(int a, int b) && { return a - b; }
+  int NoExceptMethod(int a, int b) noexcept { return a - b; }
+  int VolatileMethod(int a, int b) volatile { return a - b; }
+
+  int member;
+};
+
+struct FlipFlop {
+  int ConstMethod() const { return member; }
+  FlipFlop operator*() const { return {-member}; }
+
+  int member;
+};
+
+// CallMaybeWithArg(f) resolves either to invoke(f) or invoke(f, 42), depending
+// on which one is valid.
+template <typename F>
+decltype(base_internal::invoke(std::declval<const F&>())) CallMaybeWithArg(
+    const F& f) {
+  return base_internal::invoke(f);
+}
+
+template <typename F>
+decltype(base_internal::invoke(std::declval<const F&>(), 42)) CallMaybeWithArg(
+    const F& f) {
+  return base_internal::invoke(f, 42);
+}
+
+TEST(InvokeTest, Function) {
+  EXPECT_EQ(1, base_internal::invoke(Function, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Function, 3, 2));
+}
+
+TEST(InvokeTest, NonCopyableArgument) {
+  EXPECT_EQ(42, base_internal::invoke(Sink, make_unique<int>(42)));
+}
+
+TEST(InvokeTest, NonCopyableResult) {
+  EXPECT_THAT(base_internal::invoke(Factory, 42), ::testing::Pointee(42));
+}
+
+TEST(InvokeTest, VoidResult) { base_internal::invoke(NoOp); }
+
+TEST(InvokeTest, ConstFunctor) {
+  EXPECT_EQ(1, base_internal::invoke(ConstFunctor(), 3, 2));
+}
+
+TEST(InvokeTest, MutableFunctor) {
+  MutableFunctor f;
+  EXPECT_EQ(1, base_internal::invoke(f, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(MutableFunctor(), 3, 2));
+}
+
+TEST(InvokeTest, EphemeralFunctor) {
+  EphemeralFunctor f;
+  EXPECT_EQ(1, base_internal::invoke(std::move(f), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(EphemeralFunctor(), 3, 2));
+}
+
+TEST(InvokeTest, OverloadedFunctor) {
+  OverloadedFunctor f;
+  const OverloadedFunctor& cf = f;
+
+  EXPECT_EQ("&", base_internal::invoke(f));
+  EXPECT_EQ("& 42", base_internal::invoke(f, " 42"));
+
+  EXPECT_EQ("const&", base_internal::invoke(cf));
+  EXPECT_EQ("const& 42", base_internal::invoke(cf, " 42"));
+
+  EXPECT_EQ("&&", base_internal::invoke(std::move(f)));
+
+  OverloadedFunctor f2;
+  EXPECT_EQ("&& 42", base_internal::invoke(std::move(f2), " 42"));
+}
+
+TEST(InvokeTest, ReferenceWrapper) {
+  ConstFunctor cf;
+  MutableFunctor mf;
+  EXPECT_EQ(1, base_internal::invoke(std::cref(cf), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(std::ref(cf), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(std::ref(mf), 3, 2));
+}
+
+TEST(InvokeTest, MemberFunction) {
+  std::unique_ptr<Class> p(new Class);
+  std::unique_ptr<const Class> cp(new Class);
+  std::unique_ptr<volatile Class> vp(new Class);
+
+  EXPECT_EQ(1, base_internal::invoke(&Class::Method, p, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::Method, p.get(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::Method, *p, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::RefMethod, p, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::RefMethod, p.get(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::RefMethod, *p, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::RefRefMethod, std::move(*p), 3,
+                                     2));  // NOLINT
+  EXPECT_EQ(1, base_internal::invoke(&Class::NoExceptMethod, p, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::NoExceptMethod, p.get(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::NoExceptMethod, *p, 3, 2));
+
+  EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, p, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, p.get(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, *p, 3, 2));
+
+  EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, cp, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, cp.get(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, *cp, 3, 2));
+
+  EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, p, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, p.get(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, *p, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, vp, 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, vp.get(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, *vp, 3, 2));
+
+  EXPECT_EQ(1,
+            base_internal::invoke(&Class::Method, make_unique<Class>(), 3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, make_unique<Class>(),
+                                     3, 2));
+  EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod,
+                                     make_unique<const Class>(), 3, 2));
+}
+
+TEST(InvokeTest, DataMember) {
+  std::unique_ptr<Class> p(new Class{42});
+  std::unique_ptr<const Class> cp(new Class{42});
+  EXPECT_EQ(42, base_internal::invoke(&Class::member, p));
+  EXPECT_EQ(42, base_internal::invoke(&Class::member, *p));
+  EXPECT_EQ(42, base_internal::invoke(&Class::member, p.get()));
+
+  base_internal::invoke(&Class::member, p) = 42;
+  base_internal::invoke(&Class::member, p.get()) = 42;
+
+  EXPECT_EQ(42, base_internal::invoke(&Class::member, cp));
+  EXPECT_EQ(42, base_internal::invoke(&Class::member, *cp));
+  EXPECT_EQ(42, base_internal::invoke(&Class::member, cp.get()));
+}
+
+TEST(InvokeTest, FlipFlop) {
+  FlipFlop obj = {42};
+  // This call could resolve to (obj.*&FlipFlop::ConstMethod)() or
+  // ((*obj).*&FlipFlop::ConstMethod)(). We verify that it's the former.
+  EXPECT_EQ(42, base_internal::invoke(&FlipFlop::ConstMethod, obj));
+  EXPECT_EQ(42, base_internal::invoke(&FlipFlop::member, obj));
+}
+
+TEST(InvokeTest, SfinaeFriendly) {
+  CallMaybeWithArg(NoOp);
+  EXPECT_THAT(CallMaybeWithArg(Factory), ::testing::Pointee(42));
+}
+
+TEST(IsInvocableRTest, CallableExactMatch) {
+  static_assert(
+      base_internal::is_invocable_r<int, decltype(Function), int, int>::value,
+      "Should be true for exact match of types on a free function");
+}
+
+TEST(IsInvocableRTest, CallableArgumentConversionMatch) {
+  static_assert(
+      base_internal::is_invocable_r<int, decltype(Function), char, int>::value,
+      "Should be true for convertible argument type");
+}
+
+TEST(IsInvocableRTest, CallableReturnConversionMatch) {
+  static_assert(base_internal::is_invocable_r<double, decltype(Function), int,
+                                              int>::value,
+                "Should be true for convertible return type");
+}
+
+TEST(IsInvocableRTest, CallableReturnVoid) {
+  static_assert(base_internal::is_invocable_r<void, decltype(VoidFunction),
+                                              int&, int&>::value,
+                "Should be true for void expected and actual return types");
+  static_assert(
+      base_internal::is_invocable_r<void, decltype(Function), int, int>::value,
+      "Should be true for void expected and non-void actual return types");
+}
+
+TEST(IsInvocableRTest, CallableRefQualifierMismatch) {
+  static_assert(!base_internal::is_invocable_r<void, decltype(VoidFunction),
+                                               int&, const int&>::value,
+                "Should be false for reference constness mismatch");
+  static_assert(!base_internal::is_invocable_r<void, decltype(VoidFunction),
+                                               int&&, int&>::value,
+                "Should be false for reference value category mismatch");
+}
+
+TEST(IsInvocableRTest, CallableArgumentTypeMismatch) {
+  static_assert(!base_internal::is_invocable_r<int, decltype(Function),
+                                               std::string, int>::value,
+                "Should be false for argument type mismatch");
+}
+
+TEST(IsInvocableRTest, CallableReturnTypeMismatch) {
+  static_assert(!base_internal::is_invocable_r<std::string, decltype(Function),
+                                               int, int>::value,
+                "Should be false for return type mismatch");
+}
+
+TEST(IsInvocableRTest, CallableTooFewArgs) {
+  static_assert(
+      !base_internal::is_invocable_r<int, decltype(Function), int>::value,
+      "Should be false for too few arguments");
+}
+
+TEST(IsInvocableRTest, CallableTooManyArgs) {
+  static_assert(!base_internal::is_invocable_r<int, decltype(Function), int,
+                                               int, int>::value,
+                "Should be false for too many arguments");
+}
+
+TEST(IsInvocableRTest, MemberFunctionAndReference) {
+  static_assert(base_internal::is_invocable_r<int, decltype(&Class::Method),
+                                              Class&, int, int>::value,
+                "Should be true for exact match of types on a member function "
+                "and class reference");
+}
+
+TEST(IsInvocableRTest, MemberFunctionAndPointer) {
+  static_assert(base_internal::is_invocable_r<int, decltype(&Class::Method),
+                                              Class*, int, int>::value,
+                "Should be true for exact match of types on a member function "
+                "and class pointer");
+}
+
+TEST(IsInvocableRTest, DataMemberAndReference) {
+  static_assert(base_internal::is_invocable_r<int, decltype(&Class::member),
+                                              Class&>::value,
+                "Should be true for exact match of types on a data member and "
+                "class reference");
+}
+
+TEST(IsInvocableRTest, DataMemberAndPointer) {
+  static_assert(base_internal::is_invocable_r<int, decltype(&Class::member),
+                                              Class*>::value,
+                "Should be true for exact match of types on a data member and "
+                "class pointer");
+}
+
+TEST(IsInvocableRTest, CallableZeroArgs) {
+  static_assert(
+      base_internal::is_invocable_r<int, decltype(ZeroArgFunction)>::value,
+      "Should be true for exact match for a zero-arg free function");
+}
+
+}  // namespace
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
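
A minimal usage sketch for reference (not part of the vendored sources above): judging from these tests, `absl::base_internal::invoke` mirrors C++17 `std::invoke` for pre-C++17 toolchains. It is an internal API, so this is illustrative only; the `Greeter` type is hypothetical and the snippet assumes the vendored Abseil headers are on the include path.

#include <iostream>

#include "absl/base/internal/invoke.h"

struct Greeter {
  int times;
  int Repeat(int extra) const { return times + extra; }
};

int main() {
  Greeter g{2};
  // Pointer-to-member-function with an object reference, as exercised above.
  std::cout << absl::base_internal::invoke(&Greeter::Repeat, g, 3) << "\n";  // 5
  // Pointer-to-data-member with an object pointer.
  std::cout << absl::base_internal::invoke(&Greeter::times, &g) << "\n";     // 2
  return 0;
}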

+ 56 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/log_severity.cc

@@ -0,0 +1,56 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/log_severity.h"
+
+#include <ostream>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+std::ostream& operator<<(std::ostream& os, absl::LogSeverity s) {
+  if (s == absl::NormalizeLogSeverity(s)) return os << absl::LogSeverityName(s);
+  return os << "absl::LogSeverity(" << static_cast<int>(s) << ")";
+}
+
+std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtLeast s) {
+  switch (s) {
+    case absl::LogSeverityAtLeast::kInfo:
+    case absl::LogSeverityAtLeast::kWarning:
+    case absl::LogSeverityAtLeast::kError:
+    case absl::LogSeverityAtLeast::kFatal:
+      return os << ">=" << static_cast<absl::LogSeverity>(s);
+    case absl::LogSeverityAtLeast::kInfinity:
+      return os << "INFINITY";
+  }
+  return os;
+}
+
+std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtMost s) {
+  switch (s) {
+    case absl::LogSeverityAtMost::kInfo:
+    case absl::LogSeverityAtMost::kWarning:
+    case absl::LogSeverityAtMost::kError:
+    case absl::LogSeverityAtMost::kFatal:
+      return os << "<=" << static_cast<absl::LogSeverity>(s);
+    case absl::LogSeverityAtMost::kNegativeInfinity:
+      return os << "NEGATIVE_INFINITY";
+  }
+  return os;
+}
+ABSL_NAMESPACE_END
+}  // namespace absl

+ 185 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/log_severity.h

@@ -0,0 +1,185 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_LOG_SEVERITY_H_
+#define ABSL_BASE_LOG_SEVERITY_H_
+
+#include <array>
+#include <ostream>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// absl::LogSeverity
+//
+// Four severity levels are defined. Logging APIs should terminate the program
+// when a message is logged at severity `kFatal`; the other levels have no
+// special semantics.
+//
+// Values other than the four defined levels (e.g. produced by `static_cast`)
+// are valid, but their semantics when passed to a function, macro, or flag
+// depend on the function, macro, or flag. The usual behavior is to normalize
+// such values to a defined severity level; however, in some cases values other
+// than the defined levels are useful for comparison.
+//
+// Example:
+//
+//   // Effectively disables all logging:
+//   SetMinLogLevel(static_cast<absl::LogSeverity>(100));
+//
+// Abseil flags may be defined with type `LogSeverity`. Dependency layering
+// constraints require that the `AbslParseFlag()` overload be declared and
+// defined in the flags library itself rather than here. The `AbslUnparseFlag()`
+// overload is defined there as well for consistency.
+//
+// absl::LogSeverity Flag String Representation
+//
+// An `absl::LogSeverity` has a string representation used for parsing
+// command-line flags based on the enumerator name (e.g. `kFatal`) or
+// its unprefixed name (without the `k`) in any case-insensitive form. (E.g.
+// "FATAL", "fatal" or "Fatal" are all valid.) Unparsing such flags produces an
+// unprefixed string representation in all caps (e.g. "FATAL") or an integer.
+//
+// Additionally, the parser accepts arbitrary integers (as if the type were
+// `int`).
+//
+// Examples:
+//
+//   --my_log_level=kInfo
+//   --my_log_level=INFO
+//   --my_log_level=info
+//   --my_log_level=0
+//
+// `DFATAL` and `kLogDebugFatal` are similarly accepted.
+//
+// Unparsing a flag produces the same result as `absl::LogSeverityName()` for
+// the standard levels and a base-ten integer otherwise.
+enum class LogSeverity : int {
+  kInfo = 0,
+  kWarning = 1,
+  kError = 2,
+  kFatal = 3,
+};
+
+// LogSeverities()
+//
+// Returns an iterable of all standard `absl::LogSeverity` values, ordered from
+// least to most severe.
+constexpr std::array<absl::LogSeverity, 4> LogSeverities() {
+  return {{absl::LogSeverity::kInfo, absl::LogSeverity::kWarning,
+           absl::LogSeverity::kError, absl::LogSeverity::kFatal}};
+}
+
+// `absl::kLogDebugFatal` equals `absl::LogSeverity::kFatal` in debug builds
+// (i.e. when `NDEBUG` is not defined) and `absl::LogSeverity::kError`
+// otherwise.  Avoid ODR-using this variable as it has internal linkage and thus
+// distinct storage in different TUs.
+#ifdef NDEBUG
+static constexpr absl::LogSeverity kLogDebugFatal = absl::LogSeverity::kError;
+#else
+static constexpr absl::LogSeverity kLogDebugFatal = absl::LogSeverity::kFatal;
+#endif
+
+// LogSeverityName()
+//
+// Returns the all-caps string representation (e.g. "INFO") of the specified
+// severity level if it is one of the standard levels and "UNKNOWN" otherwise.
+constexpr const char* LogSeverityName(absl::LogSeverity s) {
+  switch (s) {
+    case absl::LogSeverity::kInfo: return "INFO";
+    case absl::LogSeverity::kWarning: return "WARNING";
+    case absl::LogSeverity::kError: return "ERROR";
+    case absl::LogSeverity::kFatal: return "FATAL";
+  }
+  return "UNKNOWN";
+}
+
+// NormalizeLogSeverity()
+//
+// Values less than `kInfo` normalize to `kInfo`; values greater than `kFatal`
+// normalize to `kError` (**NOT** `kFatal`).
+constexpr absl::LogSeverity NormalizeLogSeverity(absl::LogSeverity s) {
+  absl::LogSeverity n = s;
+  if (n < absl::LogSeverity::kInfo) n = absl::LogSeverity::kInfo;
+  if (n > absl::LogSeverity::kFatal) n = absl::LogSeverity::kError;
+  return n;
+}
+constexpr absl::LogSeverity NormalizeLogSeverity(int s) {
+  return absl::NormalizeLogSeverity(static_cast<absl::LogSeverity>(s));
+}
+
+// operator<<
+//
+// The exact representation of a streamed `absl::LogSeverity` is deliberately
+// unspecified; do not rely on it.
+std::ostream& operator<<(std::ostream& os, absl::LogSeverity s);
+
+// Enums representing a lower bound for LogSeverity. APIs that only operate on
+// messages of at least a certain level (for example, `SetMinLogLevel()`) use
+// this type to specify that level. absl::LogSeverityAtLeast::kInfinity is
+// a level above all threshold levels and therefore no log message will
+// ever meet this threshold.
+enum class LogSeverityAtLeast : int {
+  kInfo = static_cast<int>(absl::LogSeverity::kInfo),
+  kWarning = static_cast<int>(absl::LogSeverity::kWarning),
+  kError = static_cast<int>(absl::LogSeverity::kError),
+  kFatal = static_cast<int>(absl::LogSeverity::kFatal),
+  kInfinity = 1000,
+};
+
+std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtLeast s);
+
+// Enums representing an upper bound for LogSeverity. APIs that only operate on
+// messages of at most a certain level (for example, buffer all messages at or
+// below a certain level) use this type to specify that level.
+// absl::LogSeverityAtMost::kNegativeInfinity is a level below all threshold
+// levels and therefore will exclude all log messages.
+enum class LogSeverityAtMost : int {
+  kNegativeInfinity = -1000,
+  kInfo = static_cast<int>(absl::LogSeverity::kInfo),
+  kWarning = static_cast<int>(absl::LogSeverity::kWarning),
+  kError = static_cast<int>(absl::LogSeverity::kError),
+  kFatal = static_cast<int>(absl::LogSeverity::kFatal),
+};
+
+std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtMost s);
+
+#define COMPOP(op1, op2, T)                                         \
+  constexpr bool operator op1(absl::T lhs, absl::LogSeverity rhs) { \
+    return static_cast<absl::LogSeverity>(lhs) op1 rhs;             \
+  }                                                                 \
+  constexpr bool operator op2(absl::LogSeverity lhs, absl::T rhs) { \
+    return lhs op2 static_cast<absl::LogSeverity>(rhs);             \
+  }
+
+// Comparisons between `LogSeverity` and `LogSeverityAtLeast`/
+// `LogSeverityAtMost` are only supported in one direction.
+// Valid checks are:
+//   LogSeverity >= LogSeverityAtLeast
+//   LogSeverity < LogSeverityAtLeast
+//   LogSeverity <= LogSeverityAtMost
+//   LogSeverity > LogSeverityAtMost
+COMPOP(>, <, LogSeverityAtLeast)
+COMPOP(<=, >=, LogSeverityAtLeast)
+COMPOP(<, >, LogSeverityAtMost)
+COMPOP(>=, <=, LogSeverityAtMost)
+#undef COMPOP
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_LOG_SEVERITY_H_
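
For reference, a small sketch (not part of the diff) exercising the public helpers declared in log_severity.h; it assumes only that the vendored header is on the include path.

#include <iostream>

#include "absl/base/log_severity.h"

int main() {
  // Canonical name of a standard level.
  std::cout << absl::LogSeverityName(absl::LogSeverity::kWarning) << "\n";  // WARNING
  // Values above kFatal normalize to kError, not kFatal.
  std::cout << absl::NormalizeLogSeverity(100) << "\n";                     // ERROR
  // Threshold comparison in the direction the header supports.
  constexpr auto min_level = absl::LogSeverityAtLeast::kWarning;
  if (absl::LogSeverity::kError >= min_level) {
    std::cout << "passes threshold " << min_level << "\n";                  // >=WARNING
  }
  return 0;
}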

+ 251 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/log_severity_test.cc

@@ -0,0 +1,251 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/log_severity.h"
+
+#include <cstdint>
+#include <ios>
+#include <limits>
+#include <ostream>
+#include <sstream>
+#include <string>
+#include <tuple>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/flags/internal/flag.h"
+#include "absl/flags/marshalling.h"
+#include "absl/strings/str_cat.h"
+
+namespace {
+using ::testing::Eq;
+using ::testing::IsFalse;
+using ::testing::IsTrue;
+using ::testing::TestWithParam;
+using ::testing::Values;
+
+template <typename T>
+std::string StreamHelper(T value) {
+  std::ostringstream stream;
+  stream << value;
+  return stream.str();
+}
+
+TEST(StreamTest, Works) {
+  EXPECT_THAT(StreamHelper(static_cast<absl::LogSeverity>(-100)),
+              Eq("absl::LogSeverity(-100)"));
+  EXPECT_THAT(StreamHelper(absl::LogSeverity::kInfo), Eq("INFO"));
+  EXPECT_THAT(StreamHelper(absl::LogSeverity::kWarning), Eq("WARNING"));
+  EXPECT_THAT(StreamHelper(absl::LogSeverity::kError), Eq("ERROR"));
+  EXPECT_THAT(StreamHelper(absl::LogSeverity::kFatal), Eq("FATAL"));
+  EXPECT_THAT(StreamHelper(static_cast<absl::LogSeverity>(4)),
+              Eq("absl::LogSeverity(4)"));
+}
+
+static_assert(absl::flags_internal::FlagUseValueAndInitBitStorage<
+                  absl::LogSeverity>::value,
+              "Flags of type absl::LogSeverity ought to be lock-free.");
+
+using ParseFlagFromOutOfRangeIntegerTest = TestWithParam<int64_t>;
+INSTANTIATE_TEST_SUITE_P(
+    Instantiation, ParseFlagFromOutOfRangeIntegerTest,
+    Values(static_cast<int64_t>(std::numeric_limits<int>::min()) - 1,
+           static_cast<int64_t>(std::numeric_limits<int>::max()) + 1));
+TEST_P(ParseFlagFromOutOfRangeIntegerTest, ReturnsError) {
+  const std::string to_parse = absl::StrCat(GetParam());
+  absl::LogSeverity value;
+  std::string error;
+  EXPECT_THAT(absl::ParseFlag(to_parse, &value, &error), IsFalse()) << value;
+}
+
+using ParseFlagFromAlmostOutOfRangeIntegerTest = TestWithParam<int>;
+INSTANTIATE_TEST_SUITE_P(Instantiation,
+                         ParseFlagFromAlmostOutOfRangeIntegerTest,
+                         Values(std::numeric_limits<int>::min(),
+                                std::numeric_limits<int>::max()));
+TEST_P(ParseFlagFromAlmostOutOfRangeIntegerTest, YieldsExpectedValue) {
+  const auto expected = static_cast<absl::LogSeverity>(GetParam());
+  const std::string to_parse = absl::StrCat(GetParam());
+  absl::LogSeverity value;
+  std::string error;
+  ASSERT_THAT(absl::ParseFlag(to_parse, &value, &error), IsTrue()) << error;
+  EXPECT_THAT(value, Eq(expected));
+}
+
+using ParseFlagFromIntegerMatchingEnumeratorTest =
+    TestWithParam<std::tuple<absl::string_view, absl::LogSeverity>>;
+INSTANTIATE_TEST_SUITE_P(
+    Instantiation, ParseFlagFromIntegerMatchingEnumeratorTest,
+    Values(std::make_tuple("0", absl::LogSeverity::kInfo),
+           std::make_tuple(" 0", absl::LogSeverity::kInfo),
+           std::make_tuple("-0", absl::LogSeverity::kInfo),
+           std::make_tuple("+0", absl::LogSeverity::kInfo),
+           std::make_tuple("00", absl::LogSeverity::kInfo),
+           std::make_tuple("0 ", absl::LogSeverity::kInfo),
+           std::make_tuple("0x0", absl::LogSeverity::kInfo),
+           std::make_tuple("1", absl::LogSeverity::kWarning),
+           std::make_tuple("+1", absl::LogSeverity::kWarning),
+           std::make_tuple("2", absl::LogSeverity::kError),
+           std::make_tuple("3", absl::LogSeverity::kFatal)));
+TEST_P(ParseFlagFromIntegerMatchingEnumeratorTest, YieldsExpectedValue) {
+  const absl::string_view to_parse = std::get<0>(GetParam());
+  const absl::LogSeverity expected = std::get<1>(GetParam());
+  absl::LogSeverity value;
+  std::string error;
+  ASSERT_THAT(absl::ParseFlag(to_parse, &value, &error), IsTrue()) << error;
+  EXPECT_THAT(value, Eq(expected));
+}
+
+using ParseFlagFromOtherIntegerTest =
+    TestWithParam<std::tuple<absl::string_view, int>>;
+INSTANTIATE_TEST_SUITE_P(Instantiation, ParseFlagFromOtherIntegerTest,
+                         Values(std::make_tuple("-1", -1),
+                                std::make_tuple("4", 4),
+                                std::make_tuple("010", 10),
+                                std::make_tuple("0x10", 16)));
+TEST_P(ParseFlagFromOtherIntegerTest, YieldsExpectedValue) {
+  const absl::string_view to_parse = std::get<0>(GetParam());
+  const auto expected = static_cast<absl::LogSeverity>(std::get<1>(GetParam()));
+  absl::LogSeverity value;
+  std::string error;
+  ASSERT_THAT(absl::ParseFlag(to_parse, &value, &error), IsTrue()) << error;
+  EXPECT_THAT(value, Eq(expected));
+}
+
+using ParseFlagFromEnumeratorTest =
+    TestWithParam<std::tuple<absl::string_view, absl::LogSeverity>>;
+INSTANTIATE_TEST_SUITE_P(
+    Instantiation, ParseFlagFromEnumeratorTest,
+    Values(std::make_tuple("INFO", absl::LogSeverity::kInfo),
+           std::make_tuple("info", absl::LogSeverity::kInfo),
+           std::make_tuple("kInfo", absl::LogSeverity::kInfo),
+           std::make_tuple("iNfO", absl::LogSeverity::kInfo),
+           std::make_tuple("kInFo", absl::LogSeverity::kInfo),
+           std::make_tuple("WARNING", absl::LogSeverity::kWarning),
+           std::make_tuple("warning", absl::LogSeverity::kWarning),
+           std::make_tuple("kWarning", absl::LogSeverity::kWarning),
+           std::make_tuple("WaRnInG", absl::LogSeverity::kWarning),
+           std::make_tuple("KwArNiNg", absl::LogSeverity::kWarning),
+           std::make_tuple("ERROR", absl::LogSeverity::kError),
+           std::make_tuple("error", absl::LogSeverity::kError),
+           std::make_tuple("kError", absl::LogSeverity::kError),
+           std::make_tuple("eRrOr", absl::LogSeverity::kError),
+           std::make_tuple("kErRoR", absl::LogSeverity::kError),
+           std::make_tuple("FATAL", absl::LogSeverity::kFatal),
+           std::make_tuple("fatal", absl::LogSeverity::kFatal),
+           std::make_tuple("kFatal", absl::LogSeverity::kFatal),
+           std::make_tuple("FaTaL", absl::LogSeverity::kFatal),
+           std::make_tuple("KfAtAl", absl::LogSeverity::kFatal),
+           std::make_tuple("DFATAL", absl::kLogDebugFatal),
+           std::make_tuple("dfatal", absl::kLogDebugFatal),
+           std::make_tuple("kLogDebugFatal", absl::kLogDebugFatal),
+           std::make_tuple("dFaTaL", absl::kLogDebugFatal),
+           std::make_tuple("kLoGdEbUgFaTaL", absl::kLogDebugFatal)));
+TEST_P(ParseFlagFromEnumeratorTest, YieldsExpectedValue) {
+  const absl::string_view to_parse = std::get<0>(GetParam());
+  const absl::LogSeverity expected = std::get<1>(GetParam());
+  absl::LogSeverity value;
+  std::string error;
+  ASSERT_THAT(absl::ParseFlag(to_parse, &value, &error), IsTrue()) << error;
+  EXPECT_THAT(value, Eq(expected));
+}
+
+using ParseFlagFromGarbageTest = TestWithParam<absl::string_view>;
+INSTANTIATE_TEST_SUITE_P(Instantiation, ParseFlagFromGarbageTest,
+                         Values("", "\0", " ", "garbage", "kkinfo", "I",
+                                "kDFATAL", "LogDebugFatal", "lOgDeBuGfAtAl"));
+TEST_P(ParseFlagFromGarbageTest, ReturnsError) {
+  const absl::string_view to_parse = GetParam();
+  absl::LogSeverity value;
+  std::string error;
+  EXPECT_THAT(absl::ParseFlag(to_parse, &value, &error), IsFalse()) << value;
+}
+
+using UnparseFlagToEnumeratorTest =
+    TestWithParam<std::tuple<absl::LogSeverity, absl::string_view>>;
+INSTANTIATE_TEST_SUITE_P(
+    Instantiation, UnparseFlagToEnumeratorTest,
+    Values(std::make_tuple(absl::LogSeverity::kInfo, "INFO"),
+           std::make_tuple(absl::LogSeverity::kWarning, "WARNING"),
+           std::make_tuple(absl::LogSeverity::kError, "ERROR"),
+           std::make_tuple(absl::LogSeverity::kFatal, "FATAL")));
+TEST_P(UnparseFlagToEnumeratorTest, ReturnsExpectedValueAndRoundTrips) {
+  const absl::LogSeverity to_unparse = std::get<0>(GetParam());
+  const absl::string_view expected = std::get<1>(GetParam());
+  const std::string stringified_value = absl::UnparseFlag(to_unparse);
+  EXPECT_THAT(stringified_value, Eq(expected));
+  absl::LogSeverity reparsed_value;
+  std::string error;
+  EXPECT_THAT(absl::ParseFlag(stringified_value, &reparsed_value, &error),
+              IsTrue());
+  EXPECT_THAT(reparsed_value, Eq(to_unparse));
+}
+
+using UnparseFlagToOtherIntegerTest = TestWithParam<int>;
+INSTANTIATE_TEST_SUITE_P(Instantiation, UnparseFlagToOtherIntegerTest,
+                         Values(std::numeric_limits<int>::min(), -1, 4,
+                                std::numeric_limits<int>::max()));
+TEST_P(UnparseFlagToOtherIntegerTest, ReturnsExpectedValueAndRoundTrips) {
+  const absl::LogSeverity to_unparse =
+      static_cast<absl::LogSeverity>(GetParam());
+  const std::string expected = absl::StrCat(GetParam());
+  const std::string stringified_value = absl::UnparseFlag(to_unparse);
+  EXPECT_THAT(stringified_value, Eq(expected));
+  absl::LogSeverity reparsed_value;
+  std::string error;
+  EXPECT_THAT(absl::ParseFlag(stringified_value, &reparsed_value, &error),
+              IsTrue());
+  EXPECT_THAT(reparsed_value, Eq(to_unparse));
+}
+
+TEST(LogThresholdTest, LogSeverityAtLeastTest) {
+  EXPECT_LT(absl::LogSeverity::kError, absl::LogSeverityAtLeast::kFatal);
+  EXPECT_GT(absl::LogSeverityAtLeast::kError, absl::LogSeverity::kInfo);
+
+  EXPECT_LE(absl::LogSeverityAtLeast::kInfo, absl::LogSeverity::kError);
+  EXPECT_GE(absl::LogSeverity::kError, absl::LogSeverityAtLeast::kInfo);
+}
+
+TEST(LogThresholdTest, LogSeverityAtMostTest) {
+  EXPECT_GT(absl::LogSeverity::kError, absl::LogSeverityAtMost::kWarning);
+  EXPECT_LT(absl::LogSeverityAtMost::kError, absl::LogSeverity::kFatal);
+
+  EXPECT_GE(absl::LogSeverityAtMost::kFatal, absl::LogSeverity::kError);
+  EXPECT_LE(absl::LogSeverity::kWarning, absl::LogSeverityAtMost::kError);
+}
+
+TEST(LogThresholdTest, Extremes) {
+  EXPECT_LT(absl::LogSeverity::kFatal, absl::LogSeverityAtLeast::kInfinity);
+  EXPECT_GT(absl::LogSeverity::kInfo,
+            absl::LogSeverityAtMost::kNegativeInfinity);
+}
+
+TEST(LogThresholdTest, Output) {
+  EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kInfo), Eq(">=INFO"));
+  EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kWarning),
+              Eq(">=WARNING"));
+  EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kError), Eq(">=ERROR"));
+  EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kFatal), Eq(">=FATAL"));
+  EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kInfinity),
+              Eq("INFINITY"));
+
+  EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kInfo), Eq("<=INFO"));
+  EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kWarning), Eq("<=WARNING"));
+  EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kError), Eq("<=ERROR"));
+  EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kFatal), Eq("<=FATAL"));
+  EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kNegativeInfinity),
+              Eq("NEGATIVE_INFINITY"));
+}
+
+}  // namespace

+ 189 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/macros.h

@@ -0,0 +1,189 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: macros.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the set of language macros used within Abseil code.
+// For the set of macros used to determine supported compilers and platforms,
+// see absl/base/config.h instead.
+//
+// This code is compiled directly on many platforms, including client
+// platforms like Windows, Mac, and embedded systems.  Before making
+// any changes here, make sure that you're not breaking any platforms.
+
+#ifndef ABSL_BASE_MACROS_H_
+#define ABSL_BASE_MACROS_H_
+
+#include <cassert>
+#include <cstddef>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/optimization.h"
+#include "absl/base/port.h"
+
+// ABSL_ARRAYSIZE()
+//
+// Returns the number of elements in an array as a compile-time constant, which
+// can be used in defining new arrays. If you use this macro on a pointer by
+// mistake, you will get a compile-time error.
+#define ABSL_ARRAYSIZE(array) \
+  (sizeof(::absl::macros_internal::ArraySizeHelper(array)))
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace macros_internal {
+// Note: this internal template function declaration is used by ABSL_ARRAYSIZE.
+// The function doesn't need a definition, as we only use its type.
+template <typename T, size_t N>
+auto ArraySizeHelper(const T (&array)[N]) -> char (&)[N];
+}  // namespace macros_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+// ABSL_BAD_CALL_IF()
+//
+// Used on a function overload to trap bad calls: any call that matches the
+// overload will cause a compile-time error. This macro uses a clang-specific
+// "enable_if" attribute, as described at
+// https://clang.llvm.org/docs/AttributeReference.html#enable-if
+//
+// Overloads which use this macro should be bracketed by
+// `#ifdef ABSL_BAD_CALL_IF`.
+//
+// Example:
+//
+//   int isdigit(int c);
+//   #ifdef ABSL_BAD_CALL_IF
+//   int isdigit(int c)
+//     ABSL_BAD_CALL_IF(c <= -1 || c > 255,
+//                       "'c' must have the value of an unsigned char or EOF");
+//   #endif // ABSL_BAD_CALL_IF
+#if ABSL_HAVE_ATTRIBUTE(enable_if)
+#define ABSL_BAD_CALL_IF(expr, msg) \
+  __attribute__((enable_if(expr, "Bad call trap"), unavailable(msg)))
+#endif
+
+// ABSL_ASSERT()
+//
+// In C++11, `assert` can't be used portably within constexpr functions.
+// ABSL_ASSERT functions as a runtime assert but works in C++11 constexpr
+// functions.  Example:
+//
+// constexpr double Divide(double a, double b) {
+//   return ABSL_ASSERT(b != 0), a / b;
+// }
+//
+// This macro is inspired by
+// https://akrzemi1.wordpress.com/2017/05/18/asserts-in-constexpr-functions/
+#if defined(NDEBUG)
+#define ABSL_ASSERT(expr) \
+  (false ? static_cast<void>(expr) : static_cast<void>(0))
+#else
+#define ABSL_ASSERT(expr)                           \
+  (ABSL_PREDICT_TRUE((expr)) ? static_cast<void>(0) \
+                             : [] { assert(false && #expr); }())  // NOLINT
+#endif
+
+// `ABSL_INTERNAL_HARDENING_ABORT()` controls how `ABSL_HARDENING_ASSERT()`
+// aborts the program in release mode (when NDEBUG is defined). The
+// implementation should abort the program as quickly as possible and ideally it
+// should not be possible to ignore the abort request.
+#define ABSL_INTERNAL_HARDENING_ABORT()   \
+  do {                                    \
+    ABSL_INTERNAL_IMMEDIATE_ABORT_IMPL(); \
+    ABSL_INTERNAL_UNREACHABLE_IMPL();     \
+  } while (false)
+
+// ABSL_HARDENING_ASSERT()
+//
+// `ABSL_HARDENING_ASSERT()` is like `ABSL_ASSERT()`, but used to implement
+// runtime assertions that should be enabled in hardened builds even when
+// `NDEBUG` is defined.
+//
+// When `NDEBUG` is not defined, `ABSL_HARDENING_ASSERT()` is identical to
+// `ABSL_ASSERT()`.
+//
+// See `ABSL_OPTION_HARDENED` in `absl/base/options.h` for more information on
+// hardened mode.
+#if ABSL_OPTION_HARDENED == 1 && defined(NDEBUG)
+#define ABSL_HARDENING_ASSERT(expr)                 \
+  (ABSL_PREDICT_TRUE((expr)) ? static_cast<void>(0) \
+                             : [] { ABSL_INTERNAL_HARDENING_ABORT(); }())
+#else
+#define ABSL_HARDENING_ASSERT(expr) ABSL_ASSERT(expr)
+#endif
+
+#ifdef ABSL_HAVE_EXCEPTIONS
+#define ABSL_INTERNAL_TRY try
+#define ABSL_INTERNAL_CATCH_ANY catch (...)
+#define ABSL_INTERNAL_RETHROW do { throw; } while (false)
+#else  // ABSL_HAVE_EXCEPTIONS
+#define ABSL_INTERNAL_TRY if (true)
+#define ABSL_INTERNAL_CATCH_ANY else if (false)
+#define ABSL_INTERNAL_RETHROW do {} while (false)
+#endif  // ABSL_HAVE_EXCEPTIONS
+
+// ABSL_DEPRECATE_AND_INLINE()
+//
+// Marks a function or type alias as deprecated and tags it to be picked up for
+// automated refactoring by go/cpp-inliner. It can be added to inline function
+// definitions or type aliases. It should only be used within a header file. It
+// differs from `ABSL_DEPRECATED` in the following ways:
+//
+// 1. New uses of the function or type will be discouraged via Tricorder
+//    warnings.
+// 2. If enabled via `METADATA`, automated changes will be sent out inlining the
+//    function's body or replacing the type where it is used.
+//
+// For example:
+//
+// ABSL_DEPRECATE_AND_INLINE() inline int OldFunc(int x) {
+//   return NewFunc(x, 0);
+// }
+//
+// will mark `OldFunc` as deprecated, and the go/cpp-inliner service will
+// replace calls to `OldFunc(x)` with calls to `NewFunc(x, 0)`. Once all calls
+// to `OldFunc` have been replaced, `OldFunc` can be deleted.
+//
+// See go/cpp-inliner for more information.
+//
+// Note: go/cpp-inliner is a Google-internal service for automated refactoring.
+// While open-source users do not have access to this service, the macro is
+// provided for compatibility, and so that users receive deprecation warnings.
+#if ABSL_HAVE_CPP_ATTRIBUTE(deprecated) && \
+    ABSL_HAVE_CPP_ATTRIBUTE(clang::annotate)
+#define ABSL_DEPRECATE_AND_INLINE() [[deprecated, clang::annotate("inline-me")]]
+#elif ABSL_HAVE_CPP_ATTRIBUTE(deprecated)
+#define ABSL_DEPRECATE_AND_INLINE() [[deprecated]]
+#else
+#define ABSL_DEPRECATE_AND_INLINE()
+#endif
+
+// Requires the compiler to prove that the size of the given object is at least
+// the expected amount.
+#if ABSL_HAVE_ATTRIBUTE(diagnose_if) && ABSL_HAVE_BUILTIN(__builtin_object_size)
+#define ABSL_INTERNAL_NEED_MIN_SIZE(Obj, N)                     \
+  __attribute__((diagnose_if(__builtin_object_size(Obj, 0) < N, \
+                             "object size provably too small "  \
+                             "(this would corrupt memory)",     \
+                             "error")))
+#else
+#define ABSL_INTERNAL_NEED_MIN_SIZE(Obj, N)
+#endif
+
+#endif  // ABSL_BASE_MACROS_H_
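
A short sketch (not part of the diff) of the two most commonly used macros from macros.h, assuming the vendored headers are on the include path:

#include <iostream>

#include "absl/base/macros.h"

// ABSL_ASSERT is usable inside constexpr functions; under NDEBUG it compiles
// to a constant and the expression is not evaluated.
constexpr double Halve(double x) { return ABSL_ASSERT(x >= 0), x / 2; }

int main() {
  const int primes[] = {2, 3, 5, 7, 11};
  // Compile-time element count; passing a pointer instead of an array
  // would fail to compile.
  static_assert(ABSL_ARRAYSIZE(primes) == 5, "expected five elements");
  std::cout << ABSL_ARRAYSIZE(primes) << " " << Halve(8.0) << "\n";  // 5 4
  return 0;
}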

+ 218 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/no_destructor.h

@@ -0,0 +1,218 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: no_destructor.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the absl::NoDestructor<T> wrapper for defining a
+// static type that does not need to be destructed upon program exit. Instead,
+// such an object survives during program exit (and can be safely accessed at
+// any time).
+//
+// Objects of such type, if constructed safely and under the right conditions,
+// provide two main benefits over other alternatives:
+//
+//   * Global objects not normally allowed due to concerns of destruction order
+//     (i.e. no "complex globals") can be safely allowed, provided that such
+//     objects can be constant initialized.
+//   * Function scope static objects can be optimized to avoid heap allocation
+//     and pointer chasing, and to allow lazy construction.
+//
+// See below for complete details.
+
+
+#ifndef ABSL_BASE_NO_DESTRUCTOR_H_
+#define ABSL_BASE_NO_DESTRUCTOR_H_
+
+#include <new>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/config.h"
+#include "absl/base/nullability.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// absl::NoDestructor<T>
+//
+// NoDestructor<T> is a wrapper around an object of type T that behaves as an
+// object of type T but never calls T's destructor. NoDestructor<T> makes it
+// safer and/or more efficient to use such objects in static storage contexts:
+// as global or function scope static variables.
+//
+// An instance of absl::NoDestructor<T> has similar type semantics to an
+// instance of T:
+//
+// * Constructs in the same manner as an object of type T through perfect
+//   forwarding.
+// * Provides pointer/reference semantic access to the object of type T via
+//   `->`, `*`, and `get()`.
+//   (Note that `const NoDestructor<T>` works like a pointer to const `T`.)
+//
+// An object of type NoDestructor<T> should be defined in static storage:
+// as either a global static object, or as a function scope static variable.
+//
+// Additionally, NoDestructor<T> provides the following benefits:
+//
+// * Never calls T's destructor for the object
+// * If the object is a function-local static variable, the type can be
+//   lazily constructed.
+//
+// An object of type NoDestructor<T> is "trivially destructible" in the sense
+// that its destructor is never run. Provided that an object of this type can be
+// safely initialized and does not need to be cleaned up on program shutdown,
+// NoDestructor<T> allows you to define global static variables, since Google's
+// C++ style guide ban on such objects doesn't apply to objects that are
+// trivially destructible.
+//
+// Usage as Global Static Variables
+//
+// NoDestructor<T> allows declaration of a global object with a non-trivial
+// constructor in static storage without needing to add a destructor.
+// However, initialization order is still a concern for such objects, so they
+// should be constant initialized:
+//
+//    // Global or namespace scope.
+//    constinit absl::NoDestructor<MyRegistry> reg{"foo", "bar", 8008};
+//
+// Note that if your object already has a trivial destructor, you don't need to
+// use NoDestructor<T>.
+//
+// Usage as Function Scope Static Variables
+//
+// Function static objects will be lazily initialized within static storage:
+//
+//    // Function scope.
+//    const std::string& MyString() {
+//      static const absl::NoDestructor<std::string> x("foo");
+//      return *x;
+//    }
+//
+// For function static variables, NoDestructor avoids heap allocation and can be
+// inlined in static storage, resulting in exactly-once, thread-safe
+// construction of an object, and very fast access thereafter (the cost is a few
+// extra cycles).
+//
+// Using NoDestructor<T> in this manner is generally better than other patterns
+// which require pointer chasing:
+//
+//   // Prefer using absl::NoDestructor<T> instead for the static variable.
+//   const std::string& MyString() {
+//     static const std::string* x = new std::string("foo");
+//     return *x;
+//   }
+//
+template <typename T>
+class NoDestructor {
+ public:
+  // Forwards arguments to the T's constructor: calls T(args...).
+  template <typename... Ts,
+            // Disable this overload when it might collide with copy/move.
+            typename std::enable_if<!std::is_same<void(std::decay_t<Ts>&...),
+                                                  void(NoDestructor&)>::value,
+                                    int>::type = 0>
+  explicit constexpr NoDestructor(Ts&&... args)
+      : impl_(std::forward<Ts>(args)...) {}
+
+  // Forwards copy and move construction for T. Enables usage like this:
+  //   static NoDestructor<std::array<string, 3>> x{{{"1", "2", "3"}}};
+  //   static NoDestructor<std::vector<int>> x{{1, 2, 3}};
+  explicit constexpr NoDestructor(const T& x) : impl_(x) {}
+  explicit constexpr NoDestructor(T&& x)
+      : impl_(std::move(x)) {}
+
+  // No copying.
+  NoDestructor(const NoDestructor&) = delete;
+  NoDestructor& operator=(const NoDestructor&) = delete;
+
+  // Pretend to be a smart pointer to T with deep constness.
+  // Never returns a null pointer.
+  T& operator*() { return *get(); }
+  absl::Nonnull<T*> operator->() { return get(); }
+  absl::Nonnull<T*> get() { return impl_.get(); }
+  const T& operator*() const { return *get(); }
+  absl::Nonnull<const T*> operator->() const { return get(); }
+  absl::Nonnull<const T*> get() const { return impl_.get(); }
+
+ private:
+  class DirectImpl {
+   public:
+    template <typename... Args>
+    explicit constexpr DirectImpl(Args&&... args)
+        : value_(std::forward<Args>(args)...) {}
+    absl::Nonnull<const T*> get() const { return &value_; }
+    absl::Nonnull<T*> get() { return &value_; }
+
+   private:
+    T value_;
+  };
+
+  class PlacementImpl {
+   public:
+    template <typename... Args>
+    explicit PlacementImpl(Args&&... args) {
+      new (&space_) T(std::forward<Args>(args)...);
+    }
+    absl::Nonnull<const T*> get() const {
+      return Launder(reinterpret_cast<const T*>(&space_));
+    }
+    absl::Nonnull<T*> get() { return Launder(reinterpret_cast<T*>(&space_)); }
+
+   private:
+    template <typename P>
+    static absl::Nonnull<P*> Launder(absl::Nonnull<P*> p) {
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606L
+      return std::launder(p);
+#elif ABSL_HAVE_BUILTIN(__builtin_launder)
+      return __builtin_launder(p);
+#else
+      // When `std::launder` or equivalent are not available, we rely on
+      // undefined behavior, which works as intended on Abseil's officially
+      // supported platforms as of Q3 2023.
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wstrict-aliasing"
+#endif
+      return p;
+#if defined(__GNUC__) && !defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+#endif
+    }
+
+    alignas(T) unsigned char space_[sizeof(T)];
+  };
+
+  // If the object is trivially destructible we use a member directly to avoid
+  // potential once-init runtime initialization. It somewhat defeats the
+  // purpose of NoDestructor in this case, but this makes the class more
+  // friendly to generic code.
+  std::conditional_t<std::is_trivially_destructible<T>::value, DirectImpl,
+                     PlacementImpl>
+      impl_;
+};
+
+#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
+// Provide 'Class Template Argument Deduction': the type of NoDestructor's T
+// will be the same type as the argument passed to NoDestructor's constructor.
+template <typename T>
+NoDestructor(T) -> NoDestructor<T>;
+#endif  // ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_BASE_NO_DESTRUCTOR_H_
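
The function-scope pattern documented above, as a self-contained sketch (not part of the diff; the function name is hypothetical):

#include <iostream>
#include <string>

#include "absl/base/no_destructor.h"

// Constructed thread-safely on first use and never destroyed, so there is no
// destruction-order hazard at program exit.
const std::string& DefaultGreeting() {
  static const absl::NoDestructor<std::string> kGreeting("hello, world");
  return *kGreeting;
}

int main() {
  std::cout << DefaultGreeting() << "\n";
  return 0;
}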

+ 165 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/no_destructor_benchmark.cc

@@ -0,0 +1,165 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cstdint>
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/no_destructor.h"
+#include "benchmark/benchmark.h"
+
+namespace {
+
+// Number of static-NoDestructor-in-a-function to exercise.
+// This must be low enough not to hit template instantiation limits
+// (happens around 1000).
+constexpr int kNumObjects = 1;  // set to 512 when doing benchmarks
+                                // 1 is faster to compile: just one templated
+                                // function instantiation
+
+// Size of individual objects to benchmark static-NoDestructor-in-a-function
+// usage with.
+constexpr int kObjSize = sizeof(void*)*1;
+
+// Simple object of kObjSize bytes (rounded to int).
+// We benchmark complete reading of its state via Verify().
+class BM_Blob {
+ public:
+  BM_Blob(int val) { for (auto& d : data_) d = val; }
+  BM_Blob() : BM_Blob(-1) {}
+  void Verify(int val) const {  // val must be the c-tor argument
+    for (auto& d : data_) ABSL_INTERNAL_CHECK(d == val, "");
+  }
+ private:
+  int data_[kObjSize / sizeof(int) > 0 ? kObjSize / sizeof(int) : 1];
+};
+
+// static-NoDestructor-in-a-function pattern instances.
+// We'll instantiate kNumObjects of them.
+template<int i>
+const BM_Blob& NoDestrBlobFunc() {
+  static absl::NoDestructor<BM_Blob> x(i);
+  return *x;
+}
+
+// static-heap-ptr-in-a-function pattern instances
+// We'll instantiate kNumObjects of them.
+template<int i>
+const BM_Blob& OnHeapBlobFunc() {
+  static BM_Blob* x = new BM_Blob(i);
+  return *x;
+}
+
+// Type for NoDestrBlobFunc or OnHeapBlobFunc.
+typedef const BM_Blob& (*FuncType)();
+
+// ========================================================================= //
+// Simple benchmarks that read a single BM_Blob over and over, hence
+// all they touch fits into L1 CPU cache:
+
+// Direct non-POD global variable (style guide violation) as a baseline.
+static BM_Blob direct_blob(0);
+
+void BM_Direct(benchmark::State& state) {
+  for (auto s : state) {
+    direct_blob.Verify(0);
+  }
+}
+BENCHMARK(BM_Direct);
+
+void BM_NoDestr(benchmark::State& state) {
+  for (auto s : state) {
+    NoDestrBlobFunc<0>().Verify(0);
+  }
+}
+BENCHMARK(BM_NoDestr);
+
+void BM_OnHeap(benchmark::State& state) {
+  for (auto s : state) {
+    OnHeapBlobFunc<0>().Verify(0);
+  }
+}
+BENCHMARK(BM_OnHeap);
+
+// ========================================================================= //
+// Benchmarks that read kNumObjects of BM_Blob over and over, hence with
+// appropriate values of sizeof(BM_Blob) and kNumObjects their working set
+// can exceed a given layer of CPU cache.
+
+// Type of benchmark to select between NoDestrBlobFunc and OnHeapBlobFunc.
+enum BM_Type { kNoDestr, kOnHeap, kDirect };
+
+// BlobFunc<n>(t, i) returns the i-th function of type t.
+// n must be larger than i (we'll use kNumObjects for n).
+template<int n>
+FuncType BlobFunc(BM_Type t, int i) {
+  if (i == n) {
+    switch (t) {
+      case kNoDestr:  return &NoDestrBlobFunc<n>;
+      case kOnHeap:   return &OnHeapBlobFunc<n>;
+      case kDirect:   return nullptr;
+    }
+  }
+  return BlobFunc<n-1>(t, i);
+}
+
+template<>
+FuncType BlobFunc<0>(BM_Type t, int i) {
+  ABSL_INTERNAL_CHECK(i == 0, "");
+  switch (t) {
+    case kNoDestr:  return &NoDestrBlobFunc<0>;
+    case kOnHeap:   return &OnHeapBlobFunc<0>;
+    case kDirect:   return nullptr;
+  }
+  return nullptr;
+}
+
+// Direct non-POD global variables (style guide violation) as a baseline.
+static BM_Blob direct_blobs[kNumObjects];
+
+// Helper that cheaply maps benchmark iteration to randomish index in
+// [0, kNumObjects).
+int RandIdx(int i) {
+  // int64 is to avoid overflow and generating negative return values:
+  return (static_cast<int64_t>(i) * 13) % kNumObjects;
+}
+
+// Generic benchmark working with kNumObjects for any of the possible BM_Type.
+template <BM_Type t>
+void BM_Many(benchmark::State& state) {
+  FuncType funcs[kNumObjects];
+  for (int i = 0; i < kNumObjects; ++i) {
+    funcs[i] = BlobFunc<kNumObjects-1>(t, i);
+  }
+  if (t == kDirect) {
+    for (auto s : state) {
+      int idx = RandIdx(state.iterations());
+      direct_blobs[idx].Verify(-1);
+    }
+  } else {
+    for (auto s : state) {
+      int idx = RandIdx(state.iterations());
+      funcs[idx]().Verify(idx);
+    }
+  }
+}
+
+void BM_DirectMany(benchmark::State& state) { BM_Many<kDirect>(state); }
+void BM_NoDestrMany(benchmark::State& state) { BM_Many<kNoDestr>(state); }
+void BM_OnHeapMany(benchmark::State& state) { BM_Many<kOnHeap>(state); }
+
+BENCHMARK(BM_DirectMany);
+BENCHMARK(BM_NoDestrMany);
+BENCHMARK(BM_OnHeapMany);
+
+}  // namespace

+ 209 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/no_destructor_test.cc

@@ -0,0 +1,209 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/no_destructor.h"
+
+#include <array>
+#include <initializer_list>
+#include <string>
+#include <type_traits>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/raw_logging.h"
+
+namespace {
+
+struct Blob {
+  Blob() : val(42) {}
+  Blob(int x, int y) : val(x + y) {}
+  Blob(std::initializer_list<int> xs) {
+    val = 0;
+    for (auto& x : xs) val += x;
+  }
+
+  Blob(const Blob& /*b*/) = delete;
+  Blob(Blob&& b) noexcept : val(b.val) {
+    b.moved_out = true;
+  }  // moving is fine
+
+  // no crash: NoDestructor indeed does not destruct (the moved-out Blob
+  // temporaries do get destroyed though)
+  ~Blob() { ABSL_INTERNAL_CHECK(moved_out, "~Blob"); }
+
+  int val;
+  bool moved_out = false;
+};
+
+struct TypeWithDeletedDestructor {
+  ~TypeWithDeletedDestructor() = delete;
+};
+
+TEST(NoDestructorTest, DestructorNeverCalled) {
+  absl::NoDestructor<TypeWithDeletedDestructor> a;
+  (void)a;
+}
+
+TEST(NoDestructorTest, Noncopyable) {
+  using T = absl::NoDestructor<int>;
+
+  EXPECT_FALSE((std::is_constructible<T, T>::value));
+  EXPECT_FALSE((std::is_constructible<T, const T>::value));
+  EXPECT_FALSE((std::is_constructible<T, T&>::value));
+  EXPECT_FALSE((std::is_constructible<T, const T&>::value));
+
+  EXPECT_FALSE((std::is_assignable<T&, T>::value));
+  EXPECT_FALSE((std::is_assignable<T&, const T>::value));
+  EXPECT_FALSE((std::is_assignable<T&, T&>::value));
+  EXPECT_FALSE((std::is_assignable<T&, const T&>::value));
+}
+
+TEST(NoDestructorTest, Interface) {
+  EXPECT_TRUE(std::is_trivially_destructible<absl::NoDestructor<Blob>>::value);
+  EXPECT_TRUE(
+      std::is_trivially_destructible<absl::NoDestructor<const Blob>>::value);
+  {
+    absl::NoDestructor<Blob> b;  // default c-tor
+    // access: *, ->, get()
+    EXPECT_EQ(42, (*b).val);
+    (*b).val = 55;
+    EXPECT_EQ(55, b->val);
+    b->val = 66;
+    EXPECT_EQ(66, b.get()->val);
+    b.get()->val = 42;  // NOLINT
+    EXPECT_EQ(42, (*b).val);
+  }
+  {
+    absl::NoDestructor<const Blob> b(70, 7);  // regular c-tor, const
+    EXPECT_EQ(77, (*b).val);
+    EXPECT_EQ(77, b->val);
+    EXPECT_EQ(77, b.get()->val);
+  }
+  {
+    const absl::NoDestructor<Blob> b{
+        {20, 28, 40}};  // init-list c-tor, deep const
+    // This only works in clang, not in gcc:
+    // const absl::NoDestructor<Blob> b({20, 28, 40});
+    EXPECT_EQ(88, (*b).val);
+    EXPECT_EQ(88, b->val);
+    EXPECT_EQ(88, b.get()->val);
+  }
+}
+
+TEST(NoDestructorTest, SfinaeRegressionAbstractArg) {
+  struct Abstract {
+    virtual ~Abstract() = default;
+    virtual int foo() const = 0;
+  };
+
+  struct Concrete : Abstract {
+    int foo() const override { return 17; }
+  };
+
+  struct UsesAbstractInConstructor {
+    explicit UsesAbstractInConstructor(const Abstract& abstract)
+        : i(abstract.foo()) {}
+    int i;
+  };
+
+  Concrete input;
+  absl::NoDestructor<UsesAbstractInConstructor> foo1(input);
+  EXPECT_EQ(foo1->i, 17);
+  absl::NoDestructor<UsesAbstractInConstructor> foo2(
+      static_cast<const Abstract&>(input));
+  EXPECT_EQ(foo2->i, 17);
+}
+
+// ========================================================================= //
+
+std::string* Str0() {
+  static absl::NoDestructor<std::string> x;
+  return x.get();
+}
+
+extern const std::string& Str2();
+
+const char* Str1() {
+  static absl::NoDestructor<std::string> x(Str2() + "_Str1");
+  return x->c_str();
+}
+
+const std::string& Str2() {
+  static absl::NoDestructor<std::string> x("Str2");
+  return *x;
+}
+
+const std::string& Str2Copy() {
+  // Exercise copy construction
+  static absl::NoDestructor<std::string> x(Str2());
+  return *x;
+}
+
+typedef std::array<std::string, 3> MyArray;
+const MyArray& Array() {
+  static absl::NoDestructor<MyArray> x{{{"foo", "bar", "baz"}}};
+  // This only works in clang, not in gcc:
+  // static absl::NoDestructor<MyArray> x({{"foo", "bar", "baz"}});
+  return *x;
+}
+
+typedef std::vector<int> MyVector;
+const MyVector& Vector() {
+  static absl::NoDestructor<MyVector> x{{1, 2, 3}};
+  return *x;
+}
+
+const int& Int() {
+  static absl::NoDestructor<int> x;
+  return *x;
+}
+
+TEST(NoDestructorTest, StaticPattern) {
+  EXPECT_TRUE(
+      std::is_trivially_destructible<absl::NoDestructor<std::string>>::value);
+  EXPECT_TRUE(
+      std::is_trivially_destructible<absl::NoDestructor<MyArray>>::value);
+  EXPECT_TRUE(
+      std::is_trivially_destructible<absl::NoDestructor<MyVector>>::value);
+  EXPECT_TRUE(std::is_trivially_destructible<absl::NoDestructor<int>>::value);
+
+  EXPECT_EQ(*Str0(), "");
+  Str0()->append("foo");
+  EXPECT_EQ(*Str0(), "foo");
+
+  EXPECT_EQ(std::string(Str1()), "Str2_Str1");
+
+  EXPECT_EQ(Str2(), "Str2");
+  EXPECT_EQ(Str2Copy(), "Str2");
+
+  EXPECT_THAT(Array(), testing::ElementsAre("foo", "bar", "baz"));
+
+  EXPECT_THAT(Vector(), testing::ElementsAre(1, 2, 3));
+
+  EXPECT_EQ(0, Int());  // should get zero-initialized
+}
+
+#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION
+// This would fail to compile if Class Template Argument Deduction was not
+// provided for absl::NoDestructor.
+TEST(NoDestructorTest, ClassTemplateArgumentDeduction) {
+  absl::NoDestructor i(1);
+  static_assert(std::is_same<decltype(i), absl::NoDestructor<int>>::value,
+                "Expected deduced type to be absl::NoDestructor<int>.");
+}
+#endif
+
+}  // namespace

+ 224 - 0
Sample/lib/node_add_on/protobuf_src/absl/base/nullability.h

@@ -0,0 +1,224 @@
+// Copyright 2023 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: nullability.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a set of "templated annotations" for designating the
+// expected nullability of pointers. These annotations allow you to designate
+// pointers in one of three classification states:
+//
+//  * "Non-null" (for pointers annotated `Nonnull<T>`), indicating that it is
+//    invalid for the given pointer to ever be null.
+//  * "Nullable" (for pointers annotated `Nullable<T>`), indicating that it is
+//    valid for the given pointer to be null.
+//  * "Unknown" (for pointers annotated `NullabilityUnknown<T>`), indicating
+//    that the given pointer has not been yet classified as either nullable or
+//    non-null. This is the default state of unannotated pointers.
+//
+// NOTE: unannotated pointers implicitly bear the annotation
+// `NullabilityUnknown<T>`; you should rarely, if ever, see this annotation used
+// in the codebase explicitly.
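+//
+// As a brief illustrative sketch (the `Logger` type below is hypothetical and
+// not part of this header; `NullabilityUnknown` is spelled out only for the
+// sake of illustration), the three states appear on declarations like so:
+//
+// absl::Nonnull<Logger*> primary_log;            // must never be null
+// absl::Nullable<Logger*> fallback_log;          // may legitimately be null
+// absl::NullabilityUnknown<Logger*> legacy_log;  // not yet classified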
+//
+// -----------------------------------------------------------------------------
+// Nullability and Contracts
+// -----------------------------------------------------------------------------
+//
+// These nullability annotations allow you to more clearly specify contracts on
+// software components by narrowing the *preconditions*, *postconditions*, and
+// *invariants* of pointer state(s) in any given interface. It then depends on
+// context who is responsible for fulfilling the annotation's requirements.
+//
+// For example, a function may receive a pointer argument. Designating that
+// pointer argument as "non-null" tightens the precondition of the contract of
+// that function. It is then the responsibility of anyone calling such a
+// function to ensure that the passed pointer is not null.
+//
+// Similarly, a function may have a pointer as a return value. Designating that
+// return value as "non-null" tightens the postcondition of the contract of that
+// function. In this case, however, it is the responsibility of the function
+// itself to ensure that the returned pointer is not null.
+//
+// Clearly defining these contracts allows providers (and consumers) of such
+// pointers to have more confidence in their null state. If a function declares
+// a return value as "non-null", for example, the caller should not need to
+// check whether the returned value is `nullptr`; it can simply assume the
+// pointer is valid.
+//
+// Of course most interfaces already have expectations on the nullability state
+// of pointers, and these expectations are, in effect, a contract; often,
+// however, those contracts are either poorly or partially specified, assumed,
+// or misunderstood. These nullability annotations are designed to allow you to
+// formalize those contracts within the codebase.
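+//
+// As a brief illustrative sketch (the `Config` and `Service` names here are
+// hypothetical, not part of this header), a single signature can tighten both
+// a precondition and a postcondition:
+//
+// // Precondition: callers must pass a non-null `config`.
+// // Postcondition: the returned pointer is guaranteed to be non-null.
+// absl::Nonnull<Service*> StartService(absl::Nonnull<const Config*> config);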
+//
+// -----------------------------------------------------------------------------
+// Using Nullability Annotations
+// -----------------------------------------------------------------------------
+//
+// It is important to note that these annotations are not distinct strong
+// *types*. They are alias templates defined to be equal to the underlying
+// pointer type. A pointer annotated `Nonnull<T*>`, for example, is simply a
+// pointer of type `T*`. Each annotation acts as a form of documentation about
+// the contract for the given pointer. Each annotation requires providers or
+// consumers of these pointers across API boundaries to take appropriate steps
+// when setting or using these pointers:
+//
+// * "Non-null" pointers should never be null. It is the responsibility of the
+//   provider of this pointer to ensure that the pointer may never be set to
+//   null. Consumers of such pointers can treat such pointers as non-null.
+// * "Nullable" pointers may or may not be null. Consumers of such pointers
+//   should precede any usage of that pointer (e.g. a dereference operation)
+//   with a `nullptr` check.
+// * "Unknown" pointers may be either "non-null" or "nullable" but have not been
+//   definitively determined to be in either classification state. Providers of
+//   such pointers across API boundaries should, over time, determine which of
+//   the above two states applies and annotate accordingly. Consumers of such
+//   pointers across an API boundary should continue to treat such pointers as
+//   they currently do.
+//
+// Example:
+//
+// // PaySalary() requires the passed pointer to an `Employee` to be non-null.
+// void PaySalary(absl::Nonnull<Employee *> e) {
+//   pay(e->salary);  // OK to dereference
+// }
+//
+// // CompleteTransaction() guarantees the returned pointer to an `Account` to
+// // be non-null.
+// absl::Nonnull<Account *> CompleteTransaction(double fee) {
+// ...
+// }
+//
+// // Note that specifying a nullability annotation does not prevent someone
+// // from violating the contract:
+//
+// absl::Nullable<Employee *> find(Map& employees, std::string_view name);
+//
+// void g(Map& employees) {
+//   Employee *e = find(employees, "Pat");
+//   // `e` can now be null.
+//   PaySalary(e); // Violates contract, but compiles!
+// }
+//
+// Nullability annotations, in other words, are useful for defining and
+// narrowing contracts; *enforcement* of those contracts depends on use and any
+// additional (static or dynamic analysis) tooling.
+//
+// NOTE: The "unknown" annotation state indicates that a pointer's contract has
+// not yet been positively identified. The unknown state therefore acts as a
+// form of documentation of your technical debt, and a codebase that adopts
+// nullability annotations should aspire to annotate every pointer as either
+// "non-null" or "nullable".
+//
+// -----------------------------------------------------------------------------
+// Applicability of Nullability Annotations
+// -----------------------------------------------------------------------------
+//
+// By default, nullability annotations are applicable to raw and smart
+// pointers. User-defined types can indicate compatibility with nullability
+// annotations by providing an `absl_nullability_compatible` nested type. The
+// actual definition of this inner type is not relevant, as it is used merely
+// as a marker. A common convention is to declare it as an alias of `void`,
+// as in the example below.
+//
+// // Example:
+// struct MyPtr {
+//   using absl_nullability_compatible = void;
+//   ...
+// };
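+//
+// // As a brief illustrative sketch (these function declarations are
+// // hypothetical, not part of this header), a marker-bearing type such as
+// // `MyPtr` can then be wrapped by the annotations like a raw pointer:
+// void Use(absl::Nonnull<MyPtr> ptr);
+// void MaybeUse(absl::Nullable<MyPtr> ptr);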
+//
+// DISCLAIMER:
+// ===========================================================================
+// These nullability annotations are primarily a human-readable signal about the
+// intended contract of the pointer. They are not *types* and do not currently
+// provide any correctness guarantees. For example, a pointer annotated as
+// `Nonnull<T*>` is *not guaranteed* to be non-null, and the compiler won't
+// alert or prevent assignment of a `Nullable<T*>` to a `Nonnull<T*>`.
+// ===========================================================================
+#ifndef ABSL_BASE_NULLABILITY_H_
+#define ABSL_BASE_NULLABILITY_H_
+
+#include "absl/base/internal/nullability_impl.h"
+
+namespace absl {
+
+// absl::Nonnull
+//
+// The indicated pointer is never null. It is the responsibility of the provider
+// of this pointer across an API boundary to ensure that the pointer is never
+// set to null. Consumers of this pointer across an API boundary may safely
+// dereference the pointer.
+//
+// Example:
+//
+// // `employee` is designated as not null.
+// void PaySalary(absl::Nonnull<Employee *> employee) {
+//   pay(*employee);  // OK to dereference
+// }
+template <typename T>
+using Nonnull = nullability_internal::NonnullImpl<T>;
+
+// absl::Nullable
+//
+// The indicated pointer may, by design, be either null or non-null. Consumers
+// of this pointer across an API boundary should perform a `nullptr` check
+// before performing any operation using the pointer.
+//
+// Example:
+//
+// // `employee` may be null.
+// void PaySalary(absl::Nullable<Employee *> employee) {
+//   if (employee != nullptr) {
+//     Pay(*employee);  // OK to dereference
+//   }
+// }
+template <typename T>
+using Nullable = nullability_internal::NullableImpl<T>;
+
+// absl::NullabilityUnknown (default)
+//
+// The indicated pointer has not yet been determined to be definitively
+// "non-null" or "nullable." Providers of such pointers across API boundaries
+// should, over time, annotate such pointers as either "non-null" or "nullable."
+// Consumers of these pointers across an API boundary should treat such pointers
+// with the same caution they treat currently unannotated pointers. Most
+// existing code will have "unknown" pointers, which should eventually be
+// migrated into one of the above two nullability states: `Nonnull<T>` or
+// `Nullable<T>`.
+//
+// NOTE: Because this annotation is the global default state, pointers without
+// any annotation are assumed to have "unknown" semantics. This assumption is
+// designed to minimize churn and reduce clutter within the codebase.
+//
+// Example:
+//
+// // `employee`'s nullability state is unknown.
+// void PaySalary(absl::NullabilityUnknown<Employee *> employee) {
+//   Pay(*employee); // Potentially dangerous. API provider should investigate.
+// }
+//
+// Note that a pointer without an annotation, by default, is assumed to have the
+// annotation `NullabilityUnknown`.
+//
+// // `employee`'s nullability state is unknown.
+// void PaySalary(Employee* employee) {
+//   Pay(*employee); // Potentially dangerous. API provider should investigate.
+// }
+template <typename T>
+using NullabilityUnknown = nullability_internal::NullabilityUnknownImpl<T>;
+
+}  // namespace absl
+
+#endif  // ABSL_BASE_NULLABILITY_H_

Some files were not shown because too many files have changed in this diff