diff --git a/apps/llm/app/voice_chat/index.tsx b/apps/llm/app/voice_chat/index.tsx
index 27b4ad35d..230d0b8e9 100644
--- a/apps/llm/app/voice_chat/index.tsx
+++ b/apps/llm/app/voice_chat/index.tsx
@@ -101,7 +101,7 @@ function VoiceChatScreen() {
return !llm.isReady || !speechToText.isReady ? (
) : (
diff --git a/packages/react-native-executorch/android/libs/classes.jar b/packages/react-native-executorch/android/libs/classes.jar
index 6fda54e05..31dd44c2e 100644
Binary files a/packages/react-native-executorch/android/libs/classes.jar and b/packages/react-native-executorch/android/libs/classes.jar differ
diff --git a/packages/react-native-executorch/common/rnexecutorch/TokenizerModule.cpp b/packages/react-native-executorch/common/rnexecutorch/TokenizerModule.cpp
index ef81fdc19..2111541a6 100644
--- a/packages/react-native-executorch/common/rnexecutorch/TokenizerModule.cpp
+++ b/packages/react-native-executorch/common/rnexecutorch/TokenizerModule.cpp
@@ -9,7 +9,7 @@
namespace rnexecutorch {
using namespace facebook;
-using namespace executorch::extension::constants;
+using namespace executorch::extension::llm;
TokenizerModule::TokenizerModule(
std::string source, std::shared_ptr<react::CallInvoker> callInvoker)
diff --git a/packages/react-native-executorch/common/rnexecutorch/models/BaseModel.h b/packages/react-native-executorch/common/rnexecutorch/models/BaseModel.h
index 4420103e3..c40fa2569 100644
--- a/packages/react-native-executorch/common/rnexecutorch/models/BaseModel.h
+++ b/packages/react-native-executorch/common/rnexecutorch/models/BaseModel.h
@@ -46,7 +46,7 @@ class BaseModel {
// (unnecessary copies instead of working on JS memory). In this case
// CallInvoker can be used to get jsi::Runtime, and use it in a safe manner.
std::shared_ptr<react::CallInvoker> callInvoker;
- std::unique_ptr module_;
+ std::unique_ptr module_;
std::size_t memorySizeLowerBound{0};
diff --git a/packages/react-native-executorch/common/rnexecutorch/tests/integration/SpeechToTextTest.cpp b/packages/react-native-executorch/common/rnexecutorch/tests/integration/SpeechToTextTest.cpp
index 5b15c0040..29edf4e4e 100644
--- a/packages/react-native-executorch/common/rnexecutorch/tests/integration/SpeechToTextTest.cpp
+++ b/packages/react-native-executorch/common/rnexecutorch/tests/integration/SpeechToTextTest.cpp
@@ -74,7 +74,7 @@ TEST(S2TTranscribeTests, TranscribeReturnsValidChars) {
auto result = model.transcribe(audio, "en", true);
ASSERT_EQ(result.language, "en");
EXPECT_GE(result.duration, 20.0f);
- ASSERT_EQ(result.task, "transcription");
+ ASSERT_EQ(result.task, "transcribe");
ASSERT_FALSE(result.segments.empty());
ASSERT_FALSE(result.text.empty());
for (char c : result.text) {
diff --git a/packages/react-native-executorch/common/runner/constants.h b/packages/react-native-executorch/common/runner/constants.h
index 93ac6a876..e75466829 100644
--- a/packages/react-native-executorch/common/runner/constants.h
+++ b/packages/react-native-executorch/common/runner/constants.h
@@ -7,7 +7,7 @@
*/
#pragma once
// constants for LLM runtime
-namespace executorch::extension::constants {
+namespace executorch::extension::llm {
// Runtime metadata key constants
inline constexpr auto kEnableDynamicShape = "enable_dynamic_shape";
@@ -27,4 +27,5 @@ inline constexpr auto kTextModelMethod = "text_decoder";
inline constexpr auto numOfAddedBoSTokens = 0;
inline constexpr auto numOfAddedEoSTokens = 0;
-} // namespace executorch::extension::constants
+
+} // namespace executorch::extension::llm
diff --git a/packages/react-native-executorch/common/runner/irunner.h b/packages/react-native-executorch/common/runner/irunner.h
index ac8115b20..8dedc6687 100644
--- a/packages/react-native-executorch/common/runner/irunner.h
+++ b/packages/react-native-executorch/common/runner/irunner.h
@@ -65,6 +65,10 @@ struct GenerationConfig {
// Use KV_CACHE implementation (if implemented) or not
bool enable_kv_cache = true;
+
+ // Number of BOS and EOS tokens to add to the prompt
+ int32_t num_bos = 0;
+ int32_t num_eos = 0;
};
// Base interface for LLM runners
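
The two new fields give callers explicit control over how many BOS/EOS tokens the runner adds around the prompt. A minimal sketch of a call site — the `executorch::extension::llm` namespace for `GenerationConfig` is an assumption, since the hunk does not show the enclosing namespace:

```cpp
#include "irunner.h"

// Namespace assumed; adjust to wherever GenerationConfig actually lives.
executorch::extension::llm::GenerationConfig makeConfig() {
  executorch::extension::llm::GenerationConfig config;
  config.enable_kv_cache = true;
  config.num_bos = 1; // prepend one BOS token to the prompt
  config.num_eos = 0; // append no EOS tokens
  return config;
}
```
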
diff --git a/packages/react-native-executorch/common/runner/kernel_includes.h b/packages/react-native-executorch/common/runner/kernel_includes.h
index 9d6029a9a..bafaf80e7 100644
--- a/packages/react-native-executorch/common/runner/kernel_includes.h
+++ b/packages/react-native-executorch/common/runner/kernel_includes.h
@@ -15,8 +15,8 @@
#pragma once
// This list should be very conservative since most kernel .cpp files will
-// include these and depend on their transitive deps. Only add a header if 99%
-// of kernels would have included it anyway.
+// include these and depend on their transitive deps. Only add a header if
+// 99% of kernels would have included it anyway.
#include // IWYU pragma: export
#include // IWYU pragma: export
#include // IWYU pragma: export
diff --git a/packages/react-native-executorch/common/runner/runner.cpp b/packages/react-native-executorch/common/runner/runner.cpp
index 5d0fec78c..4da2ee6ba 100644
--- a/packages/react-native-executorch/common/runner/runner.cpp
+++ b/packages/react-native-executorch/common/runner/runner.cpp
@@ -19,7 +19,7 @@
namespace example {
-using namespace executorch::extension::constants;
+using namespace executorch::extension::llm;
using ::executorch::extension::Module;
using ::executorch::runtime::Error;
using ::executorch::runtime::Result;
diff --git a/packages/react-native-executorch/common/runner/sampler.h b/packages/react-native-executorch/common/runner/sampler.h
index b18ba6585..a46a5ed12 100644
--- a/packages/react-native-executorch/common/runner/sampler.h
+++ b/packages/react-native-executorch/common/runner/sampler.h
@@ -26,6 +26,8 @@ namespace extension {
namespace llm {
// A simple llama2 sampler.
+inline constexpr auto kTopp = 0.9f;
+
template <typename T> struct ProbIndex {
T prob;
int32_t index;
@@ -65,3 +67,9 @@ using ::executorch::extension::llm::ProbIndex;
using ::executorch::extension::llm::Sampler;
} // namespace executor
} // namespace torch
+
+namespace executorch::llm {
+// TODO(T197294990): Remove these deprecated aliases once all users have moved
+// to the new `::executorch::extension::llm` namespaces.
+using ::executorch::extension::llm::kTopp;
+} // namespace executorch::llm
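
A quick compile-time check that the deprecated alias stays in sync with the constant it re-exports — a sketch, assuming the header is reachable as "sampler.h":

```cpp
#include "sampler.h" // path assumed

// Both spellings must name the same 0.9f default during the deprecation window.
static_assert(::executorch::llm::kTopp == 0.9f, "deprecated kTopp alias drifted");
static_assert(::executorch::extension::llm::kTopp == 0.9f, "kTopp changed");
```
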
diff --git a/packages/react-native-executorch/common/runner/stats.h b/packages/react-native-executorch/common/runner/stats.h
index 4c1fae519..b94834475 100644
--- a/packages/react-native-executorch/common/runner/stats.h
+++ b/packages/react-native-executorch/common/runner/stats.h
@@ -11,6 +11,7 @@
#include "util.h"
#include
#include
+#include <limits>
#include
#include
@@ -44,11 +45,19 @@ struct Stats {
// inference_end_ms: End of inference/generation.
long inference_end_ms;
// Keep a running total of the time spent in sampling.
- long aggregate_sampling_time_ms;
+ long aggregate_sampling_time_ms = 0;
// Token count from prompt
int64_t num_prompt_tokens;
// Token count from generated (total - prompt)
int64_t num_generated_tokens;
+ // GPU memory stats (optional). Use sentinels UINT64_MAX / -1.0 to indicate
+ // "not available".
+ uint64_t gpu_total_bytes = std::numeric_limits<uint64_t>::max();
+ uint64_t gpu_free_before_load_bytes = std::numeric_limits<uint64_t>::max();
+ uint64_t gpu_free_after_load_bytes = std::numeric_limits<uint64_t>::max();
+ uint64_t gpu_free_after_generate_bytes = std::numeric_limits<uint64_t>::max();
+ double gpu_peak_usage_mb = -1.0;
inline void on_sampling_begin() {
aggregate_sampling_timer_start_timestamp = time_in_ms();
}
@@ -75,6 +84,11 @@ struct Stats {
aggregate_sampling_time_ms = 0;
num_prompt_tokens = 0;
num_generated_tokens = 0;
+ gpu_total_bytes = std::numeric_limits<uint64_t>::max();
+ gpu_free_before_load_bytes = std::numeric_limits<uint64_t>::max();
+ gpu_free_after_load_bytes = std::numeric_limits<uint64_t>::max();
+ gpu_free_after_generate_bytes = std::numeric_limits<uint64_t>::max();
+ gpu_peak_usage_mb = -1.0;
aggregate_sampling_timer_start_timestamp = 0;
}
@@ -93,7 +107,29 @@ inline std::string stats_to_json_string(const Stats &stats) {
<< "\"prompt_eval_end_ms\":" << stats.prompt_eval_end_ms << ","
<< "\"first_token_ms\":" << stats.first_token_ms << ","
<< "\"aggregate_sampling_time_ms\":" << stats.aggregate_sampling_time_ms
- << "," << "\"SCALING_FACTOR_UNITS_PER_SECOND\":"
+ << ",";
+ // Only include GPU fields in the JSON if gpu_total_bytes is valid (not
+ // equal to sentinel -1)
+ if (stats.gpu_total_bytes != static_cast<uint64_t>(-1)) {
+ ss << "\"gpu_total_bytes\":" << stats.gpu_total_bytes;
+ if (stats.gpu_free_before_load_bytes != static_cast<uint64_t>(-1)) {
+ ss << ",\"gpu_free_before_load_bytes\":"
+ << stats.gpu_free_before_load_bytes;
+ }
+ if (stats.gpu_free_after_load_bytes != static_cast<uint64_t>(-1)) {
+ ss << ",\"gpu_free_after_load_bytes\":"
+ << stats.gpu_free_after_load_bytes;
+ }
+ if (stats.gpu_free_after_generate_bytes != static_cast<uint64_t>(-1)) {
+ ss << ",\"gpu_free_after_generate_bytes\":"
+ << stats.gpu_free_after_generate_bytes;
+ }
+ if (stats.gpu_peak_usage_mb >= 0.0) {
+ ss << ",\"gpu_peak_usage_mb\":" << stats.gpu_peak_usage_mb;
+ }
+ ss << ",";
+ }
+ ss << "\"SCALING_FACTOR_UNITS_PER_SECOND\":"
<< stats.SCALING_FACTOR_UNITS_PER_SECOND << "}";
return ss.str();
}
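
A sketch of how the sentinel logic behaves at a call site. The `reset()` name is assumed from the field-zeroing hunk above, and the `executorch::llm` qualification is inferred from the file's closing namespace brace:

```cpp
#include <string>
#include "stats.h" // path assumed

std::string reportGpu(executorch::llm::Stats &stats) {
  stats.reset();                      // GPU fields return to their sentinels
  stats.gpu_total_bytes = 8ull << 30; // suppose a backend reports 8 GiB total
  // The JSON now includes "gpu_total_bytes" but omits the free-memory and
  // peak-usage fields, which are still at their sentinel values.
  return executorch::llm::stats_to_json_string(stats);
}
```
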
@@ -145,6 +181,27 @@ inline void print_report(const Stats &stats) {
stats.num_prompt_tokens + stats.num_generated_tokens,
(double)stats.aggregate_sampling_time_ms /
stats.SCALING_FACTOR_UNITS_PER_SECOND);
+
+ // GPU memory reporting (only meaningful if GPU fields were populated)
+ if (stats.gpu_total_bytes != static_cast<uint64_t>(-1)) {
+ ET_LOG(Info, "\tGPU total memory: %.2f MB",
+ stats.gpu_total_bytes / 1024.0 / 1024.0);
+ if (stats.gpu_free_before_load_bytes != static_cast<uint64_t>(-1)) {
+ ET_LOG(Info, "\tGPU free before load: %.2f MB",
+ stats.gpu_free_before_load_bytes / 1024.0 / 1024.0);
+ }
+ if (stats.gpu_free_after_load_bytes != static_cast<uint64_t>(-1)) {
+ ET_LOG(Info, "\tGPU free after load: %.2f MB",
+ stats.gpu_free_after_load_bytes / 1024.0 / 1024.0);
+ }
+ if (stats.gpu_free_after_generate_bytes != static_cast<uint64_t>(-1)) {
+ ET_LOG(Info, "\tGPU free after generate: %.2f MB",
+ stats.gpu_free_after_generate_bytes / 1024.0 / 1024.0);
+ }
+ if (stats.gpu_peak_usage_mb >= 0.0) {
+ ET_LOG(Info, "\tGPU peak usage: %.2f MB", stats.gpu_peak_usage_mb);
+ }
+ }
}
} // namespace llm
diff --git a/packages/react-native-executorch/common/runner/text_decoder_runner.cpp b/packages/react-native-executorch/common/runner/text_decoder_runner.cpp
index 97eb3dd13..bc5ca069a 100644
--- a/packages/react-native-executorch/common/runner/text_decoder_runner.cpp
+++ b/packages/react-native-executorch/common/runner/text_decoder_runner.cpp
@@ -32,15 +32,23 @@ TextDecoderRunner::TextDecoderRunner(Module *module, IOManager *io_manager,
::executorch::runtime::Result<executorch::aten::Tensor>
TextDecoderRunner::step(TensorPtr &tokens, int64_t start_pos) {
// ET_LOG(Info, "Input token %" PRIu64, input_token);
- auto method_meta = ET_UNWRAP(module_->method_meta("forward"));
+ auto method_meta_result = module_->method_meta("forward");
+ if (!method_meta_result.ok()) {
+ return method_meta_result.error();
+ }
+ auto method_meta = std::move(*method_meta_result);
// If only 1 input, we are not using kv cache
bool use_kv_cache = method_meta.num_inputs() > 1;
std::vector<int64_t> cache_positions;
if (use_kv_cache) {
- auto start_pos_tensor = ET_UNWRAP(populate_start_pos_or_cache_position(
- module_, start_pos, cache_positions, tokens->numel(), "forward"));
+ auto start_pos_tensor_result = populate_start_pos_or_cache_position(
+ module_, start_pos, cache_positions, tokens->numel(), "forward");
+ if (!start_pos_tensor_result.ok()) {
+ return start_pos_tensor_result.error();
+ }
+ auto start_pos_tensor = std::move(*start_pos_tensor_result);
std::vector<::executorch::runtime::EValue> inputs;
auto inputs_res = io_manager_->prepare_decode(tokens, start_pos_tensor);
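
This file, text_prefiller.cpp, and util.h below all apply the same mechanical rewrite: the early-returning ET_UNWRAP macro is replaced by explicit Result handling. The shape of the pattern, with `someCall` as a hypothetical Result-returning API:

```cpp
#include <executorch/runtime/core/result.h>
#include <utility>

using ::executorch::runtime::Result;

Result<int> someCall(); // placeholder for any Result-returning call

Result<int> caller() {
  // Before: auto value = ET_UNWRAP(someCall()); // macro hides the early return
  auto result = someCall();
  if (!result.ok()) {
    return result.error(); // propagate the Error explicitly
  }
  auto value = std::move(*result); // take ownership of the unwrapped value
  return value;
}
```
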
diff --git a/packages/react-native-executorch/common/runner/text_prefiller.cpp b/packages/react-native-executorch/common/runner/text_prefiller.cpp
index c7e4296eb..dc961158b 100644
--- a/packages/react-native-executorch/common/runner/text_prefiller.cpp
+++ b/packages/react-native-executorch/common/runner/text_prefiller.cpp
TextPrefiller::prefill_chunk(std::vector<uint64_t> &prompt_tokens,
// run the first token and get back logits tensor. Assuming the first token
// is bos so don't callback.
- auto logits_tensor =
- ET_UNWRAP(text_decoder_runner_->step(tokens, start_pos));
+ auto logits_result = text_decoder_runner_->step(tokens, start_pos);
+ if (!logits_result.ok()) {
+ return logits_result.error();
+ }
+ auto logits_tensor = std::move(*logits_result);
pos += 1; // start the loop from index 1
start_pos += 1;
diff --git a/packages/react-native-executorch/common/runner/text_token_generator.h b/packages/react-native-executorch/common/runner/text_token_generator.h
index 7b0dd3042..024cce456 100644
--- a/packages/react-native-executorch/common/runner/text_token_generator.h
+++ b/packages/react-native-executorch/common/runner/text_token_generator.h
@@ -39,6 +39,10 @@ class TextTokenGenerator {
* @param start_pos The start position of the new tokens, based on how many
* prompt tokens is prefilled.
* @param max_new_tokens Maximum number of new tokens to generate.
+ * @param temperature controls the randomness of predictions by scaling the
+ * logits before applying softmax. A higher temperature results in more
+ * random predictions, while a lower temperature results in more deterministic
+ * predictions.
* @param token_callback what to do after a token is generated.
* @return how many tokens are generated.
*/
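
A standalone sketch of what the documented temperature parameter does to the logits before sampling (illustrative only, not the runner's actual sampling code; assumes a non-empty logits vector):

```cpp
#include <algorithm>
#include <cmath>
#include <vector>

// Divide logits by the temperature before softmax: t < 1 sharpens the
// distribution toward the argmax, t > 1 flattens it toward uniform.
std::vector<float> softmaxWithTemperature(std::vector<float> logits,
                                          float temperature) {
  const float maxLogit = *std::max_element(logits.begin(), logits.end());
  float sum = 0.0f;
  for (float &l : logits) {
    l = std::exp((l - maxLogit) / temperature); // subtract max for stability
    sum += l;
  }
  for (float &l : logits) {
    l /= sum; // normalize to probabilities
  }
  return logits;
}
```
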
@@ -113,7 +117,8 @@ class TextTokenGenerator {
// We pass false, as we don't want to skip special tokens, e.g.
//
- auto decodeResult = tokenizer_->decode(token_cache, false);
+ auto decodeResult =
+ tokenizer_->decode(token_cache, false); // NOTE: difference
if (!decodeResult.ok()) {
throw rnexecutorch::RnExecutorchError(
rnexecutorch::RnExecutorchErrorCode::TokenizerError,
diff --git a/packages/react-native-executorch/common/runner/util.h b/packages/react-native-executorch/common/runner/util.h
index 03ad0c7b3..640b96319 100644
--- a/packages/react-native-executorch/common/runner/util.h
+++ b/packages/react-native-executorch/common/runner/util.h
@@ -10,10 +10,12 @@
#include "constants.h"
#include "text_prefiller.h"
#include
+#include
#include
#include
#include
#include
+#include
#if defined(__linux__) || defined(__ANDROID__) || defined(__unix__)
#include
#endif
@@ -112,8 +114,16 @@ populate_start_pos_or_cache_position(Module *module, int64_t &start_pos,
const char *method_name = "forward") {
// Get expected shape of cache position tensor, which should be the second
// argument
- auto method_meta = ET_UNWRAP(module->method_meta(method_name));
- auto second_input_info = ET_UNWRAP(method_meta.input_tensor_meta(1));
+ auto method_meta_result = module->method_meta(method_name);
+ if (!method_meta_result.ok()) {
+ return method_meta_result.error();
+ }
+ auto method_meta = std::move(*method_meta_result);
+ auto second_input_info_result = method_meta.input_tensor_meta(1);
+ if (!second_input_info_result.ok()) {
+ return second_input_info_result.error();
+ }
+ auto second_input_info = std::move(*second_input_info_result);
auto second_input_sizes = second_input_info.sizes();
auto numel = second_input_sizes[0];
@@ -136,6 +146,31 @@ populate_start_pos_or_cache_position(Module *module, int64_t &start_pos,
}
}
+/**
+ * Helper function to convert a float tensor to bfloat16.
+ * Creates a new tensor with bfloat16 dtype and copies/converts the data.
+ */
+inline ::executorch::runtime::Result<::executorch::extension::TensorPtr>
+convert_to_bfloat16(const ::executorch::extension::TensorPtr &src_tensor) {
+ ET_CHECK_OR_RETURN_ERROR(
+ src_tensor->scalar_type() == ::executorch::aten::ScalarType::Float,
+ InvalidArgument,
+ "BFloat16 conversion only supported from Float source data");
+
+ const auto num_elements = static_cast<size_t>(src_tensor->numel());
+ const float *float_data = src_tensor->const_data_ptr<float>();
+
+ auto bf16_tensor = ::executorch::extension::empty_like(
+ src_tensor, ::executorch::aten::ScalarType::BFloat16);
+ auto *bf16_data =
+ bf16_tensor->mutable_data_ptr<::executorch::aten::BFloat16>();
+ for (size_t i = 0; i < num_elements; ++i) {
+ bf16_data[i] = ::executorch::aten::BFloat16(float_data[i]);
+ }
+
+ return bf16_tensor;
+}
+
} // namespace llm
} // namespace extension
} // namespace executorch
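
A hypothetical call site for the new helper; `floatTensor` stands in for any Float-dtype TensorPtr produced elsewhere:

```cpp
#include <utility>
#include "util.h" // path assumed

using ::executorch::extension::TensorPtr;
using ::executorch::runtime::Error;

Error feedBf16Model(const TensorPtr &floatTensor) {
  auto bf16Result =
      ::executorch::extension::llm::convert_to_bfloat16(floatTensor);
  if (!bf16Result.ok()) {
    return bf16Result.error(); // InvalidArgument for non-Float input
  }
  auto bf16Tensor = std::move(*bf16Result);
  // ... hand bf16Tensor to a module expecting BFloat16 inputs ...
  return Error::Ok;
}
```
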
diff --git a/packages/react-native-executorch/third-party/android/libs/executorch/arm64-v8a/libexecutorch.so b/packages/react-native-executorch/third-party/android/libs/executorch/arm64-v8a/libexecutorch.so
old mode 100644
new mode 100755
index 846897531..8b29d1cb0
Binary files a/packages/react-native-executorch/third-party/android/libs/executorch/arm64-v8a/libexecutorch.so and b/packages/react-native-executorch/third-party/android/libs/executorch/arm64-v8a/libexecutorch.so differ
diff --git a/packages/react-native-executorch/third-party/android/libs/executorch/x86_64/libexecutorch.so b/packages/react-native-executorch/third-party/android/libs/executorch/x86_64/libexecutorch.so
old mode 100644
new mode 100755
index 5e4064515..2836d92bd
Binary files a/packages/react-native-executorch/third-party/android/libs/executorch/x86_64/libexecutorch.so and b/packages/react-native-executorch/third-party/android/libs/executorch/x86_64/libexecutorch.so differ
diff --git a/packages/react-native-executorch/third-party/include/absl/base/casts.h b/packages/react-native-executorch/third-party/include/absl/base/casts.h
new file mode 100644
index 000000000..461cb8d52
--- /dev/null
+++ b/packages/react-native-executorch/third-party/include/absl/base/casts.h
@@ -0,0 +1,180 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: casts.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines casting templates to fit use cases not covered by
+// the standard casts provided in the C++ standard. As with all cast operations,
+// use these with caution and only if alternatives do not exist.
+
+#ifndef ABSL_BASE_CASTS_H_
+#define ABSL_BASE_CASTS_H_
+
+#include <cstring>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+#include <bit> // For std::bit_cast.
+#endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
+#include "absl/base/internal/identity.h"
+#include "absl/base/macros.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// implicit_cast()
+//
+// Performs an implicit conversion between types following the language
+// rules for implicit conversion; if an implicit conversion is otherwise
+// allowed by the language in the given context, this function performs such an
+// implicit conversion.
+//
+// Example:
+//
+// // If the context allows implicit conversion:
+// From from;
+// To to = from;
+//
+// // Such code can be replaced by:
+// implicit_cast<To>(from);
+//
+// An `implicit_cast()` may also be used to annotate numeric type conversions
+// that, although safe, may produce compiler warnings (such as `long` to `int`).
+// Additionally, an `implicit_cast()` is also useful within return statements to
+// indicate a specific implicit conversion is being undertaken.
+//
+// Example:
+//
+// return implicit_cast<double>(size_in_bytes) / capacity_;
+//
+// Annotating code with `implicit_cast()` allows you to explicitly select
+// particular overloads and template instantiations, while providing a safer
+// cast than `reinterpret_cast()` or `static_cast()`.
+//
+// Additionally, an `implicit_cast()` can be used to allow upcasting within a
+// type hierarchy where incorrect use of `static_cast()` could accidentally
+// allow downcasting.
+//
+// Finally, an `implicit_cast()` can be used to perform implicit conversions
+// from unrelated types that otherwise couldn't be implicitly cast directly;
+// C++ will normally only implicitly cast "one step" in such conversions.
+//
+// That is, if C is a type which can be implicitly converted to B, with B being
+// a type that can be implicitly converted to A, an `implicit_cast()` can be
+// used to convert C to B (which the compiler can then implicitly convert to A
+// using language rules).
+//
+// Example:
+//
+// // Assume an object C is convertible to B, which is implicitly convertible
+// // to A
+// A a = implicit_cast<B>(C);
+//
+// Such implicit cast chaining may be useful within template logic.
+template <typename To>
+constexpr To implicit_cast(typename absl::internal::type_identity_t<To> to) {
+ return to;
+}
+
+// bit_cast()
+//
+// Creates a value of the new type `Dest` whose representation is the same as
+// that of the argument, which is of (deduced) type `Source` (a "bitwise cast";
+// every bit in the value representation of the result is equal to the
+// corresponding bit in the object representation of the source). Source and
+// destination types must be of the same size, and both types must be trivially
+// copyable.
+//
+// As with most casts, use with caution. A `bit_cast()` might be needed when you
+// need to treat a value as the value of some other type, for example, to access
+// the individual bits of an object which are not normally accessible through
+// the object's type, such as for working with the binary representation of a
+// floating point value:
+//
+// float f = 3.14159265358979;
+// int i = bit_cast<int>(f);
+// // i = 0x40490fdb
+//
+// Reinterpreting and accessing a value directly as a different type (as shown
+// below) usually results in undefined behavior.
+//
+// Example:
+//
+// // WRONG
+// float f = 3.14159265358979;
+// int i = reinterpret_cast<int&>(f); // Wrong
+// int j = *reinterpret_cast<int*>(&f); // Equally wrong
+// int k = *bit_cast<int*>(&f); // Equally wrong
+//
+// Reinterpret-casting results in undefined behavior according to the ISO C++
+// specification, section [basic.lval]. Roughly, this section says: if an object
+// in memory has one type, and a program accesses it with a different type, the
+// result is undefined behavior for most "different type".
+//
+// Using bit_cast on a pointer and then dereferencing it is no better than using
+// reinterpret_cast. You should only use bit_cast on the value itself.
+//
+// Such casting results in type punning: holding an object in memory of one type
+// and reading its bits back using a different type. A `bit_cast()` avoids this
+// issue by copying the object representation to a new value, which avoids
+// introducing this undefined behavior (since the original value is never
+// accessed in the wrong way).
+//
+// The requirements of `absl::bit_cast` are more strict than that of
+// `std::bit_cast` unless compiler support is available. Specifically, without
+// compiler support, this implementation also requires `Dest` to be
+// default-constructible. In C++20, `absl::bit_cast` is replaced by
+// `std::bit_cast`.
+#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
+using std::bit_cast;
+
+#else // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
+template <
+ typename Dest, typename Source,
+ typename std::enable_if<sizeof(Dest) == sizeof(Source) &&
+ std::is_trivially_copyable<Source>::value &&
+ std::is_trivially_copyable<Dest>::value
+#if !ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+ && std::is_default_constructible<Dest>::value
+#endif // !ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+ ,
+ int>::type = 0>
+#if ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+inline constexpr Dest bit_cast(const Source &source) {
+ return __builtin_bit_cast(Dest, source);
+}
+#else // ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+inline Dest bit_cast(const Source &source) {
+ Dest dest;
+ memcpy(static_cast<void *>(std::addressof(dest)),
+ static_cast<const void *>(std::addressof(source)), sizeof(dest));
+ return dest;
+}
+#endif // ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+
+#endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_CASTS_H_
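
A small usage sketch matching the 0x40490fdb example in the comments above:

```cpp
#include <cstdint>
#include "absl/base/casts.h"

// Inspect the IEEE-754 bit pattern of a float without undefined behavior;
// both types are 4 bytes, as bit_cast requires.
uint32_t floatBits(float f) { return absl::bit_cast<uint32_t>(f); }
// floatBits(3.14159265f) == 0x40490fdb
```
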
diff --git a/packages/react-native-executorch/third-party/include/absl/base/const_init.h b/packages/react-native-executorch/third-party/include/absl/base/const_init.h
new file mode 100644
index 000000000..96bef4dd3
--- /dev/null
+++ b/packages/react-native-executorch/third-party/include/absl/base/const_init.h
@@ -0,0 +1,76 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// kConstInit
+// -----------------------------------------------------------------------------
+//
+// A constructor tag used to mark an object as safe for use as a global
+// variable, avoiding the usual lifetime issues that can affect globals.
+
+#ifndef ABSL_BASE_CONST_INIT_H_
+#define ABSL_BASE_CONST_INIT_H_
+
+#include "absl/base/config.h"
+
+// In general, objects with static storage duration (such as global variables)
+// can trigger tricky object lifetime situations. Attempting to access them
+// from the constructors or destructors of other global objects can result in
+// undefined behavior, unless their constructors and destructors are designed
+// with this issue in mind.
+//
+// The normal way to deal with this issue in C++11 is to use constant
+// initialization and trivial destructors.
+//
+// Constant initialization is guaranteed to occur before any other code
+// executes. Constructors that are declared 'constexpr' are eligible for
+// constant initialization. You can annotate a variable declaration with the
+// ABSL_CONST_INIT macro to express this intent. For compilers that support
+// it, this annotation will cause a compilation error for declarations that
+// aren't subject to constant initialization (perhaps because a runtime value
+// was passed as a constructor argument).
+//
+// On program shutdown, lifetime issues can be avoided on global objects by
+// ensuring that they contain trivial destructors. A class has a trivial
+// destructor unless it has a user-defined destructor, a virtual method or base
+// class, or a data member or base class with a non-trivial destructor of its
+// own. Objects with static storage duration and a trivial destructor are not
+// cleaned up on program shutdown, and are thus safe to access from other code
+// running during shutdown.
+//
+// For a few core Abseil classes, we make a best effort to allow for safe global
+// instances, even though these classes have non-trivial destructors. These
+// objects can be created with the absl::kConstInit tag. For example:
+// ABSL_CONST_INIT absl::Mutex global_mutex(absl::kConstInit);
+//
+// The line above declares a global variable of type absl::Mutex which can be
+// accessed at any point during startup or shutdown. global_mutex's destructor
+// will still run, but will not invalidate the object. Note that C++ specifies
+// that accessing an object after its destructor has run results in undefined
+// behavior, but this pattern works on the toolchains we support.
+//
+// The absl::kConstInit tag should only be used to define objects with static
+// or thread_local storage duration.
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+enum ConstInitType {
+ kConstInit,
+};
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_CONST_INIT_H_
diff --git a/packages/react-native-executorch/third-party/include/absl/base/dynamic_annotations.h b/packages/react-native-executorch/third-party/include/absl/base/dynamic_annotations.h
new file mode 100644
index 000000000..5deb2f2ce
--- /dev/null
+++ b/packages/react-native-executorch/third-party/include/absl/base/dynamic_annotations.h
@@ -0,0 +1,476 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file defines dynamic annotations for use with dynamic analysis tool
+// such as valgrind, PIN, etc.
+//
+// Dynamic annotation is a source code annotation that affects the generated
+// code (that is, the annotation is not a comment). Each such annotation is
+// attached to a particular instruction and/or to a particular object (address)
+// in the program.
+//
+// The annotations that should be used by users are macros in all upper-case
+// (e.g., ABSL_ANNOTATE_THREAD_NAME).
+//
+// Actual implementation of these macros may differ depending on the dynamic
+// analysis tool being used.
+//
+// This file supports the following configurations:
+// - Dynamic Annotations enabled (with static thread-safety warnings disabled).
+// In this case, macros expand to functions implemented by Thread Sanitizer,
+// when building with TSan. When not provided an external implementation,
+// dynamic_annotations.cc provides no-op implementations.
+//
+// - Static Clang thread-safety warnings enabled.
+// When building with a Clang compiler that supports thread-safety warnings,
+// a subset of annotations can be statically-checked at compile-time. We
+// expand these macros to static-inline functions that can be analyzed for
+// thread-safety, but afterwards elided when building the final binary.
+//
+// - All annotations are disabled.
+// If neither Dynamic Annotations nor Clang thread-safety warnings are
+// enabled, then all annotation-macros expand to empty.
+
+#ifndef ABSL_BASE_DYNAMIC_ANNOTATIONS_H_
+#define ABSL_BASE_DYNAMIC_ANNOTATIONS_H_
+
+#include
+#include
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#ifdef __cplusplus
+#include "absl/base/macros.h"
+#endif
+
+#ifdef ABSL_HAVE_HWADDRESS_SANITIZER
+#include <sanitizer/hwasan_interface.h>
+#endif
+
+// TODO(rogeeff): Remove after the backward compatibility period.
+#include "absl/base/internal/dynamic_annotations.h" // IWYU pragma: export
+
+// -------------------------------------------------------------------------
+// Decide which features are enabled.
+
+#ifdef ABSL_HAVE_THREAD_SANITIZER
+
+#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0
+#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED 1
+
+#else
+
+#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 0
+#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 0
+#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 0
+
+// Clang provides limited support for static thread-safety analysis through a
+// feature called Annotalysis. We configure macro-definitions according to
+// whether Annotalysis support is available. When running in opt-mode, GCC
+// will issue a warning, if these attributes are compiled. Only include them
+// when compiling using Clang.
+
+#if defined(__clang__)
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 1
+#if !defined(SWIG)
+#define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 1
+#endif
+#else
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0
+#endif
+
+// Read/write annotations are enabled in Annotalysis mode; disabled otherwise.
+#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \
+ ABSL_INTERNAL_ANNOTALYSIS_ENABLED
+
+#endif // ABSL_HAVE_THREAD_SANITIZER
+
+#ifdef __cplusplus
+#define ABSL_INTERNAL_BEGIN_EXTERN_C extern "C" {
+#define ABSL_INTERNAL_END_EXTERN_C } // extern "C"
+#define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F
+#define ABSL_INTERNAL_STATIC_INLINE inline
+#else
+#define ABSL_INTERNAL_BEGIN_EXTERN_C // empty
+#define ABSL_INTERNAL_END_EXTERN_C // empty
+#define ABSL_INTERNAL_GLOBAL_SCOPED(F) F
+#define ABSL_INTERNAL_STATIC_INLINE static inline
+#endif
+
+// -------------------------------------------------------------------------
+// Define race annotations.
+
+#if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1
+// Some of the symbols used in this section (e.g. AnnotateBenignRaceSized) are
+// defined by the compiler-based sanitizer implementation, not by the Abseil
+// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL.
+
+// -------------------------------------------------------------
+// Annotations that suppress errors. It is usually better to express the
+// program's synchronization using the other annotations, but these can be used
+// when all else fails.
+
+// Report that we may have a benign race at `pointer`, with size
+// "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the
+// point where `pointer` has been allocated, preferably close to the point
+// where the race happens. See also ABSL_ANNOTATE_BENIGN_RACE_STATIC.
+#define ABSL_ANNOTATE_BENIGN_RACE(pointer, description) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \
+ (__FILE__, __LINE__, pointer, sizeof(*(pointer)), description)
+
+// Same as ABSL_ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to
+// the memory range [`address`, `address`+`size`).
+#define ABSL_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \
+ (__FILE__, __LINE__, address, size, description)
+
+// Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads.
+// This annotation could be useful if you want to skip expensive race analysis
+// during some period of program execution, e.g. during initialization.
+#define ABSL_ANNOTATE_ENABLE_RACE_DETECTION(enable) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \
+ (__FILE__, __LINE__, enable)
+
+// -------------------------------------------------------------
+// Annotations useful for debugging.
+
+// Report the current thread `name` to a race detector.
+#define ABSL_ANNOTATE_THREAD_NAME(name) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName)(__FILE__, __LINE__, name)
+
+// -------------------------------------------------------------
+// Annotations useful when implementing locks. They are not normally needed by
+// modules that merely use locks. The `lock` argument is a pointer to the lock
+// object.
+
+// Report that a lock has been created at address `lock`.
+#define ABSL_ANNOTATE_RWLOCK_CREATE(lock) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock)
+
+// Report that a linker initialized lock has been created at address `lock`.
+#ifdef ABSL_HAVE_THREAD_SANITIZER
+#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \
+ (__FILE__, __LINE__, lock)
+#else
+#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) \
+ ABSL_ANNOTATE_RWLOCK_CREATE(lock)
+#endif
+
+// Report that the lock at address `lock` is about to be destroyed.
+#define ABSL_ANNOTATE_RWLOCK_DESTROY(lock) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock)
+
+// Report that the lock at address `lock` has been acquired.
+// `is_w`=1 for writer lock, `is_w`=0 for reader lock.
+#define ABSL_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \
+ (__FILE__, __LINE__, lock, is_w)
+
+// Report that the lock at address `lock` is about to be released.
+// `is_w`=1 for writer lock, `is_w`=0 for reader lock.
+#define ABSL_ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \
+ (__FILE__, __LINE__, lock, is_w)
+
+// Apply ABSL_ANNOTATE_BENIGN_RACE_SIZED to a static variable `static_var`.
+#define ABSL_ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \
+ namespace { \
+ class static_var##_annotator { \
+ public: \
+ static_var##_annotator() { \
+ ABSL_ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \
+ #static_var ": " description); \
+ } \
+ }; \
+ static static_var##_annotator the##static_var##_annotator; \
+ } // namespace
+
+// Function prototypes of annotations provided by the compiler-based sanitizer
+// implementation.
+ABSL_INTERNAL_BEGIN_EXTERN_C
+void AnnotateRWLockCreate(const char *file, int line,
+ const volatile void *lock);
+void AnnotateRWLockCreateStatic(const char *file, int line,
+ const volatile void *lock);
+void AnnotateRWLockDestroy(const char *file, int line,
+ const volatile void *lock);
+void AnnotateRWLockAcquired(const char *file, int line,
+ const volatile void *lock, long is_w); // NOLINT
+void AnnotateRWLockReleased(const char *file, int line,
+ const volatile void *lock, long is_w); // NOLINT
+void AnnotateBenignRace(const char *file, int line,
+ const volatile void *address, const char *description);
+void AnnotateBenignRaceSized(const char *file, int line,
+ const volatile void *address, size_t size,
+ const char *description);
+void AnnotateThreadName(const char *file, int line, const char *name);
+void AnnotateEnableRaceDetection(const char *file, int line, int enable);
+ABSL_INTERNAL_END_EXTERN_C
+
+#else // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 0
+
+#define ABSL_ANNOTATE_RWLOCK_CREATE(lock) // empty
+#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) // empty
+#define ABSL_ANNOTATE_RWLOCK_DESTROY(lock) // empty
+#define ABSL_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) // empty
+#define ABSL_ANNOTATE_RWLOCK_RELEASED(lock, is_w) // empty
+#define ABSL_ANNOTATE_BENIGN_RACE(address, description) // empty
+#define ABSL_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) // empty
+#define ABSL_ANNOTATE_THREAD_NAME(name) // empty
+#define ABSL_ANNOTATE_ENABLE_RACE_DETECTION(enable) // empty
+#define ABSL_ANNOTATE_BENIGN_RACE_STATIC(static_var, description) // empty
+
+#endif // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
+
+// -------------------------------------------------------------------------
+// Define memory annotations.
+
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
+
+#include <sanitizer/msan_interface.h>
+
+#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+ __msan_unpoison(address, size)
+
+#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
+ __msan_allocated_memory(address, size)
+
+#else // !defined(ABSL_HAVE_MEMORY_SANITIZER)
+
+#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) // empty
+#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) // empty
+
+#endif // ABSL_HAVE_MEMORY_SANITIZER
+
+// -------------------------------------------------------------------------
+// Define IGNORE_READS_BEGIN/_END attributes.
+
+#if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \
+ __attribute((exclusive_lock_function("*")))
+#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \
+ __attribute((unlock_function("*")))
+
+#else // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE // empty
+#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE // empty
+
+#endif // defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+// -------------------------------------------------------------------------
+// Define IGNORE_READS_BEGIN/_END annotations.
+
+#if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1
+// Some of the symbols used in this section (e.g. AnnotateIgnoreReadsBegin) are
+// defined by the compiler-based implementation, not by the Abseil
+// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL.
+
+// Request the analysis tool to ignore all reads in the current thread until
+// ABSL_ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey
+// reads, while still checking other reads and all writes.
+// See also ABSL_ANNOTATE_UNPROTECTED_READ.
+#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin) \
+ (__FILE__, __LINE__)
+
+// Stop ignoring reads.
+#define ABSL_ANNOTATE_IGNORE_READS_END() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd) \
+ (__FILE__, __LINE__)
+
+// Function prototypes of annotations provided by the compiler-based sanitizer
+// implementation.
+ABSL_INTERNAL_BEGIN_EXTERN_C
+void AnnotateIgnoreReadsBegin(const char *file, int line)
+ ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE;
+void AnnotateIgnoreReadsEnd(const char *file,
+ int line) ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE;
+ABSL_INTERNAL_END_EXTERN_C
+
+#elif defined(ABSL_INTERNAL_ANNOTALYSIS_ENABLED)
+
+// When Annotalysis is enabled without Dynamic Annotations, the use of
+// static-inline functions allows the annotations to be read at compile-time,
+// while still letting the compiler elide the functions from the final build.
+//
+// TODO(delesley) -- The exclusive lock here ignores writes as well, but
+// allows IGNORE_READS_AND_WRITES to work properly.
+
+#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \
+ ABSL_INTERNAL_GLOBAL_SCOPED( \
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsBegin)) \
+ ()
+
+#define ABSL_ANNOTATE_IGNORE_READS_END() \
+ ABSL_INTERNAL_GLOBAL_SCOPED( \
+ ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsEnd)) \
+ ()
+
+ABSL_INTERNAL_STATIC_INLINE void
+ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsBegin)()
+ ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE {}
+
+ABSL_INTERNAL_STATIC_INLINE void
+ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsEnd)()
+ ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE {}
+
+#else
+
+#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() // empty
+#define ABSL_ANNOTATE_IGNORE_READS_END() // empty
+
+#endif
+
+// -------------------------------------------------------------------------
+// Define IGNORE_WRITES_BEGIN/_END annotations.
+
+#if ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED == 1
+
+// Similar to ABSL_ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead.
+#define ABSL_ANNOTATE_IGNORE_WRITES_BEGIN() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__)
+
+// Stop ignoring writes.
+#define ABSL_ANNOTATE_IGNORE_WRITES_END() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__)
+
+// Function prototypes of annotations provided by the compiler-based sanitizer
+// implementation.
+ABSL_INTERNAL_BEGIN_EXTERN_C
+void AnnotateIgnoreWritesBegin(const char *file, int line);
+void AnnotateIgnoreWritesEnd(const char *file, int line);
+ABSL_INTERNAL_END_EXTERN_C
+
+#else
+
+#define ABSL_ANNOTATE_IGNORE_WRITES_BEGIN() // empty
+#define ABSL_ANNOTATE_IGNORE_WRITES_END() // empty
+
+#endif
+
+// -------------------------------------------------------------------------
+// Define the ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more
+// primitive annotations defined above.
+//
+// Instead of doing
+// ABSL_ANNOTATE_IGNORE_READS_BEGIN();
+// ... = x;
+// ABSL_ANNOTATE_IGNORE_READS_END();
+// one can use
+// ... = ABSL_ANNOTATE_UNPROTECTED_READ(x);
+
+#if defined(ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED)
+
+// Start ignoring all memory accesses (both reads and writes).
+#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
+ do { \
+ ABSL_ANNOTATE_IGNORE_READS_BEGIN(); \
+ ABSL_ANNOTATE_IGNORE_WRITES_BEGIN(); \
+ } while (0)
+
+// Stop ignoring both reads and writes.
+#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END() \
+ do { \
+ ABSL_ANNOTATE_IGNORE_WRITES_END(); \
+ ABSL_ANNOTATE_IGNORE_READS_END(); \
+ } while (0)
+
+#ifdef __cplusplus
+// ABSL_ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads.
+#define ABSL_ANNOTATE_UNPROTECTED_READ(x) \
+ absl::base_internal::AnnotateUnprotectedRead(x)
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+template <typename T>
+inline T AnnotateUnprotectedRead(const volatile T &x) { // NOLINT
+ ABSL_ANNOTATE_IGNORE_READS_BEGIN();
+ T res = x;
+ ABSL_ANNOTATE_IGNORE_READS_END();
+ return res;
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+#endif
+
+#else
+
+#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() // empty
+#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END() // empty
+#define ABSL_ANNOTATE_UNPROTECTED_READ(x) (x)
+
+#endif
+
+// -------------------------------------------------------------------------
+// Address sanitizer annotations
+
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+// Describe the current state of a contiguous container such as e.g.
+// std::vector or std::string. For more details see
+// sanitizer/common_interface_defs.h, which is provided by the compiler.
+#include <sanitizer/common_interface_defs.h>
+
+#define ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \
+ __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid)
+#define ABSL_ADDRESS_SANITIZER_REDZONE(name) \
+ struct { \
+ alignas(8) char x[8]; \
+ } name
+
+#else
+
+#define ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) // empty
+#define ABSL_ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "")
+
+#endif // ABSL_HAVE_ADDRESS_SANITIZER
+
+// -------------------------------------------------------------------------
+// HWAddress sanitizer annotations
+
+#ifdef __cplusplus
+namespace absl {
+#ifdef ABSL_HAVE_HWADDRESS_SANITIZER
+// Under HWASAN, changes the tag of the pointer.
+template <typename T> T *HwasanTagPointer(T *ptr, uintptr_t tag) {
+ return reinterpret_cast(__hwasan_tag_pointer(ptr, tag));
+}
+#else
+template <typename T> T *HwasanTagPointer(T *ptr, uintptr_t) { return ptr; }
+#endif
+} // namespace absl
+#endif
+
+// -------------------------------------------------------------------------
+// Undefine the macros intended only for this file.
+
+#undef ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_ANNOTALYSIS_ENABLED
+#undef ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_BEGIN_EXTERN_C
+#undef ABSL_INTERNAL_END_EXTERN_C
+#undef ABSL_INTERNAL_STATIC_INLINE
+
+#endif // ABSL_BASE_DYNAMIC_ANNOTATIONS_H_
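
Usage sketch for the racy-read annotation: under TSan the read is reported as intentional; in other builds the macro reduces to a plain read of `x`:

```cpp
#include "absl/base/dynamic_annotations.h"

bool done_flag = false; // hypothetically written by another thread, unsynchronized

bool pollDone() {
  return ABSL_ANNOTATE_UNPROTECTED_READ(done_flag);
}
```
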
diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/atomic_hook_test_helper.h b/packages/react-native-executorch/third-party/include/absl/base/internal/atomic_hook_test_helper.h
new file mode 100644
index 000000000..f5ff0fea3
--- /dev/null
+++ b/packages/react-native-executorch/third-party/include/absl/base/internal/atomic_hook_test_helper.h
@@ -0,0 +1,34 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_ATOMIC_HOOK_TEST_HELPER_H_
+#define ABSL_BASE_INTERNAL_ATOMIC_HOOK_TEST_HELPER_H_
+
+#include "absl/base/internal/atomic_hook.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace atomic_hook_internal {
+
+using VoidF = void (*)();
+extern absl::base_internal::AtomicHook<VoidF> func;
+extern int default_func_calls;
+void DefaultFunc();
+void RegisterFunc(VoidF func);
+
+} // namespace atomic_hook_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_ATOMIC_HOOK_TEST_HELPER_H_
diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/cycleclock.h b/packages/react-native-executorch/third-party/include/absl/base/internal/cycleclock.h
new file mode 100644
index 000000000..a6ca008c4
--- /dev/null
+++ b/packages/react-native-executorch/third-party/include/absl/base/internal/cycleclock.h
@@ -0,0 +1,144 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+// -----------------------------------------------------------------------------
+// File: cycleclock.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines a `CycleClock`, which yields the value and frequency
+// of a cycle counter that increments at a rate that is approximately constant.
+//
+// NOTE:
+//
+// The cycle counter frequency is not necessarily related to the core clock
+// frequency and should not be treated as such. That is, `CycleClock` cycles are
+// not necessarily "CPU cycles" and code should not rely on that behavior, even
+// if experimentally observed.
+//
+// An arbitrary offset may have been added to the counter at power on.
+//
+// On some platforms, the rate and offset of the counter may differ
+// slightly when read from different CPUs of a multiprocessor. Usually,
+// we try to ensure that the operating system adjusts values periodically
+// so that values agree approximately. If you need stronger guarantees,
+// consider using alternate interfaces.
+//
+// The CPU is not required to maintain the ordering of a cycle counter read
+// with respect to surrounding instructions.
+
+#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_H_
+#define ABSL_BASE_INTERNAL_CYCLECLOCK_H_
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/cycleclock_config.h"
+#include "absl/base/internal/unscaledcycleclock.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+using CycleClockSourceFunc = int64_t (*)();
+
+// -----------------------------------------------------------------------------
+// CycleClock
+// -----------------------------------------------------------------------------
+class CycleClock {
+public:
+ // CycleClock::Now()
+ //
+ // Returns the value of a cycle counter that counts at a rate that is
+ // approximately constant.
+ static int64_t Now();
+
+ // CycleClock::Frequency()
+ //
+ // Returns the amount by which `CycleClock::Now()` increases per second. Note
+ // that this value may not necessarily match the core CPU clock frequency.
+ static double Frequency();
+
+private:
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+ static CycleClockSourceFunc LoadCycleClockSource();
+
+ static constexpr int32_t kShift = kCycleClockShift;
+ static constexpr double kFrequencyScale = kCycleClockFrequencyScale;
+
+ ABSL_CONST_INIT static std::atomic<CycleClockSourceFunc> cycle_clock_source_;
+#endif // ABSL_USE_UNSCALED_CYCLECLOCK
+
+ CycleClock() = delete; // no instances
+ CycleClock(const CycleClock &) = delete;
+ CycleClock &operator=(const CycleClock &) = delete;
+
+ friend class CycleClockSource;
+};
+
+class CycleClockSource {
+private:
+ // CycleClockSource::Register()
+ //
+ // Register a function that provides an alternate source for the unscaled CPU
+ // cycle count value. The source function must be async signal safe, must not
+ // call CycleClock::Now(), and must have a frequency that matches that of the
+ // unscaled clock used by CycleClock. A nullptr value resets CycleClock to use
+ // the default source.
+ static void Register(CycleClockSourceFunc source);
+};
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+
+inline CycleClockSourceFunc CycleClock::LoadCycleClockSource() {
+#if !defined(__x86_64__)
+ // Optimize for the common case (no callback) by first doing a relaxed load;
+ // this is significantly faster on non-x86 platforms.
+ if (cycle_clock_source_.load(std::memory_order_relaxed) == nullptr) {
+ return nullptr;
+ }
+#endif // !defined(__x86_64__)
+
+ // This corresponds to the store(std::memory_order_release) in
+ // CycleClockSource::Register, and makes sure that any updates made prior to
+ // registering the callback are visible to this thread before the callback
+ // is invoked.
+ return cycle_clock_source_.load(std::memory_order_acquire);
+}
+
+// Accessing globals in inlined code in Windows DLLs is problematic.
+#ifndef _WIN32
+inline int64_t CycleClock::Now() {
+ auto fn = LoadCycleClockSource();
+ if (fn == nullptr) {
+ return base_internal::UnscaledCycleClock::Now() >> kShift;
+ }
+ return fn() >> kShift;
+}
+#endif
+
+inline double CycleClock::Frequency() {
+ return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency();
+}
+
+#endif // ABSL_USE_UNSCALED_CYCLECLOCK
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_CYCLECLOCK_H_
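
A sketch of timing a region with this clock (it lives in an internal namespace, so the API is subject to change without notice):

```cpp
#include <cstdint>
#include "absl/base/internal/cycleclock.h"

// Convert a cycle delta to seconds using the clock's reported frequency.
double elapsedSeconds(int64_t startCycles) {
  using absl::base_internal::CycleClock;
  return static_cast<double>(CycleClock::Now() - startCycles) /
         CycleClock::Frequency();
}
// Usage: int64_t t0 = absl::base_internal::CycleClock::Now(); ...; elapsedSeconds(t0);
```
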
diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/cycleclock_config.h b/packages/react-native-executorch/third-party/include/absl/base/internal/cycleclock_config.h
new file mode 100644
index 000000000..3e15b9712
--- /dev/null
+++ b/packages/react-native-executorch/third-party/include/absl/base/internal/cycleclock_config.h
@@ -0,0 +1,55 @@
+// Copyright 2022 The Abseil Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_CONFIG_H_
+#define ABSL_BASE_INTERNAL_CYCLECLOCK_CONFIG_H_
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/inline_variable.h"
+#include "absl/base/internal/unscaledcycleclock_config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+#if ABSL_USE_UNSCALED_CYCLECLOCK
+#ifdef NDEBUG
+#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY
+// Not debug mode and the UnscaledCycleClock frequency is the CPU
+// frequency. Scale the CycleClock to prevent overflow if someone
+// tries to represent the time as cycles since the Unix epoch.
+ABSL_INTERNAL_INLINE_CONSTEXPR(int32_t, kCycleClockShift, 1);
+#else
+// Not debug mode and the UnscaledCycleClock isn't operating at the
+// raw CPU frequency. There is no need to do any scaling, so don't
+// needlessly sacrifice precision.
+ABSL_INTERNAL_INLINE_CONSTEXPR(int32_t, kCycleClockShift, 0);
+#endif
+#else // NDEBUG
+// In debug mode use a different shift to discourage depending on a
+// particular shift value.
+ABSL_INTERNAL_INLINE_CONSTEXPR(int32_t, kCycleClockShift, 2);
+#endif // NDEBUG
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(double, kCycleClockFrequencyScale,
+ 1.0 / (1 << kCycleClockShift));
+#endif // ABSL_USE_UNSCALED_CYCLECLOCK
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_CYCLECLOCK_CONFIG_H_
diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/direct_mmap.h b/packages/react-native-executorch/third-party/include/absl/base/internal/direct_mmap.h
new file mode 100644
index 000000000..d657f11b5
--- /dev/null
+++ b/packages/react-native-executorch/third-party/include/absl/base/internal/direct_mmap.h
@@ -0,0 +1,170 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Functions for directly invoking mmap() via syscall, avoiding the case where
+// mmap() has been locally overridden.
+
+#ifndef ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
+#define ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
+
+#include "absl/base/config.h"
+
+#ifdef ABSL_HAVE_MMAP
+
+#include <sys/mman.h>
+
+#ifdef __linux__
+
+#include <sys/types.h>
+#ifdef __BIONIC__
+#include <sys/syscall.h>
+#else
+#include <syscall.h>
+#endif
+
+#include <linux/unistd.h>
+#include <unistd.h>
+#include <cerrno>
+#include <cstdarg>
+#include <cstdint>
+
+#ifdef __mips__
+// Include definitions of the ABI currently in use.
+#if defined(__BIONIC__) || !defined(__GLIBC__)
+// Android doesn't have sgidefs.h, but does have asm/sgidefs.h, which has the
+// definitions we need.
+#include <asm/sgidefs.h>
+#else
+#include <sgidefs.h>
+#endif // __BIONIC__ || !__GLIBC__
+#endif // __mips__
+
+// SYS_mmap and SYS_munmap are not defined in Android.
+#ifdef __BIONIC__
+extern "C" void *__mmap2(void *, size_t, int, int, int, size_t);
+#if defined(__NR_mmap) && !defined(SYS_mmap)
+#define SYS_mmap __NR_mmap
+#endif
+#ifndef SYS_munmap
+#define SYS_munmap __NR_munmap
+#endif
+#endif // __BIONIC__
+
+#if defined(__NR_mmap2) && !defined(SYS_mmap2)
+#define SYS_mmap2 __NR_mmap2
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Platform specific logic extracted from
+// https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h
+inline void *DirectMmap(void *start, size_t length, int prot, int flags, int fd,
+ off_t offset) noexcept {
+#if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \
+ defined(__m68k__) || defined(__sh__) || \
+ (defined(__hppa__) && !defined(__LP64__)) || \
+ (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || \
+ (defined(__PPC__) && !defined(__PPC64__)) || \
+ (defined(__riscv) && __riscv_xlen == 32) || \
+ (defined(__s390__) && !defined(__s390x__)) || \
+ (defined(__sparc__) && !defined(__arch64__))
+ // On these architectures, implement mmap with mmap2.
+ static int pagesize = 0;
+ if (pagesize == 0) {
+#if defined(__wasm__) || defined(__asmjs__)
+ pagesize = getpagesize();
+#else
+ pagesize = sysconf(_SC_PAGESIZE);
+#endif
+ }
+ if (offset < 0 || offset % pagesize != 0) {
+ errno = EINVAL;
+ return MAP_FAILED;
+ }
+#ifdef __BIONIC__
+ // SYS_mmap2 has problems on Android API level <= 16.
+ // Workaround by invoking __mmap2() instead.
+ return __mmap2(start, length, prot, flags, fd,
+ static_cast<size_t>(offset / pagesize));
+#else
+ return reinterpret_cast<void *>(
+ syscall(SYS_mmap2, start, length, prot, flags, fd,
+ static_cast<off_t>(offset / pagesize))); // NOLINT
+#endif
+#elif defined(__s390x__)
+ // On s390x, mmap() arguments are passed in memory.
+ unsigned long buf[6] = {reinterpret_cast<unsigned long>(start), // NOLINT
+ static_cast<unsigned long>(length), // NOLINT
+ static_cast<unsigned long>(prot), // NOLINT
+ static_cast<unsigned long>(flags), // NOLINT
+ static_cast<unsigned long>(fd), // NOLINT
+ static_cast<unsigned long>(offset)}; // NOLINT
+ return reinterpret_cast<void *>(syscall(SYS_mmap, buf));
+#elif defined(__x86_64__)
+// The x32 ABI has 32 bit longs, but the syscall interface is 64 bit.
+// We need to explicitly cast to an unsigned 64 bit type to avoid implicit
+// sign extension. We can't cast pointers directly because those are
+// 32 bits, and gcc will dump ugly warnings about casting from a pointer
+// to an integer of a different size. We also need to make sure __off64_t
+// isn't truncated to 32-bits under x32.
+#define MMAP_SYSCALL_ARG(x) ((uint64_t)(uintptr_t)(x))
+ return reinterpret_cast<void *>(
+ syscall(SYS_mmap, MMAP_SYSCALL_ARG(start), MMAP_SYSCALL_ARG(length),
+ MMAP_SYSCALL_ARG(prot), MMAP_SYSCALL_ARG(flags),
+ MMAP_SYSCALL_ARG(fd), static_cast<uint64_t>(offset)));
+#undef MMAP_SYSCALL_ARG
+#else // Remaining 64-bit architectures.
+ static_assert(sizeof(unsigned long) == 8, "Platform is not 64-bit");
+ return reinterpret_cast<void *>(
+ syscall(SYS_mmap, start, length, prot, flags, fd, offset));
+#endif
+}
+
+inline int DirectMunmap(void *start, size_t length) {
+ return static_cast<int>(syscall(SYS_munmap, start, length));
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#else // !__linux__
+
+// For non-linux platforms where we have mmap, just dispatch directly to the
+// actual mmap()/munmap() methods.
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+inline void *DirectMmap(void *start, size_t length, int prot, int flags, int fd,
+ off_t offset) {
+ return mmap(start, length, prot, flags, fd, offset);
+}
+
+inline int DirectMunmap(void *start, size_t length) {
+ return munmap(start, length);
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // __linux__
+
+#endif // ABSL_HAVE_MMAP
+
+#endif // ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
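Because DirectMmap()/DirectMunmap() enter the kernel through syscall(2) (or __mmap2() on old Bionic), they keep working even when mmap() itself has been interposed by a sanitizer or a malloc replacement. A minimal usage sketch (illustration only, not part of the vendored header):

    #include <sys/mman.h>
    // Map one anonymous read/write page via the raw syscall, then release it.
    void *page = absl::base_internal::DirectMmap(
        nullptr, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS,
        /*fd=*/-1, /*offset=*/0);
    if (page != MAP_FAILED) {
      absl::base_internal::DirectMunmap(page, 4096);
    }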
diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/dynamic_annotations.h b/packages/react-native-executorch/third-party/include/absl/base/internal/dynamic_annotations.h
new file mode 100644
index 000000000..4366fc7ef
--- /dev/null
+++ b/packages/react-native-executorch/third-party/include/absl/base/internal/dynamic_annotations.h
@@ -0,0 +1,398 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file defines dynamic annotations for use with dynamic analysis tools
+// such as valgrind, PIN, etc.
+//
+// Dynamic annotation is a source code annotation that affects the generated
+// code (that is, the annotation is not a comment). Each such annotation is
+// attached to a particular instruction and/or to a particular object (address)
+// in the program.
+//
+// The annotations that should be used by users are macros in all upper-case
+// (e.g., ANNOTATE_THREAD_NAME).
+//
+// Actual implementation of these macros may differ depending on the dynamic
+// analysis tool being used.
+//
+// This file supports the following configurations:
+// - Dynamic Annotations enabled (with static thread-safety warnings disabled).
+// In this case, macros expand to functions implemented by Thread Sanitizer,
+// when building with TSan. When not provided an external implementation,
+// dynamic_annotations.cc provides no-op implementations.
+//
+// - Static Clang thread-safety warnings enabled.
+// When building with a Clang compiler that supports thread-safety warnings,
+// a subset of annotations can be statically-checked at compile-time. We
+// expand these macros to static-inline functions that can be analyzed for
+// thread-safety, but afterwards elided when building the final binary.
+//
+// - All annotations are disabled.
+// If neither Dynamic Annotations nor Clang thread-safety warnings are
+// enabled, then all annotation-macros expand to empty.
+
+#ifndef ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_
+#define ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_
+
+#include <stddef.h>
+
+#include "absl/base/config.h"
+
+// -------------------------------------------------------------------------
+// Decide which features are enabled
+
+#ifndef DYNAMIC_ANNOTATIONS_ENABLED
+#define DYNAMIC_ANNOTATIONS_ENABLED 0
+#endif
+
+#if defined(__clang__) && !defined(SWIG)
+#define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 1
+#endif
+
+#if DYNAMIC_ANNOTATIONS_ENABLED != 0
+
+#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0
+#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED 1
+
+#else
+
+#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 0
+#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 0
+#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 0
+
+// Clang provides limited support for static thread-safety analysis through a
+// feature called Annotalysis. We configure macro-definitions according to
+// whether Annotalysis support is available. When running in opt-mode, GCC
+// will issue a warning, if these attributes are compiled. Only include them
+// when compiling using Clang.
+
+// ANNOTALYSIS_ENABLED == 1 when IGNORE_READ_ATTRIBUTE_ENABLED == 1
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED \
+ defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+// Read/write annotations are enabled in Annotalysis mode; disabled otherwise.
+#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \
+ ABSL_INTERNAL_ANNOTALYSIS_ENABLED
+#endif
+
+// Memory annotations are also made available to LLVM's Memory Sanitizer
+#if defined(ABSL_HAVE_MEMORY_SANITIZER) && !defined(__native_client__)
+#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 1
+#endif
+
+#ifndef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED
+#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 0
+#endif
+
+#ifdef __cplusplus
+#define ABSL_INTERNAL_BEGIN_EXTERN_C extern "C" {
+#define ABSL_INTERNAL_END_EXTERN_C } // extern "C"
+#define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F
+#define ABSL_INTERNAL_STATIC_INLINE inline
+#else
+#define ABSL_INTERNAL_BEGIN_EXTERN_C // empty
+#define ABSL_INTERNAL_END_EXTERN_C // empty
+#define ABSL_INTERNAL_GLOBAL_SCOPED(F) F
+#define ABSL_INTERNAL_STATIC_INLINE static inline
+#endif
+
+// -------------------------------------------------------------------------
+// Define race annotations.
+
+#if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1
+
+// -------------------------------------------------------------
+// Annotations that suppress errors. It is usually better to express the
+// program's synchronization using the other annotations, but these can be used
+// when all else fails.
+
+// Report that we may have a benign race at `pointer`, with size
+// "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the
+// point where `pointer` has been allocated, preferably close to the point
+// where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC.
+#define ANNOTATE_BENIGN_RACE(pointer, description) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \
+ (__FILE__, __LINE__, pointer, sizeof(*(pointer)), description)
+
+// Same as ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to
+// the memory range [`address`, `address`+`size`).
+#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \
+ (__FILE__, __LINE__, address, size, description)
+
+// Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads.
+// This annotation could be useful if you want to skip expensive race analysis
+// during some period of program execution, e.g. during initialization.
+#define ANNOTATE_ENABLE_RACE_DETECTION(enable) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \
+ (__FILE__, __LINE__, enable)
+
+// -------------------------------------------------------------
+// Annotations useful for debugging.
+
+// Report the current thread `name` to a race detector.
+#define ANNOTATE_THREAD_NAME(name) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName)(__FILE__, __LINE__, name)
+
+// -------------------------------------------------------------
+// Annotations useful when implementing locks. They are not normally needed by
+// modules that merely use locks. The `lock` argument is a pointer to the lock
+// object.
+
+// Report that a lock has been created at address `lock`.
+#define ANNOTATE_RWLOCK_CREATE(lock) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock)
+
+// Report that a linker initialized lock has been created at address `lock`.
+#ifdef ABSL_HAVE_THREAD_SANITIZER
+#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \
+ (__FILE__, __LINE__, lock)
+#else
+#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) ANNOTATE_RWLOCK_CREATE(lock)
+#endif
+
+// Report that the lock at address `lock` is about to be destroyed.
+#define ANNOTATE_RWLOCK_DESTROY(lock) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock)
+
+// Report that the lock at address `lock` has been acquired.
+// `is_w`=1 for writer lock, `is_w`=0 for reader lock.
+#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \
+ (__FILE__, __LINE__, lock, is_w)
+
+// Report that the lock at address `lock` is about to be released.
+// `is_w`=1 for writer lock, `is_w`=0 for reader lock.
+#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \
+ (__FILE__, __LINE__, lock, is_w)
+
+// Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable `static_var`.
+#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \
+ namespace { \
+ class static_var##_annotator { \
+ public: \
+ static_var##_annotator() { \
+ ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \
+ #static_var ": " description); \
+ } \
+ }; \
+ static static_var##_annotator the##static_var##_annotator; \
+ } // namespace
+
+#else // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 0
+
+#define ANNOTATE_RWLOCK_CREATE(lock) // empty
+#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) // empty
+#define ANNOTATE_RWLOCK_DESTROY(lock) // empty
+#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) // empty
+#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) // empty
+#define ANNOTATE_BENIGN_RACE(address, description) // empty
+#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) // empty
+#define ANNOTATE_THREAD_NAME(name) // empty
+#define ANNOTATE_ENABLE_RACE_DETECTION(enable) // empty
+#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) // empty
+
+#endif // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
+
+// -------------------------------------------------------------------------
+// Define memory annotations.
+
+#if ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 1
+
+#include <sanitizer/msan_interface.h>
+
+#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+ __msan_unpoison(address, size)
+
+#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
+ __msan_allocated_memory(address, size)
+
+#else // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 0
+
+#if DYNAMIC_ANNOTATIONS_ENABLED == 1
+#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+ do { \
+ (void)(address); \
+ (void)(size); \
+ } while (0)
+#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
+ do { \
+ (void)(address); \
+ (void)(size); \
+ } while (0)
+#else
+#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) // empty
+#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) // empty
+#endif
+
+#endif // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED
+
+// -------------------------------------------------------------------------
+// Define IGNORE_READS_BEGIN/_END attributes.
+
+#if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \
+ __attribute((exclusive_lock_function("*")))
+#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \
+ __attribute((unlock_function("*")))
+
+#else // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE // empty
+#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE // empty
+
+#endif // defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+// -------------------------------------------------------------------------
+// Define IGNORE_READS_BEGIN/_END annotations.
+
+#if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1
+
+// Request the analysis tool to ignore all reads in the current thread until
+// ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racy
+// reads, while still checking other reads and all writes.
+// See also ANNOTATE_UNPROTECTED_READ.
+#define ANNOTATE_IGNORE_READS_BEGIN() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin)(__FILE__, __LINE__)
+
+// Stop ignoring reads.
+#define ANNOTATE_IGNORE_READS_END() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd)(__FILE__, __LINE__)
+
+#elif defined(ABSL_INTERNAL_ANNOTALYSIS_ENABLED)
+
+// When Annotalysis is enabled without Dynamic Annotations, the use of
+// static-inline functions allows the annotations to be read at compile-time,
+// while still letting the compiler elide the functions from the final build.
+//
+// TODO(delesley) -- The exclusive lock here ignores writes as well, but
+// allows IGNORE_READS_AND_WRITES to work properly.
+
+#define ANNOTATE_IGNORE_READS_BEGIN() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsBegin)()
+
+#define ANNOTATE_IGNORE_READS_END() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsEnd)()
+
+#else
+
+#define ANNOTATE_IGNORE_READS_BEGIN() // empty
+#define ANNOTATE_IGNORE_READS_END() // empty
+
+#endif
+
+// -------------------------------------------------------------------------
+// Define IGNORE_WRITES_BEGIN/_END annotations.
+
+#if ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED == 1
+
+// Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead.
+#define ANNOTATE_IGNORE_WRITES_BEGIN() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__)
+
+// Stop ignoring writes.
+#define ANNOTATE_IGNORE_WRITES_END() \
+ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__)
+
+#else
+
+#define ANNOTATE_IGNORE_WRITES_BEGIN() // empty
+#define ANNOTATE_IGNORE_WRITES_END() // empty
+
+#endif
+
+// -------------------------------------------------------------------------
+// Define the ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more
+// primitive annotations defined above.
+//
+// Instead of doing
+// ANNOTATE_IGNORE_READS_BEGIN();
+// ... = x;
+// ANNOTATE_IGNORE_READS_END();
+// one can use
+// ... = ANNOTATE_UNPROTECTED_READ(x);
+
+#if defined(ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED)
+
+// Start ignoring all memory accesses (both reads and writes).
+#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \
+ do { \
+ ANNOTATE_IGNORE_READS_BEGIN(); \
+ ANNOTATE_IGNORE_WRITES_BEGIN(); \
+ } while (0)
+
+// Stop ignoring both reads and writes.
+#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \
+ do { \
+ ANNOTATE_IGNORE_WRITES_END(); \
+ ANNOTATE_IGNORE_READS_END(); \
+ } while (0)
+
+#ifdef __cplusplus
+// ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racy reads.
+#define ANNOTATE_UNPROTECTED_READ(x) \
+ absl::base_internal::AnnotateUnprotectedRead(x)
+
+#endif
+
+#else
+
+#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() // empty
+#define ANNOTATE_IGNORE_READS_AND_WRITES_END() // empty
+#define ANNOTATE_UNPROTECTED_READ(x) (x)
+
+#endif
+
+// -------------------------------------------------------------------------
+// Address sanitizer annotations
+
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+// Describe the current state of a contiguous container such as e.g.
+// std::vector or std::string. For more details see
+// sanitizer/common_interface_defs.h, which is provided by the compiler.
+#include <sanitizer/common_interface_defs.h>
+
+#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \
+ __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid)
+#define ADDRESS_SANITIZER_REDZONE(name) \
+ struct { \
+ char x[8] __attribute__((aligned(8))); \
+ } name
+
+#else
+
+#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid)
+#define ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "")
+
+#endif // ABSL_HAVE_ADDRESS_SANITIZER
+
+// -------------------------------------------------------------------------
+// Undefine the macros intended only for this file.
+
+#undef ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_ANNOTALYSIS_ENABLED
+#undef ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED
+#undef ABSL_INTERNAL_BEGIN_EXTERN_C
+#undef ABSL_INTERNAL_END_EXTERN_C
+#undef ABSL_INTERNAL_STATIC_INLINE
+
+#endif // ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_
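All of these macros compile to nothing unless a dynamic tool (or Annotalysis) is active, so they can be left in hot paths. A short sketch of the intended pattern for an intentionally unsynchronized counter, assuming TSan-style semantics (illustration only, not part of the vendored header):

    // A deliberately racy statistics counter whose races we accept.
    static int hit_count = 0;
    void InitStats() {
      ANNOTATE_BENIGN_RACE(&hit_count, "approximate stats counter");
    }
    void RecordHit() { ++hit_count; }  // racy write, suppressed above
    int ApproxHits() { return ANNOTATE_UNPROTECTED_READ(hit_count); }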
diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/endian.h b/packages/react-native-executorch/third-party/include/absl/base/internal/endian.h
new file mode 100644
index 000000000..4b630e76e
--- /dev/null
+++ b/packages/react-native-executorch/third-party/include/absl/base/internal/endian.h
@@ -0,0 +1,282 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_ENDIAN_H_
+#define ABSL_BASE_INTERNAL_ENDIAN_H_
+
+#include <cstdint>
+#include <cstdlib>
+
+#include "absl/base/casts.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/unaligned_access.h"
+#include "absl/base/nullability.h"
+#include "absl/base/port.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+inline uint64_t gbswap_64(uint64_t host_int) {
+#if ABSL_HAVE_BUILTIN(__builtin_bswap64) || defined(__GNUC__)
+ return __builtin_bswap64(host_int);
+#elif defined(_MSC_VER)
+ return _byteswap_uint64(host_int);
+#else
+ return (((host_int & uint64_t{0xFF}) << 56) |
+ ((host_int & uint64_t{0xFF00}) << 40) |
+ ((host_int & uint64_t{0xFF0000}) << 24) |
+ ((host_int & uint64_t{0xFF000000}) << 8) |
+ ((host_int & uint64_t{0xFF00000000}) >> 8) |
+ ((host_int & uint64_t{0xFF0000000000}) >> 24) |
+ ((host_int & uint64_t{0xFF000000000000}) >> 40) |
+ ((host_int & uint64_t{0xFF00000000000000}) >> 56));
+#endif
+}
+
+inline uint32_t gbswap_32(uint32_t host_int) {
+#if ABSL_HAVE_BUILTIN(__builtin_bswap32) || defined(__GNUC__)
+ return __builtin_bswap32(host_int);
+#elif defined(_MSC_VER)
+ return _byteswap_ulong(host_int);
+#else
+ return (((host_int & uint32_t{0xFF}) << 24) |
+ ((host_int & uint32_t{0xFF00}) << 8) |
+ ((host_int & uint32_t{0xFF0000}) >> 8) |
+ ((host_int & uint32_t{0xFF000000}) >> 24));
+#endif
+}
+
+inline uint16_t gbswap_16(uint16_t host_int) {
+#if ABSL_HAVE_BUILTIN(__builtin_bswap16) || defined(__GNUC__)
+ return __builtin_bswap16(host_int);
+#elif defined(_MSC_VER)
+ return _byteswap_ushort(host_int);
+#else
+ return (((host_int & uint16_t{0xFF}) << 8) |
+ ((host_int & uint16_t{0xFF00}) >> 8));
+#endif
+}
+
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+// Portable definitions for htonl (host-to-network) and friends on little-endian
+// architectures.
+inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); }
+inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); }
+inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+// Portable definitions for htonl (host-to-network) etc on big-endian
+// architectures. These definitions are simpler since the host byte order is the
+// same as network byte order.
+inline uint16_t ghtons(uint16_t x) { return x; }
+inline uint32_t ghtonl(uint32_t x) { return x; }
+inline uint64_t ghtonll(uint64_t x) { return x; }
+
+#else
+#error "Unsupported byte order: Either ABSL_IS_BIG_ENDIAN or " \
+ "ABSL_IS_LITTLE_ENDIAN must be defined"
+#endif // byte order
+
+inline uint16_t gntohs(uint16_t x) { return ghtons(x); }
+inline uint32_t gntohl(uint32_t x) { return ghtonl(x); }
+inline uint64_t gntohll(uint64_t x) { return ghtonll(x); }
+
+// Utilities to convert numbers between the current host's native byte
+// order and little-endian byte order
+//
+// Load/Store methods are alignment safe
+namespace little_endian {
+// Conversion functions.
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return x; }
+inline uint16_t ToHost16(uint16_t x) { return x; }
+
+inline uint32_t FromHost32(uint32_t x) { return x; }
+inline uint32_t ToHost32(uint32_t x) { return x; }
+
+inline uint64_t FromHost64(uint64_t x) { return x; }
+inline uint64_t ToHost64(uint64_t x) { return x; }
+
+inline constexpr bool IsLittleEndian() { return true; }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
+inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
+
+inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
+inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
+
+inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
+inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
+
+inline constexpr bool IsLittleEndian() { return false; }
+
+#endif /* ENDIAN */
+
+inline uint8_t FromHost(uint8_t x) { return x; }
+inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
+inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
+inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
+inline uint8_t ToHost(uint8_t x) { return x; }
+inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
+inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
+inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }
+
+inline int8_t FromHost(int8_t x) { return x; }
+inline int16_t FromHost(int16_t x) {
+ return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t FromHost(int32_t x) {
+ return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t FromHost(int64_t x) {
+ return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
+}
+inline int8_t ToHost(int8_t x) { return x; }
+inline int16_t ToHost(int16_t x) {
+ return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t ToHost(int32_t x) {
+ return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t ToHost(int64_t x) {
+ return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
+}
+
+// Functions to do unaligned loads and stores in little-endian order.
+inline uint16_t Load16(absl::Nonnull<const void *> p) {
+ return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
+}
+
+inline void Store16(absl::Nonnull<void *> p, uint16_t v) {
+ ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
+}
+
+inline uint32_t Load32(absl::Nonnull<const void *> p) {
+ return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
+}
+
+inline void Store32(absl::Nonnull<void *> p, uint32_t v) {
+ ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
+}
+
+inline uint64_t Load64(absl::Nonnull<const void *> p) {
+ return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
+}
+
+inline void Store64(absl::Nonnull<void *> p, uint64_t v) {
+ ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
+}
+
+} // namespace little_endian
+
+// Utilities to convert numbers between the current host's native byte
+// order and big-endian byte order (same as network byte order)
+//
+// Load/Store methods are alignment safe
+namespace big_endian {
+#ifdef ABSL_IS_LITTLE_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); }
+inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); }
+
+inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); }
+inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); }
+
+inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); }
+inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); }
+
+inline constexpr bool IsLittleEndian() { return true; }
+
+#elif defined ABSL_IS_BIG_ENDIAN
+
+inline uint16_t FromHost16(uint16_t x) { return x; }
+inline uint16_t ToHost16(uint16_t x) { return x; }
+
+inline uint32_t FromHost32(uint32_t x) { return x; }
+inline uint32_t ToHost32(uint32_t x) { return x; }
+
+inline uint64_t FromHost64(uint64_t x) { return x; }
+inline uint64_t ToHost64(uint64_t x) { return x; }
+
+inline constexpr bool IsLittleEndian() { return false; }
+
+#endif /* ENDIAN */
+
+inline uint8_t FromHost(uint8_t x) { return x; }
+inline uint16_t FromHost(uint16_t x) { return FromHost16(x); }
+inline uint32_t FromHost(uint32_t x) { return FromHost32(x); }
+inline uint64_t FromHost(uint64_t x) { return FromHost64(x); }
+inline uint8_t ToHost(uint8_t x) { return x; }
+inline uint16_t ToHost(uint16_t x) { return ToHost16(x); }
+inline uint32_t ToHost(uint32_t x) { return ToHost32(x); }
+inline uint64_t ToHost(uint64_t x) { return ToHost64(x); }
+
+inline int8_t FromHost(int8_t x) { return x; }
+inline int16_t FromHost(int16_t x) {
+ return bit_cast<int16_t>(FromHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t FromHost(int32_t x) {
+ return bit_cast<int32_t>(FromHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t FromHost(int64_t x) {
+ return bit_cast<int64_t>(FromHost64(bit_cast<uint64_t>(x)));
+}
+inline int8_t ToHost(int8_t x) { return x; }
+inline int16_t ToHost(int16_t x) {
+ return bit_cast<int16_t>(ToHost16(bit_cast<uint16_t>(x)));
+}
+inline int32_t ToHost(int32_t x) {
+ return bit_cast<int32_t>(ToHost32(bit_cast<uint32_t>(x)));
+}
+inline int64_t ToHost(int64_t x) {
+ return bit_cast<int64_t>(ToHost64(bit_cast<uint64_t>(x)));
+}
+
+// Functions to do unaligned loads and stores in big-endian order.
+inline uint16_t Load16(absl::Nonnull<const void *> p) {
+ return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p));
+}
+
+inline void Store16(absl::Nonnull<void *> p, uint16_t v) {
+ ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v));
+}
+
+inline uint32_t Load32(absl::Nonnull<const void *> p) {
+ return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p));
+}
+
+inline void Store32(absl::Nonnull<void *> p, uint32_t v) {
+ ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v));
+}
+
+inline uint64_t Load64(absl::Nonnull<const void *> p) {
+ return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p));
+}
+
+inline void Store64(absl::Nonnull<void *> p, uint64_t v) {
+ ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v));
+}
+
+} // namespace big_endian
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_ENDIAN_H_
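The Load*/Store* helpers pair the unaligned-access macros with the byte-order conversions above, which is what makes them safe for packed wire formats. A small round-trip sketch (illustration only, not part of the vendored header):

    // Write a uint32_t at an unaligned offset in little-endian wire order,
    // then read it back; correct on any host byte order or alignment.
    char buf[8];
    absl::little_endian::Store32(buf + 1, 0xDEADBEEF);
    uint32_t v = absl::little_endian::Load32(buf + 1);  // v == 0xDEADBEEF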
diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/errno_saver.h b/packages/react-native-executorch/third-party/include/absl/base/internal/errno_saver.h
new file mode 100644
index 000000000..6adacc523
--- /dev/null
+++ b/packages/react-native-executorch/third-party/include/absl/base/internal/errno_saver.h
@@ -0,0 +1,43 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_ERRNO_SAVER_H_
+#define ABSL_BASE_INTERNAL_ERRNO_SAVER_H_
+
+#include <cerrno>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// `ErrnoSaver` captures the value of `errno` upon construction and restores it
+// upon deletion. It is used in low-level code and must be super fast. Do not
+// add instrumentation, even in debug modes.
+class ErrnoSaver {
+public:
+ ErrnoSaver() : saved_errno_(errno) {}
+ ~ErrnoSaver() { errno = saved_errno_; }
+ int operator()() const { return saved_errno_; }
+
+private:
+ const int saved_errno_;
+};
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_ERRNO_SAVER_H_
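The canonical use is a scope, often a signal handler, that must not clobber the interrupted code's errno. A minimal sketch (illustration only, not part of the vendored header):

    // ErrnoSaver restores the captured errno when the handler returns,
    // no matter what the body sets it to in between.
    void SignalHandler(int /*signo*/) {
      absl::base_internal::ErrnoSaver errno_saver;
      // ... work that may overwrite errno, e.g. write(2) to a self-pipe ...
    }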
diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/exception_safety_testing.h b/packages/react-native-executorch/third-party/include/absl/base/internal/exception_safety_testing.h
new file mode 100644
index 000000000..ff15dd3a4
--- /dev/null
+++ b/packages/react-native-executorch/third-party/include/absl/base/internal/exception_safety_testing.h
@@ -0,0 +1,1108 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Utilities for testing exception-safety
+
+#ifndef ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_
+#define ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_
+
+#include "absl/base/config.h"
+
+#ifdef ABSL_HAVE_EXCEPTIONS
+
+#include <cstddef>
+#include <cstdint>
+#include <functional>
+#include <initializer_list>
+#include <iosfwd>
+#include <string>
+#include <tuple>
+#include <unordered_map>
+
+#include "absl/base/internal/pretty_function.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/strings/string_view.h"
+#include "absl/strings/substitute.h"
+#include "absl/utility/utility.h"
+#include "gtest/gtest.h"
+
+namespace testing {
+
+enum class TypeSpec;
+enum class AllocSpec;
+
+constexpr TypeSpec operator|(TypeSpec a, TypeSpec b) {
+ using T = absl::underlying_type_t<TypeSpec>;
+ return static_cast<TypeSpec>(static_cast<T>(a) | static_cast<T>(b));
+}
+
+constexpr TypeSpec operator&(TypeSpec a, TypeSpec b) {
+ using T = absl::underlying_type_t<TypeSpec>;
+ return static_cast<TypeSpec>(static_cast<T>(a) & static_cast<T>(b));
+}
+
+constexpr AllocSpec operator|(AllocSpec a, AllocSpec b) {
+ using T = absl::underlying_type_t<AllocSpec>;
+ return static_cast<AllocSpec>(static_cast<T>(a) | static_cast<T>(b));
+}
+
+constexpr AllocSpec operator&(AllocSpec a, AllocSpec b) {
+ using T = absl::underlying_type_t<AllocSpec>;
+ return static_cast<AllocSpec>(static_cast<T>(a) & static_cast<T>(b));
+}
+
+namespace exceptions_internal {
+
+std::string GetSpecString(TypeSpec);
+std::string GetSpecString(AllocSpec);
+
+struct NoThrowTag {};
+struct StrongGuaranteeTagType {};
+
+// A simple exception class. We throw this so that test code can catch
+// exceptions specifically thrown by ThrowingValue.
+class TestException {
+public:
+ explicit TestException(absl::string_view msg) : msg_(msg) {}
+ virtual ~TestException() {}
+ virtual const char *what() const noexcept { return msg_.c_str(); }
+
+private:
+ std::string msg_;
+};
+
+// TestBadAllocException exists because allocation functions must throw an
+// exception which can be caught by a handler of std::bad_alloc. We use a child
+// class of std::bad_alloc so we can customise the error message, and also
+// derive from TestException so we don't accidentally end up catching an actual
+// bad_alloc exception in TestExceptionSafety.
+class TestBadAllocException : public std::bad_alloc, public TestException {
+public:
+ explicit TestBadAllocException(absl::string_view msg) : TestException(msg) {}
+ using TestException::what;
+};
+
+extern int countdown;
+
+// Allows the countdown variable to be set manually (defaulting to the initial
+// value of 0)
+inline void SetCountdown(int i = 0) { countdown = i; }
+// Sets the countdown to the terminal value -1
+inline void UnsetCountdown() { SetCountdown(-1); }
+
+void MaybeThrow(absl::string_view msg, bool throw_bad_alloc = false);
+
+testing::AssertionResult FailureMessage(const TestException &e,
+ int countdown) noexcept;
+
+struct TrackedAddress {
+ bool is_alive;
+ std::string description;
+};
+
+// Inspects the constructions and destructions of anything inheriting from
+// TrackedObject. This allows us to safely "leak" TrackedObjects, as
+// ConstructorTracker will destroy everything left over in its destructor.
+class ConstructorTracker {
+public:
+ explicit ConstructorTracker(int count) : countdown_(count) {
+ assert(current_tracker_instance_ == nullptr);
+ current_tracker_instance_ = this;
+ }
+
+ ~ConstructorTracker() {
+ assert(current_tracker_instance_ == this);
+ current_tracker_instance_ = nullptr;
+
+ for (auto &it : address_map_) {
+ void *address = it.first;
+ TrackedAddress &tracked_address = it.second;
+ if (tracked_address.is_alive) {
+ ADD_FAILURE() << ErrorMessage(address, tracked_address.description,
+ countdown_, "Object was not destroyed.");
+ }
+ }
+ }
+
+ static void ObjectConstructed(void *address, std::string description) {
+ if (!CurrentlyTracking())
+ return;
+
+ TrackedAddress &tracked_address =
+ current_tracker_instance_->address_map_[address];
+ if (tracked_address.is_alive) {
+ ADD_FAILURE() << ErrorMessage(
+ address, tracked_address.description,
+ current_tracker_instance_->countdown_,
+ "Object was re-constructed. Current object was constructed by " +
+ description);
+ }
+ tracked_address = {true, std::move(description)};
+ }
+
+ static void ObjectDestructed(void *address) {
+ if (!CurrentlyTracking())
+ return;
+
+ auto it = current_tracker_instance_->address_map_.find(address);
+ // Not tracked. Ignore.
+ if (it == current_tracker_instance_->address_map_.end())
+ return;
+
+ TrackedAddress &tracked_address = it->second;
+ if (!tracked_address.is_alive) {
+ ADD_FAILURE() << ErrorMessage(address, tracked_address.description,
+ current_tracker_instance_->countdown_,
+ "Object was re-destroyed.");
+ }
+ tracked_address.is_alive = false;
+ }
+
+private:
+ static bool CurrentlyTracking() {
+ return current_tracker_instance_ != nullptr;
+ }
+
+ static std::string ErrorMessage(void *address,
+ const std::string &address_description,
+ int countdown,
+ const std::string &error_description) {
+ return absl::Substitute("With coundtown at $0:\n"
+ " $1\n"
+ " Object originally constructed by $2\n"
+ " Object address: $3\n",
+ countdown, error_description, address_description,
+ address);
+ }
+
+ std::unordered_map<void *, TrackedAddress> address_map_;
+ int countdown_;
+
+ static ConstructorTracker *current_tracker_instance_;
+};
+
+class TrackedObject {
+public:
+ TrackedObject(const TrackedObject &) = delete;
+ TrackedObject(TrackedObject &&) = delete;
+
+protected:
+ explicit TrackedObject(std::string description) {
+ ConstructorTracker::ObjectConstructed(this, std::move(description));
+ }
+
+ ~TrackedObject() noexcept { ConstructorTracker::ObjectDestructed(this); }
+};
+} // namespace exceptions_internal
+
+extern exceptions_internal::NoThrowTag nothrow_ctor;
+
+extern exceptions_internal::StrongGuaranteeTagType strong_guarantee;
+
+// A test class which is convertible to bool. The conversion can be
+// instrumented to throw at a controlled time.
+class ThrowingBool {
+public:
+ ThrowingBool(bool b) noexcept : b_(b) {} // NOLINT(runtime/explicit)
+ operator bool() const { // NOLINT
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return b_;
+ }
+
+private:
+ bool b_;
+};
+
+/*
+ * Configuration enum for the ThrowingValue type that defines behavior for the
+ * lifetime of the instance. Use testing::nothrow_ctor to prevent the integer
+ * constructor from throwing.
+ *
+ * kEverythingThrows: Every operation can throw an exception
+ * kNoThrowCopy: Copy construction and copy assignment will not throw
+ * kNoThrowMove: Move construction and move assignment will not throw
+ * kNoThrowNew: Overloaded operators new and new[] will not throw
+ */
+enum class TypeSpec {
+ kEverythingThrows = 0,
+ kNoThrowCopy = 1,
+ kNoThrowMove = 1 << 1,
+ kNoThrowNew = 1 << 2,
+};
+
+/*
+ * A testing class instrumented to throw an exception at a controlled time.
+ *
+ * ThrowingValue implements a slightly relaxed version of the Regular concept --
+ * that is it's a value type with the expected semantics. It also implements
+ * arithmetic operations. It doesn't implement member and pointer operators
+ * like operator-> or operator[].
+ *
+ * ThrowingValue can be instrumented to have certain operations be noexcept by
+ * using compile-time bitfield template arguments. That is, to make an
+ * ThrowingValue which has noexcept move construction/assignment and noexcept
+ * copy construction/assignment, use the following:
+ * ThrowingValue<TypeSpec::kNoThrowMove | TypeSpec::kNoThrowCopy> my_thrwr{val};
+ */
+template <TypeSpec Spec = TypeSpec::kEverythingThrows>
+class ThrowingValue : private exceptions_internal::TrackedObject {
+ static constexpr bool IsSpecified(TypeSpec spec) {
+ return static_cast<bool>(Spec & spec);
+ }
+
+ static constexpr int kDefaultValue = 0;
+ static constexpr int kBadValue = 938550620;
+
+public:
+ ThrowingValue() : TrackedObject(GetInstanceString(kDefaultValue)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ = kDefaultValue;
+ }
+
+ ThrowingValue(const ThrowingValue &other) noexcept(
+ IsSpecified(TypeSpec::kNoThrowCopy))
+ : TrackedObject(GetInstanceString(other.dummy_)) {
+ if (!IsSpecified(TypeSpec::kNoThrowCopy)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ }
+ dummy_ = other.dummy_;
+ }
+
+ ThrowingValue(ThrowingValue &&other) noexcept(
+ IsSpecified(TypeSpec::kNoThrowMove))
+ : TrackedObject(GetInstanceString(other.dummy_)) {
+ if (!IsSpecified(TypeSpec::kNoThrowMove)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ }
+ dummy_ = other.dummy_;
+ }
+
+ explicit ThrowingValue(int i) : TrackedObject(GetInstanceString(i)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ = i;
+ }
+
+ ThrowingValue(int i, exceptions_internal::NoThrowTag) noexcept
+ : TrackedObject(GetInstanceString(i)), dummy_(i) {}
+
+ // absl expects nothrow destructors
+ ~ThrowingValue() noexcept = default;
+
+ ThrowingValue &operator=(const ThrowingValue &other) noexcept(
+ IsSpecified(TypeSpec::kNoThrowCopy)) {
+ dummy_ = kBadValue;
+ if (!IsSpecified(TypeSpec::kNoThrowCopy)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ }
+ dummy_ = other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue &operator=(ThrowingValue &&other) noexcept(
+ IsSpecified(TypeSpec::kNoThrowMove)) {
+ dummy_ = kBadValue;
+ if (!IsSpecified(TypeSpec::kNoThrowMove)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ }
+ dummy_ = other.dummy_;
+ return *this;
+ }
+
+ // Arithmetic Operators
+ ThrowingValue operator+(const ThrowingValue &other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ + other.dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator+() const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator-(const ThrowingValue &other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ - other.dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator-() const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(-dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue &operator++() {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ ++dummy_;
+ return *this;
+ }
+
+ ThrowingValue operator++(int) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ auto out = ThrowingValue(dummy_, nothrow_ctor);
+ ++dummy_;
+ return out;
+ }
+
+ ThrowingValue &operator--() {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ --dummy_;
+ return *this;
+ }
+
+ ThrowingValue operator--(int) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ auto out = ThrowingValue(dummy_, nothrow_ctor);
+ --dummy_;
+ return out;
+ }
+
+ ThrowingValue operator*(const ThrowingValue &other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ * other.dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator/(const ThrowingValue &other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ / other.dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator%(const ThrowingValue &other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ % other.dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator<<(int shift) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ << shift, nothrow_ctor);
+ }
+
+ ThrowingValue operator>>(int shift) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ >> shift, nothrow_ctor);
+ }
+
+ // Comparison Operators
+ // NOTE: We use `ThrowingBool` instead of `bool` because most STL
+ // types/containers require T to be convertible to bool.
+ friend ThrowingBool operator==(const ThrowingValue &a,
+ const ThrowingValue &b) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return a.dummy_ == b.dummy_;
+ }
+ friend ThrowingBool operator!=(const ThrowingValue &a,
+ const ThrowingValue &b) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return a.dummy_ != b.dummy_;
+ }
+ friend ThrowingBool operator<(const ThrowingValue &a,
+ const ThrowingValue &b) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return a.dummy_ < b.dummy_;
+ }
+ friend ThrowingBool operator<=(const ThrowingValue &a,
+ const ThrowingValue &b) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return a.dummy_ <= b.dummy_;
+ }
+ friend ThrowingBool operator>(const ThrowingValue &a,
+ const ThrowingValue &b) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return a.dummy_ > b.dummy_;
+ }
+ friend ThrowingBool operator>=(const ThrowingValue &a,
+ const ThrowingValue &b) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return a.dummy_ >= b.dummy_;
+ }
+
+ // Logical Operators
+ ThrowingBool operator!() const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return !dummy_;
+ }
+
+ ThrowingBool operator&&(const ThrowingValue &other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return dummy_ && other.dummy_;
+ }
+
+ ThrowingBool operator||(const ThrowingValue &other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return dummy_ || other.dummy_;
+ }
+
+ // Bitwise Logical Operators
+ ThrowingValue operator~() const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(~dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator&(const ThrowingValue &other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ & other.dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator|(const ThrowingValue &other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ | other.dummy_, nothrow_ctor);
+ }
+
+ ThrowingValue operator^(const ThrowingValue &other) const {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return ThrowingValue(dummy_ ^ other.dummy_, nothrow_ctor);
+ }
+
+ // Compound Assignment operators
+ ThrowingValue &operator+=(const ThrowingValue &other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ += other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue &operator-=(const ThrowingValue &other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ -= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue &operator*=(const ThrowingValue &other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ *= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue &operator/=(const ThrowingValue &other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ /= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue &operator%=(const ThrowingValue &other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ %= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue &operator&=(const ThrowingValue &other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ &= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue &operator|=(const ThrowingValue &other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ |= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue &operator^=(const ThrowingValue &other) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ ^= other.dummy_;
+ return *this;
+ }
+
+ ThrowingValue &operator<<=(int shift) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ <<= shift;
+ return *this;
+ }
+
+ ThrowingValue &operator>>=(int shift) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ >>= shift;
+ return *this;
+ }
+
+ // Pointer operators
+ void operator&() const = delete; // NOLINT(runtime/operator)
+
+ // Stream operators
+ friend std::ostream &operator<<(std::ostream &os, const ThrowingValue &tv) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return os << GetInstanceString(tv.dummy_);
+ }
+
+ friend std::istream &operator>>(std::istream &is, const ThrowingValue &) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ return is;
+ }
+
+ // Memory management operators
+ static void *
+ operator new(size_t s) noexcept(IsSpecified(TypeSpec::kNoThrowNew)) {
+ if (!IsSpecified(TypeSpec::kNoThrowNew)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true);
+ }
+ return ::operator new(s);
+ }
+
+ static void *
+ operator new[](size_t s) noexcept(IsSpecified(TypeSpec::kNoThrowNew)) {
+ if (!IsSpecified(TypeSpec::kNoThrowNew)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true);
+ }
+ return ::operator new[](s);
+ }
+
+ template <typename... Args>
+ static void *
+ operator new(size_t s,
+ Args &&...args) noexcept(IsSpecified(TypeSpec::kNoThrowNew)) {
+ if (!IsSpecified(TypeSpec::kNoThrowNew)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true);
+ }
+ return ::operator new(s, std::forward<Args>(args)...);
+ }
+
+ template <typename... Args>
+ static void *
+ operator new[](size_t s,
+ Args &&...args) noexcept(IsSpecified(TypeSpec::kNoThrowNew)) {
+ if (!IsSpecified(TypeSpec::kNoThrowNew)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true);
+ }
+ return ::operator new[](s, std::forward<Args>(args)...);
+ }
+
+ // Abseil doesn't support throwing overloaded operator delete. These are
+ // provided so a throwing operator-new can clean up after itself.
+ void operator delete(void *p) noexcept { ::operator delete(p); }
+
+ template <typename... Args>
+ void operator delete(void *p, Args &&...args) noexcept {
+ ::operator delete(p, std::forward<Args>(args)...);
+ }
+
+ void operator delete[](void *p) noexcept { return ::operator delete[](p); }
+
+ template <typename... Args>
+ void operator delete[](void *p, Args &&...args) noexcept {
+ return ::operator delete[](p, std::forward<Args>(args)...);
+ }
+
+ // Non-standard access to the actual contained value. No need for this to
+ // throw.
+ int &Get() noexcept { return dummy_; }
+ const int &Get() const noexcept { return dummy_; }
+
+private:
+ static std::string GetInstanceString(int dummy) {
+ return absl::StrCat("ThrowingValue<",
+ exceptions_internal::GetSpecString(Spec), ">(", dummy,
+ ")");
+ }
+
+ int dummy_;
+};
+// While not having to do with exceptions, explicitly delete comma operator, to
+// make sure we don't use it on user-supplied types.
+template <TypeSpec Spec, typename T>
+void operator,(const ThrowingValue<Spec> &, T &&) = delete;
+template <TypeSpec Spec, typename T>
+void operator,(T &&, const ThrowingValue<Spec> &) = delete;
+
+/*
+ * Configuration enum for the ThrowingAllocator type that defines behavior for
+ * the lifetime of the instance.
+ *
+ * kEverythingThrows: Calls to the member functions may throw
+ * kNoThrowAllocate: Calls to the member functions will not throw
+ */
+enum class AllocSpec {
+ kEverythingThrows = 0,
+ kNoThrowAllocate = 1,
+};
+
+/*
+ * An allocator type which is instrumented to throw at a controlled time, or not
+ * to throw, using AllocSpec. The supported settings are the default of every
+ * function which is allowed to throw in a conforming allocator possibly
+ * throwing, or nothing throws, in line with the ABSL_ALLOCATOR_NOTHROW
+ * configuration macro.
+ */
+template <typename T, AllocSpec Spec = AllocSpec::kEverythingThrows>
+class ThrowingAllocator : private exceptions_internal::TrackedObject {
+ static constexpr bool IsSpecified(AllocSpec spec) {
+ return static_cast<bool>(Spec & spec);
+ }
+
+public:
+ using pointer = T *;
+ using const_pointer = const T *;
+ using reference = T &;
+ using const_reference = const T &;
+ using void_pointer = void *;
+ using const_void_pointer = const void *;
+ using value_type = T;
+ using size_type = size_t;
+ using difference_type = ptrdiff_t;
+
+ using is_nothrow =
+ std::integral_constant<bool, Spec == AllocSpec::kNoThrowAllocate>;
+ using propagate_on_container_copy_assignment = std::true_type;
+ using propagate_on_container_move_assignment = std::true_type;
+ using propagate_on_container_swap = std::true_type;
+ using is_always_equal = std::false_type;
+
+ ThrowingAllocator() : TrackedObject(GetInstanceString(next_id_)) {
+ exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION);
+ dummy_ = std::make_shared<const int>(next_id_++);
+ }
+
+ template <typename U>
+ ThrowingAllocator(const ThrowingAllocator<U, Spec> &other) noexcept // NOLINT
+ : TrackedObject(GetInstanceString(*other.State())),
+ dummy_(other.State()) {}
+
+ // According to C++11 standard [17.6.3.5], Table 28, the move/copy ctors of
+ // allocator shall not exit via an exception, thus they are marked noexcept.
+ ThrowingAllocator(const ThrowingAllocator &other) noexcept
+ : TrackedObject(GetInstanceString(*other.State())),
+ dummy_(other.State()) {}
+
+ template <typename U>
+ ThrowingAllocator(ThrowingAllocator<U, Spec> &&other) noexcept // NOLINT
+ : TrackedObject(GetInstanceString(*other.State())),
+ dummy_(std::move(other.State())) {}
+
+ ThrowingAllocator(ThrowingAllocator &&other) noexcept
+ : TrackedObject(GetInstanceString(*other.State())),
+ dummy_(std::move(other.State())) {}
+
+ ~ThrowingAllocator() noexcept = default;
+
+ ThrowingAllocator &operator=(const ThrowingAllocator &other) noexcept {
+ dummy_ = other.State();
+ return *this;
+ }
+
+ template <typename U>
+ ThrowingAllocator &
+ operator=(const ThrowingAllocator<U, Spec> &other) noexcept {
+ dummy_ = other.State();
+ return *this;
+ }
+
+ template <typename U>
+ ThrowingAllocator &operator=(ThrowingAllocator<U, Spec> &&other) noexcept {
+ dummy_ = std::move(other.State());
+ return *this;
+ }
+
+ template <typename U> struct rebind {
+ using other = ThrowingAllocator<U, Spec>;
+ };
+
+ pointer
+ allocate(size_type n) noexcept(IsSpecified(AllocSpec::kNoThrowAllocate)) {
+ ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
+ return static_cast<pointer>(::operator new(n * sizeof(T)));
+ }
+
+ pointer allocate(size_type n, const_void_pointer) noexcept(
+ IsSpecified(AllocSpec::kNoThrowAllocate)) {
+ return allocate(n);
+ }
+
+ void deallocate(pointer ptr, size_type) noexcept {
+ ReadState();
+ ::operator delete(static_cast<void *>(ptr));
+ }
+
+ template <typename U, typename... Args>
+ void
+ construct(U *ptr,
+ Args &&...args) noexcept(IsSpecified(AllocSpec::kNoThrowAllocate)) {
+ ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
+ ::new (static_cast<void *>(ptr)) U(std::forward<Args>(args)...);
+ }
+
+ template <typename U> void destroy(U *p) noexcept {
+ ReadState();
+ p->~U();
+ }
+
+ size_type max_size() const noexcept {
+ return (std::numeric_limits<difference_type>::max)() / sizeof(value_type);
+ }
+
+ ThrowingAllocator select_on_container_copy_construction() noexcept(
+ IsSpecified(AllocSpec::kNoThrowAllocate)) {
+ ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION);
+ return *this;
+ }
+
+ template <typename U>
+ bool operator==(const ThrowingAllocator<U, Spec> &other) const noexcept {
+ return dummy_ == other.dummy_;
+ }
+
+ template <typename U>
+ bool operator!=(const ThrowingAllocator<U, Spec> &other) const noexcept {
+ return dummy_ != other.dummy_;
+ }
+
+ template <typename, AllocSpec> friend class ThrowingAllocator;
+
+private:
+ static std::string GetInstanceString(int dummy) {
+ return absl::StrCat("ThrowingAllocator<",
+ exceptions_internal::GetSpecString(Spec), ">(", dummy,
+ ")");
+ }
+
+ const std::shared_ptr<const int> &State() const { return dummy_; }
+ std::shared_ptr<const int> &State() { return dummy_; }
+
+ void ReadState() {
+ // we know that this will never be true, but the compiler doesn't, so this
+ // should safely force a read of the value.
+ if (*dummy_ < 0)
+ std::abort();
+ }
+
+ void ReadStateAndMaybeThrow(absl::string_view msg) const {
+ if (!IsSpecified(AllocSpec::kNoThrowAllocate)) {
+ exceptions_internal::MaybeThrow(
+ absl::Substitute("Allocator id $0 threw from $1", *dummy_, msg));
+ }
+ }
+
+ static int next_id_;
+ std::shared_ptr<const int> dummy_;
+};
+
+template <typename T, AllocSpec Spec>
+int ThrowingAllocator<T, Spec>::next_id_ = 0;
+
+// Tests for resource leaks by attempting to construct a T using args repeatedly
+// until successful, using the countdown method. Side effects can then be
+// tested for resource leaks.
+template <typename T, typename... Args>
+void TestThrowingCtor(Args &&...args) {
+ struct Cleanup {
+ ~Cleanup() { exceptions_internal::UnsetCountdown(); }
+ } c;
+ for (int count = 0;; ++count) {
+ exceptions_internal::ConstructorTracker ct(count);
+ exceptions_internal::SetCountdown(count);
+ try {
+ T temp(std::forward<Args>(args)...);
+ static_cast(temp);
+ break;
+ } catch (const exceptions_internal::TestException &) {
+ }
+ }
+}
+
+// Tests the nothrow guarantee of the provided nullary operation. If an
+// exception is thrown, the result will be AssertionFailure(). Otherwise, it
+// will be AssertionSuccess().
+template <typename Operation>
+testing::AssertionResult TestNothrowOp(const Operation &operation) {
+ struct Cleanup {
+ Cleanup() { exceptions_internal::SetCountdown(); }
+ ~Cleanup() { exceptions_internal::UnsetCountdown(); }
+ } c;
+ try {
+ operation();
+ return testing::AssertionSuccess();
+ } catch (const exceptions_internal::TestException &) {
+ return testing::AssertionFailure()
+ << "TestException thrown during call to operation() when nothrow "
+ "guarantee was expected.";
+ } catch (...) {
+ return testing::AssertionFailure()
+ << "Unknown exception thrown during call to operation() when "
+ "nothrow guarantee was expected.";
+ }
+}
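+
+// Example (illustrative): verifying that a move assignment declared noexcept
+// really does not throw while the countdown is armed:
+//
+//   ThrowingValue<TypeSpec::kNoThrowMove> a(1), b(2);
+//   EXPECT_TRUE(TestNothrowOp([&]() { a = std::move(b); }));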
+
+namespace exceptions_internal {
+
+// Dummy struct for ExceptionSafetyTestBuilder<> partial state.
+struct UninitializedT {};
+
+template <typename T> class DefaultFactory {
+public:
+ explicit DefaultFactory(const T &t) : t_(t) {}
+ std::unique_ptr<T> operator()() const { return absl::make_unique<T>(t_); }
+
+private:
+ T t_;
+};
+
+template <size_t LazyContractsCount, typename LazyFactory,
+ typename LazyOperation>
+using EnableIfTestable = typename absl::enable_if_t<
+ LazyContractsCount != 0 &&
+ !std::is_same<LazyFactory, UninitializedT>::value &&
+ !std::is_same<LazyOperation, UninitializedT>::value>;
+
+template <typename Factory = UninitializedT,
+ typename Operation = UninitializedT, typename... Contracts>
+class ExceptionSafetyTestBuilder;
+
+} // namespace exceptions_internal
+
+/*
+ * Constructs an empty ExceptionSafetyTestBuilder. All
+ * ExceptionSafetyTestBuilder objects are immutable and all With[thing] mutation
+ * methods return new instances of ExceptionSafetyTestBuilder.
+ *
+ * In order to test a T for exception safety, a factory for that T, a testable
+ * operation, and at least one contract callback returning an assertion
+ * result must be applied using the respective methods.
+ */
+exceptions_internal::ExceptionSafetyTestBuilder<> MakeExceptionSafetyTester();
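+
+// Example (illustrative): a minimal strong-guarantee test; `MyType` is a
+// hypothetical copyable, equality-comparable type and MutateMayThrow() a
+// hypothetical operation on it:
+//
+//   auto tester = MakeExceptionSafetyTester()
+//                     .WithInitialValue(MyType{})
+//                     .WithContracts(strong_guarantee);
+//   EXPECT_TRUE(tester.Test([](MyType* t) { t->MutateMayThrow(); }));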
+
+namespace exceptions_internal {
+template <typename T> struct IsUniquePtr : std::false_type {};
+
+template <typename T, typename D>
+struct IsUniquePtr<std::unique_ptr<T, D>> : std::true_type {};
+
+template <typename Factory> struct FactoryPtrTypeHelper {
+ using type = decltype(std::declval<const Factory &>()());
+
+ static_assert(IsUniquePtr<type>::value, "Factories must return a unique_ptr");
+};
+
+template <typename Factory>
+using FactoryPtrType = typename FactoryPtrTypeHelper<Factory>::type;
+
+template <typename Factory>
+using FactoryElementType = typename FactoryPtrType<Factory>::element_type;
+
+template <typename T> class ExceptionSafetyTest {
+ using Factory = std::function<std::unique_ptr<T>()>;
+ using Operation = std::function<void(T *)>;
+ using Contract = std::function<AssertionResult(T *)>;
+
+public:
+ template <typename... Contracts>
+ explicit ExceptionSafetyTest(const Factory &f, const Operation &op,
+ const Contracts &...contracts)
+ : factory_(f), operation_(op), contracts_{WrapContract(contracts)...} {}
+
+ AssertionResult Test() const {
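+ // Each pass arms the countdown so that the count-th throw point inside
+ // operation_ fires; if operation_ ever completes without throwing, every
+ // throw point has been exercised and the test succeeds.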
+ for (int count = 0;; ++count) {
+ exceptions_internal::ConstructorTracker ct(count);
+
+ for (const auto &contract : contracts_) {
+ auto t_ptr = factory_();
+ try {
+ SetCountdown(count);
+ operation_(t_ptr.get());
+ // Unset for the case that the operation throws no exceptions, which
+ // would leave the countdown set and break the *next* exception safety
+ // test after this one.
+ UnsetCountdown();
+ return AssertionSuccess();
+ } catch (const exceptions_internal::TestException &e) {
+ if (!contract(t_ptr.get())) {
+ return AssertionFailure() << e.what() << " failed contract check";
+ }
+ }
+ }
+ }
+ }
+
+private:
+ template <typename ContractFn>
+ Contract WrapContract(const ContractFn &contract) {
+ return [contract](T *t_ptr) { return AssertionResult(contract(t_ptr)); };
+ }
+
+ Contract WrapContract(StrongGuaranteeTagType) {
+ return [this](T *t_ptr) { return AssertionResult(*factory_() == *t_ptr); };
+ }
+
+ Factory factory_;
+ Operation operation_;
+ std::vector<Contract> contracts_;
+};
+
+/*
+ * Builds a tester object that tests if performing an operation on a T follows
+ * exception safety guarantees. Verification is done via contract assertion
+ * callbacks applied to T instances post-throw.
+ *
+ * Template parameters for ExceptionSafetyTestBuilder:
+ *
+ * - Factory: The factory object (passed in via tester.WithFactory(...) or
+ * tester.WithInitialValue(...)) must be invocable with the signature
+ * `std::unique_ptr<T> operator()() const` where T is the type being tested.
+ * It is used for reliably creating identical T instances to test on.
+ *
+ * - Operation: The operation object (passed in via tester.WithOperation(...)
+ * or tester.Test(...)) must be invocable with the signature
+ * `void operator()(T*) const` where T is the type being tested. It is used
+ * for performing steps on a T instance that may throw and that need to be
+ * checked for exception safety. Each call to the operation will receive a
+ * fresh T instance so it's free to modify and destroy the T instances as it
+ * pleases.
+ *
+ * - Contracts...: The contract assertion callback objects (passed in via
+ * tester.WithContracts(...)) must be invocable with the signature
+ * `testing::AssertionResult operator()(T*) const` where T is the type being
+ * tested. Contract assertion callbacks are provided T instances post-throw.
+ * They must return testing::AssertionSuccess when the type contracts of the
+ * provided T instance hold. If the type contracts of the T instance do not
+ * hold, they must return testing::AssertionFailure. Execution order of
+ * Contracts... is unspecified. They will each individually get a fresh T
+ * instance so they are free to modify and destroy the T instances as they
+ * please.
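+ *
+ * Example (illustrative): a contract callback with the required signature,
+ * where `MyType` and its IsConsistent() check are hypothetical:
+ *
+ *   auto invariants_hold = [](MyType* t) {
+ *     return t->IsConsistent() ? testing::AssertionSuccess()
+ *                              : (testing::AssertionFailure() << "bad state");
+ *   };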
+ */
+template <typename Factory, typename Operation, typename... Contracts>
+class ExceptionSafetyTestBuilder {
+public:
+ /*
+ * Returns a new ExceptionSafetyTestBuilder with an included T factory based
+ * on the provided T instance. The existing factory will not be included in
+ * the newly created tester instance. The created factory returns a new T
+ * instance by copy-constructing the provided const T& t.
+ *
+ * Preconditions for tester.WithInitialValue(const T& t):
+ *
+ * - The const T& t object must be copy-constructible where T is the type
+ * being tested. For non-copy-constructible objects, use the method
+ * tester.WithFactory(...).
+ */
+ template <typename T>
+ ExceptionSafetyTestBuilder<DefaultFactory<T>, Operation, Contracts...>
+ WithInitialValue(const T &t) const {
+ return WithFactory(DefaultFactory<T>(t));
+ }
+
+ /*
+ * Returns a new ExceptionSafetyTestBuilder with the provided T factory
+ * included. The existing factory will not be included in the newly-created
+ * tester instance. This method is intended for use with types lacking a copy
+ * constructor. Types that can be copy-constructed should instead use the
+ * method tester.WithInitialValue(...).
+ */
+ template <typename NewFactory>
+ ExceptionSafetyTestBuilder<absl::decay_t<NewFactory>, Operation, Contracts...>
+ WithFactory(const NewFactory &new_factory) const {
+ return {new_factory, operation_, contracts_};
+ }
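+
+ /*
+ * Example (illustrative): supplying a factory for a move-only type, where
+ * `MoveOnlyType` is a hypothetical stand-in:
+ *
+ *   .WithFactory([]() { return absl::make_unique<MoveOnlyType>(); })
+ */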
+
+ /*
+ * Returns a new ExceptionSafetyTestBuilder with the provided testable
+ * operation included. The existing operation will not be included in the
+ * newly created tester.
+ */
+ template <typename NewOperation>
+ ExceptionSafetyTestBuilder<Factory, absl::decay_t<NewOperation>, Contracts...>
+ WithOperation(const NewOperation &new_operation) const {
+ return {factory_, new_operation, contracts_};
+ }
+
+ /*
+ * Returns a new ExceptionSafetyTestBuilder with the provided MoreContracts...
+ * combined with the Contracts... that were already included in the instance
+ * on which the method was called. Contracts... cannot be removed or replaced
+ * once added to an ExceptionSafetyTestBuilder instance. A fresh object must
+ * be created in order to get an empty Contracts... list.
+ *
+ * In addition to passing in custom contract assertion callbacks, this method
+ * accepts `testing::strong_guarantee` as an argument which checks T instances
+ * post-throw against freshly created T instances via operator== to verify
+ * that any state changes made during the execution of the operation were
+ * properly rolled back.
+ */
+ template <typename... MoreContracts>
+ ExceptionSafetyTestBuilder<Factory, Operation, Contracts...,