diff --git a/apps/llm/app/voice_chat/index.tsx b/apps/llm/app/voice_chat/index.tsx
index 27b4ad35d..230d0b8e9 100644
--- a/apps/llm/app/voice_chat/index.tsx
+++ b/apps/llm/app/voice_chat/index.tsx
@@ -101,7 +101,7 @@ function VoiceChatScreen() {
   return !llm.isReady || !speechToText.isReady ? (
   ) : (
diff --git a/packages/react-native-executorch/android/libs/classes.jar b/packages/react-native-executorch/android/libs/classes.jar
index 6fda54e05..31dd44c2e 100644
Binary files a/packages/react-native-executorch/android/libs/classes.jar and b/packages/react-native-executorch/android/libs/classes.jar differ
diff --git a/packages/react-native-executorch/common/rnexecutorch/TokenizerModule.cpp b/packages/react-native-executorch/common/rnexecutorch/TokenizerModule.cpp
index ef81fdc19..2111541a6 100644
--- a/packages/react-native-executorch/common/rnexecutorch/TokenizerModule.cpp
+++ b/packages/react-native-executorch/common/rnexecutorch/TokenizerModule.cpp
@@ -9,7 +9,7 @@ namespace rnexecutorch {

 using namespace facebook;

-using namespace executorch::extension::constants;
+using namespace executorch::extension::llm;

 TokenizerModule::TokenizerModule(
     std::string source, std::shared_ptr<react::CallInvoker> callInvoker)
diff --git a/packages/react-native-executorch/common/rnexecutorch/models/BaseModel.h b/packages/react-native-executorch/common/rnexecutorch/models/BaseModel.h
index 4420103e3..c40fa2569 100644
--- a/packages/react-native-executorch/common/rnexecutorch/models/BaseModel.h
+++ b/packages/react-native-executorch/common/rnexecutorch/models/BaseModel.h
@@ -46,7 +46,7 @@ class BaseModel {
   // (unnecessary copies instead of working on JS memory). In this case
   // CallInvoker can be used to get jsi::Runtime, and use it in a safe manner.
   std::shared_ptr<react::CallInvoker> callInvoker;

-  std::unique_ptr module_;
+  std::unique_ptr module_;

   std::size_t memorySizeLowerBound{0};
diff --git a/packages/react-native-executorch/common/rnexecutorch/tests/integration/SpeechToTextTest.cpp b/packages/react-native-executorch/common/rnexecutorch/tests/integration/SpeechToTextTest.cpp
index 5b15c0040..29edf4e4e 100644
--- a/packages/react-native-executorch/common/rnexecutorch/tests/integration/SpeechToTextTest.cpp
+++ b/packages/react-native-executorch/common/rnexecutorch/tests/integration/SpeechToTextTest.cpp
@@ -74,7 +74,7 @@ TEST(S2TTranscribeTests, TranscribeReturnsValidChars) {
   auto result = model.transcribe(audio, "en", true);
   ASSERT_EQ(result.language, "en");
   EXPECT_GE(result.duration, 20.0f);
-  ASSERT_EQ(result.task, "transcription");
+  ASSERT_EQ(result.task, "transcribe");
   ASSERT_FALSE(result.segments.empty());
   ASSERT_FALSE(result.text.empty());
   for (char c : result.text) {
diff --git a/packages/react-native-executorch/common/runner/constants.h b/packages/react-native-executorch/common/runner/constants.h
index 93ac6a876..e75466829 100644
--- a/packages/react-native-executorch/common/runner/constants.h
+++ b/packages/react-native-executorch/common/runner/constants.h
@@ -7,7 +7,7 @@
  */

 #pragma once

 // constants for LLM runtime
-namespace executorch::extension::constants {
+namespace executorch::extension::llm {

 // Runtime metadata key constants
 inline constexpr auto kEnableDynamicShape = "enable_dynamic_shape";
@@ -27,4 +27,5 @@ inline constexpr auto kTextModelMethod = "text_decoder";

 inline constexpr auto numOfAddedBoSTokens = 0;
 inline constexpr auto numOfAddedEoSTokens = 0;
-} // namespace executorch::extension::constants
+
+} // namespace executorch::extension::llm
diff --git a/packages/react-native-executorch/common/runner/irunner.h b/packages/react-native-executorch/common/runner/irunner.h
index ac8115b20..8dedc6687 100644
--- a/packages/react-native-executorch/common/runner/irunner.h
+++ b/packages/react-native-executorch/common/runner/irunner.h
@@ -65,6 +65,10 @@ struct GenerationConfig {

   // Use KV_CACHE implementation (if implemented) or not
   bool enable_kv_cache = true;
+
+  // Number of BOS and EOS tokens to add to the prompt
+  int32_t num_bos = 0;
+  int32_t num_eos = 0;
 };

 // Base interface for LLM runners
diff --git a/packages/react-native-executorch/common/runner/kernel_includes.h b/packages/react-native-executorch/common/runner/kernel_includes.h
index 9d6029a9a..bafaf80e7 100644
--- a/packages/react-native-executorch/common/runner/kernel_includes.h
+++ b/packages/react-native-executorch/common/runner/kernel_includes.h
@@ -15,8 +15,8 @@
 #pragma once

 // This list should be very conservative since most kernel .cpp files will
-// include these and depend on their transitive deps. Only add a header if 99%
-// of kernels would have included it anyway.
+// include these and depend on their transitive deps. Only add a header if
+// 99% of kernels would have included it anyway.
 #include // IWYU pragma: export
 #include // IWYU pragma: export
 #include // IWYU pragma: export
diff --git a/packages/react-native-executorch/common/runner/runner.cpp b/packages/react-native-executorch/common/runner/runner.cpp
index 5d0fec78c..4da2ee6ba 100644
--- a/packages/react-native-executorch/common/runner/runner.cpp
+++ b/packages/react-native-executorch/common/runner/runner.cpp
@@ -19,7 +19,7 @@

 namespace example {

-using namespace executorch::extension::constants;
+using namespace executorch::extension::llm;
 using ::executorch::extension::Module;
 using ::executorch::runtime::Error;
 using ::executorch::runtime::Result;
diff --git a/packages/react-native-executorch/common/runner/sampler.h b/packages/react-native-executorch/common/runner/sampler.h
index b18ba6585..a46a5ed12 100644
--- a/packages/react-native-executorch/common/runner/sampler.h
+++ b/packages/react-native-executorch/common/runner/sampler.h
@@ -26,6 +26,8 @@ namespace extension {
 namespace llm {
 // A simple llama2 sampler.

+inline constexpr auto kTopp = 0.9f;
+
 template <typename T> struct ProbIndex {
   T prob;
   int32_t index;
@@ -65,3 +67,9 @@ using ::executorch::extension::llm::ProbIndex;
 using ::executorch::extension::llm::Sampler;
 } // namespace executor
 } // namespace torch
+
+namespace executorch::llm {
+// TODO(T197294990): Remove these deprecated aliases once all users have moved
+// to the new `::executorch::extension::llm` namespaces.
+using ::executorch::extension::llm::kTopp;
+} // namespace executorch::llm
diff --git a/packages/react-native-executorch/common/runner/stats.h b/packages/react-native-executorch/common/runner/stats.h
index 4c1fae519..b94834475 100644
--- a/packages/react-native-executorch/common/runner/stats.h
+++ b/packages/react-native-executorch/common/runner/stats.h
@@ -11,6 +11,7 @@
 #include "util.h"
 #include
 #include
+#include <limits>
 #include
 #include

@@ -44,11 +45,19 @@ struct Stats {
   // inference_end_ms: End of inference/generation.
   long inference_end_ms;
   // Keep a running total of the time spent in sampling.
-  long aggregate_sampling_time_ms;
+  long aggregate_sampling_time_ms = 0;
   // Token count from prompt
   int64_t num_prompt_tokens;
   // Token count from generated (total - prompt)
   int64_t num_generated_tokens;
+  // GPU memory stats (optional). Use the sentinels UINT64_MAX / -1.0 to
+  // indicate "not available"; a value of zero is a real reading.
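+  // A minimal sketch of the consumer-side guard (hypothetical helper name;
+  // mirrors stats_to_json_string() and print_report() below):
+  //
+  //   if (stats.gpu_total_bytes != std::numeric_limits<uint64_t>::max()) {
+  //     report_gpu_fields(stats); // only then are the GPU fields meaningful
+  //   }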
+  uint64_t gpu_total_bytes = std::numeric_limits<uint64_t>::max();
+  uint64_t gpu_free_before_load_bytes = std::numeric_limits<uint64_t>::max();
+  uint64_t gpu_free_after_load_bytes = std::numeric_limits<uint64_t>::max();
+  uint64_t gpu_free_after_generate_bytes = std::numeric_limits<uint64_t>::max();
+  double gpu_peak_usage_mb = -1.0;

   inline void on_sampling_begin() {
     aggregate_sampling_timer_start_timestamp = time_in_ms();
   }
@@ -75,6 +84,11 @@ struct Stats {
     aggregate_sampling_time_ms = 0;
     num_prompt_tokens = 0;
     num_generated_tokens = 0;
+    gpu_total_bytes = std::numeric_limits<uint64_t>::max();
+    gpu_free_before_load_bytes = std::numeric_limits<uint64_t>::max();
+    gpu_free_after_load_bytes = std::numeric_limits<uint64_t>::max();
+    gpu_free_after_generate_bytes = std::numeric_limits<uint64_t>::max();
+    gpu_peak_usage_mb = -1.0;
     aggregate_sampling_timer_start_timestamp = 0;
   }

@@ -93,7 +107,29 @@ inline std::string stats_to_json_string(const Stats &stats) {
      << "\"prompt_eval_end_ms\":" << stats.prompt_eval_end_ms << ","
      << "\"first_token_ms\":" << stats.first_token_ms << ","
      << "\"aggregate_sampling_time_ms\":" << stats.aggregate_sampling_time_ms
-     << "," << "\"SCALING_FACTOR_UNITS_PER_SECOND\":"
+     << ",";
+  // Only include GPU fields in the JSON if gpu_total_bytes is valid (i.e. not
+  // equal to the UINT64_MAX sentinel, spelled static_cast<uint64_t>(-1)).
+  if (stats.gpu_total_bytes != static_cast<uint64_t>(-1)) {
+    ss << "\"gpu_total_bytes\":" << stats.gpu_total_bytes;
+    if (stats.gpu_free_before_load_bytes != static_cast<uint64_t>(-1)) {
+      ss << ",\"gpu_free_before_load_bytes\":"
+         << stats.gpu_free_before_load_bytes;
+    }
+    if (stats.gpu_free_after_load_bytes != static_cast<uint64_t>(-1)) {
+      ss << ",\"gpu_free_after_load_bytes\":"
+         << stats.gpu_free_after_load_bytes;
+    }
+    if (stats.gpu_free_after_generate_bytes != static_cast<uint64_t>(-1)) {
+      ss << ",\"gpu_free_after_generate_bytes\":"
+         << stats.gpu_free_after_generate_bytes;
+    }
+    if (stats.gpu_peak_usage_mb >= 0.0) {
+      ss << ",\"gpu_peak_usage_mb\":" << stats.gpu_peak_usage_mb;
+    }
+    ss << ",";
+  }
+  ss << "\"SCALING_FACTOR_UNITS_PER_SECOND\":"
      << stats.SCALING_FACTOR_UNITS_PER_SECOND << "}";
   return ss.str();
 }
@@ -145,6 +181,27 @@ inline void print_report(const Stats &stats) {
          stats.num_prompt_tokens + stats.num_generated_tokens,
          (double)stats.aggregate_sampling_time_ms /
              stats.SCALING_FACTOR_UNITS_PER_SECOND);
+
+  // GPU memory reporting (only meaningful if GPU fields were populated)
+  if (stats.gpu_total_bytes != static_cast<uint64_t>(-1)) {
+    ET_LOG(Info, "\tGPU total memory: %.2f MB",
+           stats.gpu_total_bytes / 1024.0 / 1024.0);
+    if (stats.gpu_free_before_load_bytes != static_cast<uint64_t>(-1)) {
+      ET_LOG(Info, "\tGPU free before load: %.2f MB",
+             stats.gpu_free_before_load_bytes / 1024.0 / 1024.0);
+    }
+    if (stats.gpu_free_after_load_bytes != static_cast<uint64_t>(-1)) {
+      ET_LOG(Info, "\tGPU free after load: %.2f MB",
+             stats.gpu_free_after_load_bytes / 1024.0 / 1024.0);
+    }
+    if (stats.gpu_free_after_generate_bytes != static_cast<uint64_t>(-1)) {
+      ET_LOG(Info, "\tGPU free after generate: %.2f MB",
+             stats.gpu_free_after_generate_bytes / 1024.0 / 1024.0);
+    }
+    if (stats.gpu_peak_usage_mb >= 0.0) {
+      ET_LOG(Info, "\tGPU peak usage: %.2f MB", stats.gpu_peak_usage_mb);
+    }
+  }
 }
diff --git a/packages/react-native-executorch/common/runner/text_decoder_runner.cpp b/packages/react-native-executorch/common/runner/text_decoder_runner.cpp
index 97eb3dd13..bc5ca069a 100644
--- a/packages/react-native-executorch/common/runner/text_decoder_runner.cpp
+++ b/packages/react-native-executorch/common/runner/text_decoder_runner.cpp
@@ -32,15 +32,23 @@ TextDecoderRunner::TextDecoderRunner(Module *module, IOManager *io_manager,

 ::executorch::runtime::Result<executorch::aten::Tensor>
 TextDecoderRunner::step(TensorPtr &tokens, int64_t start_pos) {
   // ET_LOG(Info, "Input token %" PRIu64, input_token);
-  auto method_meta = ET_UNWRAP(module_->method_meta("forward"));
+  auto method_meta_result = module_->method_meta("forward");
+  if (!method_meta_result.ok()) {
+    return method_meta_result.error();
+  }
+  auto method_meta = std::move(*method_meta_result);

   // If only 1 input, we are not using kv cache
   bool use_kv_cache = method_meta.num_inputs() > 1;

   std::vector<int64_t> cache_positions;
   if (use_kv_cache) {
-    auto start_pos_tensor = ET_UNWRAP(populate_start_pos_or_cache_position(
-        module_, start_pos, cache_positions, tokens->numel(), "forward"));
+    auto start_pos_tensor_result = populate_start_pos_or_cache_position(
+        module_, start_pos, cache_positions, tokens->numel(), "forward");
+    if (!start_pos_tensor_result.ok()) {
+      return start_pos_tensor_result.error();
+    }
+    auto start_pos_tensor = std::move(*start_pos_tensor_result);

     std::vector<runtime::EValue> inputs;
     auto inputs_res = io_manager_->prepare_decode(tokens, start_pos_tensor);
diff --git a/packages/react-native-executorch/common/runner/text_prefiller.cpp b/packages/react-native-executorch/common/runner/text_prefiller.cpp
index c7e4296eb..dc961158b 100644
--- a/packages/react-native-executorch/common/runner/text_prefiller.cpp
+++ b/packages/react-native-executorch/common/runner/text_prefiller.cpp
@@ -98,8 +98,11 @@ TextPrefiller::prefill_chunk(std::vector<uint64_t> &prompt_tokens,

   // run the first token and get back logits tensor. Assuming the first token
   // is bos so don't callback.
-  auto logits_tensor =
-      ET_UNWRAP(text_decoder_runner_->step(tokens, start_pos));
+  auto logits_result = text_decoder_runner_->step(tokens, start_pos);
+  if (!logits_result.ok()) {
+    return logits_result.error();
+  }
+  auto logits_tensor = std::move(*logits_result);

   pos += 1; // start the loop from index 1
   start_pos += 1;
diff --git a/packages/react-native-executorch/common/runner/text_token_generator.h b/packages/react-native-executorch/common/runner/text_token_generator.h
index 7b0dd3042..024cce456 100644
--- a/packages/react-native-executorch/common/runner/text_token_generator.h
+++ b/packages/react-native-executorch/common/runner/text_token_generator.h
@@ -39,6 +39,10 @@ class TextTokenGenerator {
    * @param start_pos The start position of the new tokens, based on how many
    * prompt tokens are prefilled.
    * @param max_new_tokens Maximum number of new tokens to generate.
+   * @param temperature controls the randomness of predictions by scaling the
+   * logits before applying softmax. A higher temperature results in more
+   * random predictions, while a lower temperature results in more
+   * deterministic predictions.
    * @param token_callback what to do after a token is generated.
    * @return how many tokens are generated.
    */
@@ -113,7 +117,8 @@ class TextTokenGenerator {
     // We pass false, as we don't want to skip special tokens, e.g.
     //
-    auto decodeResult = tokenizer_->decode(token_cache, false);
+    auto decodeResult =
+        tokenizer_->decode(token_cache, false); // NOTE: difference
     if (!decodeResult.ok()) {
       throw rnexecutorch::RnExecutorchError(
           rnexecutorch::RnExecutorchErrorCode::TokenizerError,
diff --git a/packages/react-native-executorch/common/runner/util.h b/packages/react-native-executorch/common/runner/util.h
index 03ad0c7b3..640b96319 100644
--- a/packages/react-native-executorch/common/runner/util.h
+++ b/packages/react-native-executorch/common/runner/util.h
@@ -10,10 +10,12 @@
 #include "constants.h"
 #include "text_prefiller.h"
 #include
+#include
 #include
 #include
 #include
 #include
+#include
 #if defined(__linux__) || defined(__ANDROID__) || defined(__unix__)
 #include
 #endif
@@ -112,8 +114,16 @@ populate_start_pos_or_cache_position(Module *module, int64_t &start_pos,
                                      const char *method_name = "forward") {
   // Get expected shape of cache position tensor, which should be the second
   // argument
-  auto method_meta = ET_UNWRAP(module->method_meta(method_name));
-  auto second_input_info = ET_UNWRAP(method_meta.input_tensor_meta(1));
+  auto method_meta_result = module->method_meta(method_name);
+  if (!method_meta_result.ok()) {
+    return method_meta_result.error();
+  }
+  auto method_meta = std::move(*method_meta_result);
+  auto second_input_info_result = method_meta.input_tensor_meta(1);
+  if (!second_input_info_result.ok()) {
+    return second_input_info_result.error();
+  }
+  auto second_input_info = std::move(*second_input_info_result);
   auto second_input_sizes = second_input_info.sizes();
   auto numel = second_input_sizes[0];
@@ -136,6 +146,31 @@
     }
   }

+/**
+ * Helper function to convert a float tensor to bfloat16.
+ * Creates a new tensor with bfloat16 dtype and copies/converts the data.
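+ *
+ * Minimal usage sketch (hypothetical call site; follows the same explicit
+ * Result handling that replaces ET_UNWRAP throughout this diff):
+ *
+ *   auto bf16_result = convert_to_bfloat16(float_tensor);
+ *   if (!bf16_result.ok()) {
+ *     return bf16_result.error(); // propagate instead of unwrapping
+ *   }
+ *   auto bf16_tensor = std::move(*bf16_result);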
+ */
+inline ::executorch::runtime::Result<::executorch::extension::TensorPtr>
+convert_to_bfloat16(const ::executorch::extension::TensorPtr &src_tensor) {
+  ET_CHECK_OR_RETURN_ERROR(
+      src_tensor->scalar_type() == ::executorch::aten::ScalarType::Float,
+      InvalidArgument,
+      "BFloat16 conversion only supported from Float source data");
+
+  const auto num_elements = static_cast<size_t>(src_tensor->numel());
+  const float *float_data = src_tensor->const_data_ptr<float>();
+
+  auto bf16_tensor = ::executorch::extension::empty_like(
+      src_tensor, ::executorch::aten::ScalarType::BFloat16);
+  auto *bf16_data =
+      bf16_tensor->mutable_data_ptr<::executorch::aten::BFloat16>();
+  for (size_t i = 0; i < num_elements; ++i) {
+    bf16_data[i] = ::executorch::aten::BFloat16(float_data[i]);
+  }
+
+  return bf16_tensor;
+}
+
 } // namespace llm
 } // namespace extension
 } // namespace executorch
diff --git a/packages/react-native-executorch/third-party/android/libs/executorch/arm64-v8a/libexecutorch.so b/packages/react-native-executorch/third-party/android/libs/executorch/arm64-v8a/libexecutorch.so
old mode 100644
new mode 100755
index 846897531..8b29d1cb0
Binary files a/packages/react-native-executorch/third-party/android/libs/executorch/arm64-v8a/libexecutorch.so and b/packages/react-native-executorch/third-party/android/libs/executorch/arm64-v8a/libexecutorch.so differ
diff --git a/packages/react-native-executorch/third-party/android/libs/executorch/x86_64/libexecutorch.so b/packages/react-native-executorch/third-party/android/libs/executorch/x86_64/libexecutorch.so
old mode 100644
new mode 100755
index 5e4064515..2836d92bd
Binary files a/packages/react-native-executorch/third-party/android/libs/executorch/x86_64/libexecutorch.so and b/packages/react-native-executorch/third-party/android/libs/executorch/x86_64/libexecutorch.so differ
diff --git a/packages/react-native-executorch/third-party/include/absl/base/casts.h b/packages/react-native-executorch/third-party/include/absl/base/casts.h
new file mode 100644
index 000000000..461cb8d52
--- /dev/null
+++ b/packages/react-native-executorch/third-party/include/absl/base/casts.h
@@ -0,0 +1,180 @@
+//
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: casts.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines casting templates to fit use cases not covered by
+// the standard casts provided in the C++ standard. As with all cast operations,
+// use these with caution and only if alternatives do not exist.
+
+#ifndef ABSL_BASE_CASTS_H_
+#define ABSL_BASE_CASTS_H_
+
+#include <cstring>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+#include <bit> // For std::bit_cast.
+#endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
+#include "absl/base/internal/identity.h"
+#include "absl/base/macros.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// implicit_cast()
+//
+// Performs an implicit conversion between types following the language
+// rules for implicit conversion; if an implicit conversion is otherwise
+// allowed by the language in the given context, this function performs such an
+// implicit conversion.
+//
+// Example:
+//
+//   // If the context allows implicit conversion:
+//   From from;
+//   To to = from;
+//
+//   // Such code can be replaced by:
+//   implicit_cast<To>(from);
+//
+// An `implicit_cast()` may also be used to annotate numeric type conversions
+// that, although safe, may produce compiler warnings (such as `long` to `int`).
+// Additionally, an `implicit_cast()` is also useful within return statements to
+// indicate a specific implicit conversion is being undertaken.
+//
+// Example:
+//
+//   return implicit_cast<double>(size_in_bytes) / capacity_;
+//
+// Annotating code with `implicit_cast()` allows you to explicitly select
+// particular overloads and template instantiations, while providing a safer
+// cast than `reinterpret_cast()` or `static_cast()`.
+//
+// Additionally, an `implicit_cast()` can be used to allow upcasting within a
+// type hierarchy where incorrect use of `static_cast()` could accidentally
+// allow downcasting.
+//
+// Finally, an `implicit_cast()` can be used to perform implicit conversions
+// from unrelated types that otherwise couldn't be implicitly cast directly;
+// C++ will normally only implicitly cast "one step" in such conversions.
+//
+// That is, if C is a type which can be implicitly converted to B, with B being
+// a type that can be implicitly converted to A, an `implicit_cast()` can be
+// used to convert C to B (which the compiler can then implicitly convert to A
+// using language rules).
+//
+// Example:
+//
+//   // Assume an object C is convertible to B, which is implicitly convertible
+//   // to A
+//   A a = implicit_cast<B>(C);
+//
+// Such implicit cast chaining may be useful within template logic.
+template <typename To>
+constexpr To implicit_cast(typename absl::internal::type_identity_t<To> to) {
+  return to;
+}
+
+// bit_cast()
+//
+// Creates a value of the new type `Dest` whose representation is the same as
+// that of the argument, which is of (deduced) type `Source` (a "bitwise cast";
+// every bit in the value representation of the result is equal to the
+// corresponding bit in the object representation of the source). Source and
+// destination types must be of the same size, and both types must be trivially
+// copyable.
+//
+// As with most casts, use with caution. A `bit_cast()` might be needed when you
+// need to treat a value as the value of some other type, for example, to access
+// the individual bits of an object which are not normally accessible through
+// the object's type, such as for working with the binary representation of a
+// floating point value:
+//
+//   float f = 3.14159265358979;
+//   int i = bit_cast<int>(f);
+//   // i = 0x40490fdb
+//
+// Reinterpreting and accessing a value directly as a different type (as shown
+// below) usually results in undefined behavior.
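+//
+// A minimal correct-usage sketch (assumes `int` and `float` are both 32 bits
+// on the target, which bit_cast enforces via its equal-size requirement):
+//
+//   float f = 3.14159265358979f;
+//   int i = absl::bit_cast<int>(f);      // copies the bits; well-defined
+//   float g = absl::bit_cast<float>(i);  // round-trips to the original value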
+//
+// Example:
+//
+//   // WRONG
+//   float f = 3.14159265358979;
+//   int i = reinterpret_cast<int&>(f);    // Wrong
+//   int j = *reinterpret_cast<int*>(&f);  // Equally wrong
+//   int k = *bit_cast<int*>(&f);          // Equally wrong
+//
+// Reinterpret-casting results in undefined behavior according to the ISO C++
+// specification, section [basic.lval]. Roughly, this section says: if an object
+// in memory has one type, and a program accesses it with a different type, the
+// result is undefined behavior for most "different type".
+//
+// Using bit_cast on a pointer and then dereferencing it is no better than using
+// reinterpret_cast. You should only use bit_cast on the value itself.
+//
+// Such casting results in type punning: holding an object in memory of one type
+// and reading its bits back using a different type. A `bit_cast()` avoids this
+// issue by copying the object representation to a new value, which avoids
+// introducing this undefined behavior (since the original value is never
+// accessed in the wrong way).
+//
+// The requirements of `absl::bit_cast` are more strict than that of
+// `std::bit_cast` unless compiler support is available. Specifically, without
+// compiler support, this implementation also requires `Dest` to be
+// default-constructible. In C++20, `absl::bit_cast` is replaced by
+// `std::bit_cast`.
+#if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
+using std::bit_cast;
+
+#else // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
+template <
+    typename Dest, typename Source,
+    typename std::enable_if<sizeof(Dest) == sizeof(Source) &&
+                                std::is_trivially_copyable<Source>::value &&
+                                std::is_trivially_copyable<Dest>::value
+#if !ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+                                && std::is_default_constructible<Dest>::value
+#endif // !ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+                            ,
+                            int>::type = 0>
+#if ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+inline constexpr Dest bit_cast(const Source &source) {
+  return __builtin_bit_cast(Dest, source);
+}
+#else // ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+inline Dest bit_cast(const Source &source) {
+  Dest dest;
+  memcpy(static_cast<void *>(std::addressof(dest)),
+         static_cast<const void *>(std::addressof(source)), sizeof(dest));
+  return dest;
+}
+#endif // ABSL_HAVE_BUILTIN(__builtin_bit_cast)
+
+#endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_CASTS_H_
diff --git a/packages/react-native-executorch/third-party/include/absl/base/const_init.h b/packages/react-native-executorch/third-party/include/absl/base/const_init.h
new file mode 100644
index 000000000..96bef4dd3
--- /dev/null
+++ b/packages/react-native-executorch/third-party/include/absl/base/const_init.h
@@ -0,0 +1,76 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// kConstInit
+// -----------------------------------------------------------------------------
+//
+// A constructor tag used to mark an object as safe for use as a global
+// variable, avoiding the usual lifetime issues that can affect globals.
+
+#ifndef ABSL_BASE_CONST_INIT_H_
+#define ABSL_BASE_CONST_INIT_H_
+
+#include "absl/base/config.h"
+
+// In general, objects with static storage duration (such as global variables)
+// can trigger tricky object lifetime situations. Attempting to access them
+// from the constructors or destructors of other global objects can result in
+// undefined behavior, unless their constructors and destructors are designed
+// with this issue in mind.
+//
+// The normal way to deal with this issue in C++11 is to use constant
+// initialization and trivial destructors.
+//
+// Constant initialization is guaranteed to occur before any other code
+// executes. Constructors that are declared 'constexpr' are eligible for
+// constant initialization. You can annotate a variable declaration with the
+// ABSL_CONST_INIT macro to express this intent. For compilers that support
+// it, this annotation will cause a compilation error for declarations that
+// aren't subject to constant initialization (perhaps because a runtime value
+// was passed as a constructor argument).
+//
+// On program shutdown, lifetime issues can be avoided on global objects by
+// ensuring that they contain trivial destructors. A class has a trivial
+// destructor unless it has a user-defined destructor, a virtual method or base
+// class, or a data member or base class with a non-trivial destructor of its
+// own. Objects with static storage duration and a trivial destructor are not
+// cleaned up on program shutdown, and are thus safe to access from other code
+// running during shutdown.
+//
+// For a few core Abseil classes, we make a best effort to allow for safe global
+// instances, even though these classes have non-trivial destructors. These
+// objects can be created with the absl::kConstInit tag. For example:
+//   ABSL_CONST_INIT absl::Mutex global_mutex(absl::kConstInit);
+//
+// The line above declares a global variable of type absl::Mutex which can be
+// accessed at any point during startup or shutdown. global_mutex's destructor
+// will still run, but will not invalidate the object. Note that C++ specifies
+// that accessing an object after its destructor has run results in undefined
+// behavior, but this pattern works on the toolchains we support.
+//
+// The absl::kConstInit tag should only be used to define objects with static
+// or thread_local storage duration.
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+enum ConstInitType {
+  kConstInit,
+};
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_CONST_INIT_H_
diff --git a/packages/react-native-executorch/third-party/include/absl/base/dynamic_annotations.h b/packages/react-native-executorch/third-party/include/absl/base/dynamic_annotations.h
new file mode 100644
index 000000000..5deb2f2ce
--- /dev/null
+++ b/packages/react-native-executorch/third-party/include/absl/base/dynamic_annotations.h
@@ -0,0 +1,476 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file defines dynamic annotations for use with dynamic analysis tools
+// such as valgrind, PIN, etc.
+//
+// Dynamic annotation is a source code annotation that affects the generated
+// code (that is, the annotation is not a comment). Each such annotation is
+// attached to a particular instruction and/or to a particular object (address)
+// in the program.
+//
+// The annotations that should be used by users are macros in all upper-case
+// (e.g., ABSL_ANNOTATE_THREAD_NAME).
+//
+// Actual implementation of these macros may differ depending on the dynamic
+// analysis tool being used.
+//
+// This file supports the following configurations:
+// - Dynamic Annotations enabled (with static thread-safety warnings disabled).
+//   In this case, macros expand to functions implemented by Thread Sanitizer,
+//   when building with TSan. When not provided an external implementation,
+//   dynamic_annotations.cc provides no-op implementations.
+//
+// - Static Clang thread-safety warnings enabled.
+//   When building with a Clang compiler that supports thread-safety warnings,
+//   a subset of annotations can be statically-checked at compile-time. We
+//   expand these macros to static-inline functions that can be analyzed for
+//   thread-safety, but afterwards elided when building the final binary.
+//
+// - All annotations are disabled.
+//   If neither Dynamic Annotations nor Clang thread-safety warnings are
+//   enabled, then all annotation-macros expand to empty.
+
+#ifndef ABSL_BASE_DYNAMIC_ANNOTATIONS_H_
+#define ABSL_BASE_DYNAMIC_ANNOTATIONS_H_
+
+#include
+#include
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#ifdef __cplusplus
+#include "absl/base/macros.h"
+#endif
+
+#ifdef ABSL_HAVE_HWADDRESS_SANITIZER
+#include <sanitizer/hwasan_interface.h>
+#endif
+
+// TODO(rogeeff): Remove after the backward compatibility period.
+#include "absl/base/internal/dynamic_annotations.h" // IWYU pragma: export
+
+// -------------------------------------------------------------------------
+// Decide which features are enabled.
+
+#ifdef ABSL_HAVE_THREAD_SANITIZER
+
+#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 1
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0
+#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED 1
+
+#else
+
+#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 0
+#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 0
+#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 0
+
+// Clang provides limited support for static thread-safety analysis through a
+// feature called Annotalysis. We configure macro-definitions according to
+// whether Annotalysis support is available. When running in opt-mode, GCC
+// will issue a warning, if these attributes are compiled. Only include them
+// when compiling using Clang.
+
+#if defined(__clang__)
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 1
+#if !defined(SWIG)
+#define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 1
+#endif
+#else
+#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0
+#endif
+
+// Read/write annotations are enabled in Annotalysis mode; disabled otherwise.
+#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \
+  ABSL_INTERNAL_ANNOTALYSIS_ENABLED
+
+#endif // ABSL_HAVE_THREAD_SANITIZER
+
+#ifdef __cplusplus
+#define ABSL_INTERNAL_BEGIN_EXTERN_C extern "C" {
+#define ABSL_INTERNAL_END_EXTERN_C } // extern "C"
+#define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F
+#define ABSL_INTERNAL_STATIC_INLINE inline
+#else
+#define ABSL_INTERNAL_BEGIN_EXTERN_C // empty
+#define ABSL_INTERNAL_END_EXTERN_C   // empty
+#define ABSL_INTERNAL_GLOBAL_SCOPED(F) F
+#define ABSL_INTERNAL_STATIC_INLINE static inline
+#endif
+
+// -------------------------------------------------------------------------
+// Define race annotations.
+
+#if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1
+// Some of the symbols used in this section (e.g. AnnotateBenignRaceSized) are
+// defined by the compiler-based sanitizer implementation, not by the Abseil
+// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL.
+
+// -------------------------------------------------------------
+// Annotations that suppress errors. It is usually better to express the
+// program's synchronization using the other annotations, but these can be used
+// when all else fails.
+
+// Report that we may have a benign race at `pointer`, with size
+// "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the
+// point where `pointer` has been allocated, preferably close to the point
+// where the race happens. See also ABSL_ANNOTATE_BENIGN_RACE_STATIC.
+#define ABSL_ANNOTATE_BENIGN_RACE(pointer, description) \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized)  \
+  (__FILE__, __LINE__, pointer, sizeof(*(pointer)), description)
+
+// Same as ABSL_ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to
+// the memory range [`address`, `address`+`size`).
+#define ABSL_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized)              \
+  (__FILE__, __LINE__, address, size, description)
+
+// Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads.
+// This annotation could be useful if you want to skip expensive race analysis
+// during some period of program execution, e.g. during initialization.
+#define ABSL_ANNOTATE_ENABLE_RACE_DETECTION(enable)        \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \
+  (__FILE__, __LINE__, enable)
+
+// -------------------------------------------------------------
+// Annotations useful for debugging.
+
+// Report the current thread `name` to a race detector.
+#define ABSL_ANNOTATE_THREAD_NAME(name) \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName)(__FILE__, __LINE__, name)
+
+// -------------------------------------------------------------
+// Annotations useful when implementing locks. They are not normally needed by
+// modules that merely use locks. The `lock` argument is a pointer to the lock
+// object.
+
+// Report that a lock has been created at address `lock`.
+#define ABSL_ANNOTATE_RWLOCK_CREATE(lock) \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock)
+
+// Report that a linker initialized lock has been created at address `lock`.
+#ifdef ABSL_HAVE_THREAD_SANITIZER
+#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock)          \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \
+  (__FILE__, __LINE__, lock)
+#else
+#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) \
+  ABSL_ANNOTATE_RWLOCK_CREATE(lock)
+#endif
+
+// Report that the lock at address `lock` is about to be destroyed.
+#define ABSL_ANNOTATE_RWLOCK_DESTROY(lock) \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock)
+
+// Report that the lock at address `lock` has been acquired.
+// `is_w`=1 for writer lock, `is_w`=0 for reader lock.
+#define ABSL_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)     \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \
+  (__FILE__, __LINE__, lock, is_w)
+
+// Report that the lock at address `lock` is about to be released.
+// `is_w`=1 for writer lock, `is_w`=0 for reader lock.
+#define ABSL_ANNOTATE_RWLOCK_RELEASED(lock, is_w)     \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \
+  (__FILE__, __LINE__, lock, is_w)
+
+// Apply ABSL_ANNOTATE_BENIGN_RACE_SIZED to a static variable `static_var`.
+#define ABSL_ANNOTATE_BENIGN_RACE_STATIC(static_var, description)     \
+  namespace {                                                          \
+  class static_var##_annotator {                                       \
+  public:                                                              \
+    static_var##_annotator() {                                         \
+      ABSL_ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \
+                                      #static_var ": " description);   \
+    }                                                                  \
+  };                                                                   \
+  static static_var##_annotator the##static_var##_annotator;           \
+  } // namespace
+
+// Function prototypes of annotations provided by the compiler-based sanitizer
+// implementation.
+ABSL_INTERNAL_BEGIN_EXTERN_C
+void AnnotateRWLockCreate(const char *file, int line,
+                          const volatile void *lock);
+void AnnotateRWLockCreateStatic(const char *file, int line,
+                                const volatile void *lock);
+void AnnotateRWLockDestroy(const char *file, int line,
+                           const volatile void *lock);
+void AnnotateRWLockAcquired(const char *file, int line,
+                            const volatile void *lock, long is_w); // NOLINT
+void AnnotateRWLockReleased(const char *file, int line,
+                            const volatile void *lock, long is_w); // NOLINT
+void AnnotateBenignRace(const char *file, int line,
+                        const volatile void *address, const char *description);
+void AnnotateBenignRaceSized(const char *file, int line,
+                             const volatile void *address, size_t size,
+                             const char *description);
+void AnnotateThreadName(const char *file, int line, const char *name);
+void AnnotateEnableRaceDetection(const char *file, int line, int enable);
+ABSL_INTERNAL_END_EXTERN_C
+
+#else // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 0
+
+#define ABSL_ANNOTATE_RWLOCK_CREATE(lock)                           // empty
+#define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock)                    // empty
+#define ABSL_ANNOTATE_RWLOCK_DESTROY(lock)                          // empty
+#define ABSL_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w)                   // empty
+#define ABSL_ANNOTATE_RWLOCK_RELEASED(lock, is_w)                   // empty
+#define ABSL_ANNOTATE_BENIGN_RACE(address, description)             // empty
+#define ABSL_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) // empty
+#define ABSL_ANNOTATE_THREAD_NAME(name)                             // empty
+#define ABSL_ANNOTATE_ENABLE_RACE_DETECTION(enable)                 // empty
+#define ABSL_ANNOTATE_BENIGN_RACE_STATIC(static_var, description)   // empty
+
+#endif // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED
+
+// -------------------------------------------------------------------------
+// Define memory annotations.
+
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
+
+#include <sanitizer/msan_interface.h>
+
+#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+  __msan_unpoison(address, size)
+
+#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \
+  __msan_allocated_memory(address, size)
+
+#else // !defined(ABSL_HAVE_MEMORY_SANITIZER)
+
+#define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size)   // empty
+#define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) // empty
+
+#endif // ABSL_HAVE_MEMORY_SANITIZER
+
+// -------------------------------------------------------------------------
+// Define IGNORE_READS_BEGIN/_END attributes.
+
+#if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \
+  __attribute((exclusive_lock_function("*")))
+#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \
+  __attribute((unlock_function("*")))
+
+#else // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE // empty
+#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE   // empty
+
+#endif // defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED)
+
+// -------------------------------------------------------------------------
+// Define IGNORE_READS_BEGIN/_END annotations.
+
+#if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1
+// Some of the symbols used in this section (e.g. AnnotateIgnoreReadsBegin) are
+// defined by the compiler-based implementation, not by the Abseil
+// library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL.
+
+// Request the analysis tool to ignore all reads in the current thread until
+// ABSL_ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey
+// reads, while still checking other reads and all writes.
+// See also ABSL_ANNOTATE_UNPROTECTED_READ.
+#define ABSL_ANNOTATE_IGNORE_READS_BEGIN()              \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin) \
+  (__FILE__, __LINE__)
+
+// Stop ignoring reads.
+#define ABSL_ANNOTATE_IGNORE_READS_END()              \
+  ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd) \
+  (__FILE__, __LINE__)
+
+// Function prototypes of annotations provided by the compiler-based sanitizer
+// implementation.
+ABSL_INTERNAL_BEGIN_EXTERN_C
+void AnnotateIgnoreReadsBegin(const char *file, int line)
+    ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE;
+void AnnotateIgnoreReadsEnd(const char *file,
+                            int line) ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE;
+ABSL_INTERNAL_END_EXTERN_C
+
+#elif defined(ABSL_INTERNAL_ANNOTALYSIS_ENABLED)
+
+// When Annotalysis is enabled without Dynamic Annotations, the use of
+// static-inline functions allows the annotations to be read at compile-time,
+// while still letting the compiler elide the functions from the final build.
+//
+// TODO(delesley) -- The exclusive lock here ignores writes as well, but
+// allows IGNORE_READS_AND_WRITES to work properly.
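+//
+// Minimal usage sketch (hypothetical `racy_flag_` variable): exempting one
+// intentional unsynchronized read from the race detector:
+//
+//   ABSL_ANNOTATE_IGNORE_READS_BEGIN();
+//   bool flag = racy_flag_; // intentionally racy read
+//   ABSL_ANNOTATE_IGNORE_READS_END();
+//
+// (Equivalently, ABSL_ANNOTATE_UNPROTECTED_READ(racy_flag_); see the
+// definition further below.)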
+ +#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED( \ + ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsBegin)) \ + () + +#define ABSL_ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED( \ + ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsEnd)) \ + () + +ABSL_INTERNAL_STATIC_INLINE void +ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsBegin)() + ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE {} + +ABSL_INTERNAL_STATIC_INLINE void +ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsEnd)() + ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE {} + +#else + +#define ABSL_ANNOTATE_IGNORE_READS_BEGIN() // empty +#define ABSL_ANNOTATE_IGNORE_READS_END() // empty + +#endif + +// ------------------------------------------------------------------------- +// Define IGNORE_WRITES_BEGIN/_END annotations. + +#if ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED == 1 + +// Similar to ABSL_ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead. +#define ABSL_ANNOTATE_IGNORE_WRITES_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__) + +// Stop ignoring writes. +#define ABSL_ANNOTATE_IGNORE_WRITES_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__) + +// Function prototypes of annotations provided by the compiler-based sanitizer +// implementation. +ABSL_INTERNAL_BEGIN_EXTERN_C +void AnnotateIgnoreWritesBegin(const char *file, int line); +void AnnotateIgnoreWritesEnd(const char *file, int line); +ABSL_INTERNAL_END_EXTERN_C + +#else + +#define ABSL_ANNOTATE_IGNORE_WRITES_BEGIN() // empty +#define ABSL_ANNOTATE_IGNORE_WRITES_END() // empty + +#endif + +// ------------------------------------------------------------------------- +// Define the ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more +// primitive annotations defined above. +// +// Instead of doing +// ABSL_ANNOTATE_IGNORE_READS_BEGIN(); +// ... = x; +// ABSL_ANNOTATE_IGNORE_READS_END(); +// one can use +// ... = ABSL_ANNOTATE_UNPROTECTED_READ(x); + +#if defined(ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED) + +// Start ignoring all memory accesses (both reads and writes). +#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \ + do { \ + ABSL_ANNOTATE_IGNORE_READS_BEGIN(); \ + ABSL_ANNOTATE_IGNORE_WRITES_BEGIN(); \ + } while (0) + +// Stop ignoring both reads and writes. +#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END() \ + do { \ + ABSL_ANNOTATE_IGNORE_WRITES_END(); \ + ABSL_ANNOTATE_IGNORE_READS_END(); \ + } while (0) + +#ifdef __cplusplus +// ABSL_ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads. +#define ABSL_ANNOTATE_UNPROTECTED_READ(x) \ + absl::base_internal::AnnotateUnprotectedRead(x) + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +template +inline T AnnotateUnprotectedRead(const volatile T &x) { // NOLINT + ABSL_ANNOTATE_IGNORE_READS_BEGIN(); + T res = x; + ABSL_ANNOTATE_IGNORE_READS_END(); + return res; +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl +#endif + +#else + +#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() // empty +#define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END() // empty +#define ABSL_ANNOTATE_UNPROTECTED_READ(x) (x) + +#endif + +// ------------------------------------------------------------------------- +// Address sanitizer annotations + +#ifdef ABSL_HAVE_ADDRESS_SANITIZER +// Describe the current state of a contiguous container such as e.g. +// std::vector or std::string. 
For more details see +// sanitizer/common_interface_defs.h, which is provided by the compiler. +#include + +#define ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \ + __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid) +#define ABSL_ADDRESS_SANITIZER_REDZONE(name) \ + struct { \ + alignas(8) char x[8]; \ + } name + +#else + +#define ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) // empty +#define ABSL_ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "") + +#endif // ABSL_HAVE_ADDRESS_SANITIZER + +// ------------------------------------------------------------------------- +// HWAddress sanitizer annotations + +#ifdef __cplusplus +namespace absl { +#ifdef ABSL_HAVE_HWADDRESS_SANITIZER +// Under HWASAN changes the tag of the pointer. +template T *HwasanTagPointer(T *ptr, uintptr_t tag) { + return reinterpret_cast(__hwasan_tag_pointer(ptr, tag)); +} +#else +template T *HwasanTagPointer(T *ptr, uintptr_t) { return ptr; } +#endif +} // namespace absl +#endif + +// ------------------------------------------------------------------------- +// Undefine the macros intended only for this file. + +#undef ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_ANNOTALYSIS_ENABLED +#undef ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_BEGIN_EXTERN_C +#undef ABSL_INTERNAL_END_EXTERN_C +#undef ABSL_INTERNAL_STATIC_INLINE + +#endif // ABSL_BASE_DYNAMIC_ANNOTATIONS_H_ diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/atomic_hook_test_helper.h b/packages/react-native-executorch/third-party/include/absl/base/internal/atomic_hook_test_helper.h new file mode 100644 index 000000000..f5ff0fea3 --- /dev/null +++ b/packages/react-native-executorch/third-party/include/absl/base/internal/atomic_hook_test_helper.h @@ -0,0 +1,34 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_ATOMIC_HOOK_TEST_HELPER_H_ +#define ABSL_BASE_INTERNAL_ATOMIC_HOOK_TEST_HELPER_H_ + +#include "absl/base/internal/atomic_hook.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace atomic_hook_internal { + +using VoidF = void (*)(); +extern absl::base_internal::AtomicHook func; +extern int default_func_calls; +void DefaultFunc(); +void RegisterFunc(VoidF func); + +} // namespace atomic_hook_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_ATOMIC_HOOK_TEST_HELPER_H_ diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/cycleclock.h b/packages/react-native-executorch/third-party/include/absl/base/internal/cycleclock.h new file mode 100644 index 000000000..a6ca008c4 --- /dev/null +++ b/packages/react-native-executorch/third-party/include/absl/base/internal/cycleclock.h @@ -0,0 +1,144 @@ +// +// Copyright 2017 The Abseil Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// ----------------------------------------------------------------------------- +// File: cycleclock.h +// ----------------------------------------------------------------------------- +// +// This header file defines a `CycleClock`, which yields the value and frequency +// of a cycle counter that increments at a rate that is approximately constant. +// +// NOTE: +// +// The cycle counter frequency is not necessarily related to the core clock +// frequency and should not be treated as such. That is, `CycleClock` cycles are +// not necessarily "CPU cycles" and code should not rely on that behavior, even +// if experimentally observed. +// +// An arbitrary offset may have been added to the counter at power on. +// +// On some platforms, the rate and offset of the counter may differ +// slightly when read from different CPUs of a multiprocessor. Usually, +// we try to ensure that the operating system adjusts values periodically +// so that values agree approximately. If you need stronger guarantees, +// consider using alternate interfaces. +// +// The CPU is not required to maintain the ordering of a cycle counter read +// with respect to surrounding instructions. + +#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_H_ +#define ABSL_BASE_INTERNAL_CYCLECLOCK_H_ + +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/internal/cycleclock_config.h" +#include "absl/base/internal/unscaledcycleclock.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +using CycleClockSourceFunc = int64_t (*)(); + +// ----------------------------------------------------------------------------- +// CycleClock +// ----------------------------------------------------------------------------- +class CycleClock { +public: + // CycleClock::Now() + // + // Returns the value of a cycle counter that counts at a rate that is + // approximately constant. + static int64_t Now(); + + // CycleClock::Frequency() + // + // Returns the amount by which `CycleClock::Now()` increases per second. Note + // that this value may not necessarily match the core CPU clock frequency. + static double Frequency(); + +private: +#if ABSL_USE_UNSCALED_CYCLECLOCK + static CycleClockSourceFunc LoadCycleClockSource(); + + static constexpr int32_t kShift = kCycleClockShift; + static constexpr double kFrequencyScale = kCycleClockFrequencyScale; + + ABSL_CONST_INIT static std::atomic cycle_clock_source_; +#endif // ABSL_USE_UNSCALED_CYCLECLOC + + CycleClock() = delete; // no instances + CycleClock(const CycleClock &) = delete; + CycleClock &operator=(const CycleClock &) = delete; + + friend class CycleClockSource; +}; + +class CycleClockSource { +private: + // CycleClockSource::Register() + // + // Register a function that provides an alternate source for the unscaled CPU + // cycle count value. 
The source function must be async signal safe, must not + // call CycleClock::Now(), and must have a frequency that matches that of the + // unscaled clock used by CycleClock. A nullptr value resets CycleClock to use + // the default source. + static void Register(CycleClockSourceFunc source); +}; + +#if ABSL_USE_UNSCALED_CYCLECLOCK + +inline CycleClockSourceFunc CycleClock::LoadCycleClockSource() { +#if !defined(__x86_64__) + // Optimize for the common case (no callback) by first doing a relaxed load; + // this is significantly faster on non-x86 platforms. + if (cycle_clock_source_.load(std::memory_order_relaxed) == nullptr) { + return nullptr; + } +#endif // !defined(__x86_64__) + + // This corresponds to the store(std::memory_order_release) in + // CycleClockSource::Register, and makes sure that any updates made prior to + // registering the callback are visible to this thread before the callback + // is invoked. + return cycle_clock_source_.load(std::memory_order_acquire); +} + +// Accessing globals in inlined code in Window DLLs is problematic. +#ifndef _WIN32 +inline int64_t CycleClock::Now() { + auto fn = LoadCycleClockSource(); + if (fn == nullptr) { + return base_internal::UnscaledCycleClock::Now() >> kShift; + } + return fn() >> kShift; +} +#endif + +inline double CycleClock::Frequency() { + return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency(); +} + +#endif // ABSL_USE_UNSCALED_CYCLECLOCK + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_CYCLECLOCK_H_ diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/cycleclock_config.h b/packages/react-native-executorch/third-party/include/absl/base/internal/cycleclock_config.h new file mode 100644 index 000000000..3e15b9712 --- /dev/null +++ b/packages/react-native-executorch/third-party/include/absl/base/internal/cycleclock_config.h @@ -0,0 +1,55 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_CONFIG_H_ +#define ABSL_BASE_INTERNAL_CYCLECLOCK_CONFIG_H_ + +#include + +#include "absl/base/config.h" +#include "absl/base/internal/inline_variable.h" +#include "absl/base/internal/unscaledcycleclock_config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +#if ABSL_USE_UNSCALED_CYCLECLOCK +#ifdef NDEBUG +#ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY +// Not debug mode and the UnscaledCycleClock frequency is the CPU +// frequency. Scale the CycleClock to prevent overflow if someone +// tries to represent the time as cycles since the Unix epoch. +ABSL_INTERNAL_INLINE_CONSTEXPR(int32_t, kCycleClockShift, 1); +#else +// Not debug mode and the UnscaledCycleClock isn't operating at the +// raw CPU frequency. There is no need to do any scaling, so don't +// needlessly sacrifice precision. 
+ABSL_INTERNAL_INLINE_CONSTEXPR(int32_t, kCycleClockShift, 0); +#endif +#else // NDEBUG +// In debug mode use a different shift to discourage depending on a +// particular shift value. +ABSL_INTERNAL_INLINE_CONSTEXPR(int32_t, kCycleClockShift, 2); +#endif // NDEBUG + +ABSL_INTERNAL_INLINE_CONSTEXPR(double, kCycleClockFrequencyScale, + 1.0 / (1 << kCycleClockShift)); +#endif // ABSL_USE_UNSCALED_CYCLECLOC + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_CYCLECLOCK_CONFIG_H_ diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/direct_mmap.h b/packages/react-native-executorch/third-party/include/absl/base/internal/direct_mmap.h new file mode 100644 index 000000000..d657f11b5 --- /dev/null +++ b/packages/react-native-executorch/third-party/include/absl/base/internal/direct_mmap.h @@ -0,0 +1,170 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Functions for directly invoking mmap() via syscall, avoiding the case where +// mmap() has been locally overridden. + +#ifndef ABSL_BASE_INTERNAL_DIRECT_MMAP_H_ +#define ABSL_BASE_INTERNAL_DIRECT_MMAP_H_ + +#include "absl/base/config.h" + +#ifdef ABSL_HAVE_MMAP + +#include + +#ifdef __linux__ + +#include +#ifdef __BIONIC__ +#include +#else +#include +#endif + +#include +#include +#include +#include +#include + +#ifdef __mips__ +// Include definitions of the ABI currently in use. +#if defined(__BIONIC__) || !defined(__GLIBC__) +// Android doesn't have sgidefs.h, but does have asm/sgidefs.h, which has the +// definitions we need. +#include +#else +#include +#endif // __BIONIC__ || !__GLIBC__ +#endif // __mips__ + +// SYS_mmap and SYS_munmap are not defined in Android. +#ifdef __BIONIC__ +extern "C" void *__mmap2(void *, size_t, int, int, int, size_t); +#if defined(__NR_mmap) && !defined(SYS_mmap) +#define SYS_mmap __NR_mmap +#endif +#ifndef SYS_munmap +#define SYS_munmap __NR_munmap +#endif +#endif // __BIONIC__ + +#if defined(__NR_mmap2) && !defined(SYS_mmap2) +#define SYS_mmap2 __NR_mmap2 +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// Platform specific logic extracted from +// https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h +inline void *DirectMmap(void *start, size_t length, int prot, int flags, int fd, + off_t offset) noexcept { +#if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \ + defined(__m68k__) || defined(__sh__) || \ + (defined(__hppa__) && !defined(__LP64__)) || \ + (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || \ + (defined(__PPC__) && !defined(__PPC64__)) || \ + (defined(__riscv) && __riscv_xlen == 32) || \ + (defined(__s390__) && !defined(__s390x__)) || \ + (defined(__sparc__) && !defined(__arch64__)) + // On these architectures, implement mmap with mmap2. 
+  static int pagesize = 0;
+  if (pagesize == 0) {
+#if defined(__wasm__) || defined(__asmjs__)
+    pagesize = getpagesize();
+#else
+    pagesize = sysconf(_SC_PAGESIZE);
+#endif
+  }
+  if (offset < 0 || offset % pagesize != 0) {
+    errno = EINVAL;
+    return MAP_FAILED;
+  }
+#ifdef __BIONIC__
+  // SYS_mmap2 has problems on Android API level <= 16.
+  // Workaround by invoking __mmap2() instead.
+  return __mmap2(start, length, prot, flags, fd,
+                 static_cast<size_t>(offset / pagesize));
+#else
+  return reinterpret_cast<void *>(
+      syscall(SYS_mmap2, start, length, prot, flags, fd,
+              static_cast<unsigned long>(offset / pagesize)));  // NOLINT
+#endif
+#elif defined(__s390x__)
+  // On s390x, mmap() arguments are passed in memory.
+  unsigned long buf[6] = {reinterpret_cast<unsigned long>(start),  // NOLINT
+                          static_cast<unsigned long>(length),      // NOLINT
+                          static_cast<unsigned long>(prot),        // NOLINT
+                          static_cast<unsigned long>(flags),       // NOLINT
+                          static_cast<unsigned long>(fd),          // NOLINT
+                          static_cast<unsigned long>(offset)};     // NOLINT
+  return reinterpret_cast<void *>(syscall(SYS_mmap, buf));
+#elif defined(__x86_64__)
+// The x32 ABI has 32 bit longs, but the syscall interface is 64 bit.
+// We need to explicitly cast to an unsigned 64 bit type to avoid implicit
+// sign extension. We can't cast pointers directly because those are
+// 32 bits, and gcc will dump ugly warnings about casting from a pointer
+// to an integer of a different size. We also need to make sure __off64_t
+// isn't truncated to 32-bits under x32.
+#define MMAP_SYSCALL_ARG(x) ((uint64_t)(uintptr_t)(x))
+  return reinterpret_cast<void *>(
+      syscall(SYS_mmap, MMAP_SYSCALL_ARG(start), MMAP_SYSCALL_ARG(length),
+              MMAP_SYSCALL_ARG(prot), MMAP_SYSCALL_ARG(flags),
+              MMAP_SYSCALL_ARG(fd), static_cast<uint64_t>(offset)));
+#undef MMAP_SYSCALL_ARG
+#else  // Remaining 64-bit architectures.
+  static_assert(sizeof(unsigned long) == 8, "Platform is not 64-bit");
+  return reinterpret_cast<void *>(
+      syscall(SYS_mmap, start, length, prot, flags, fd, offset));
+#endif
+}
+
+inline int DirectMunmap(void *start, size_t length) {
+  return static_cast<int>(syscall(SYS_munmap, start, length));
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#else  // !__linux__
+
+// For non-linux platforms where we have mmap, just dispatch directly to the
+// actual mmap()/munmap() methods.
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+inline void *DirectMmap(void *start, size_t length, int prot, int flags,
+                        int fd, off_t offset) {
+  return mmap(start, length, prot, flags, fd, offset);
+}
+
+inline int DirectMunmap(void *start, size_t length) {
+  return munmap(start, length);
+}
+
+}  // namespace base_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // __linux__
+
+#endif  // ABSL_HAVE_MMAP
+
+#endif  // ABSL_BASE_INTERNAL_DIRECT_MMAP_H_
diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/dynamic_annotations.h b/packages/react-native-executorch/third-party/include/absl/base/internal/dynamic_annotations.h
new file mode 100644
index 000000000..4366fc7ef
--- /dev/null
+++ b/packages/react-native-executorch/third-party/include/absl/base/internal/dynamic_annotations.h
@@ -0,0 +1,398 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file defines dynamic annotations for use with dynamic analysis tool +// such as valgrind, PIN, etc. +// +// Dynamic annotation is a source code annotation that affects the generated +// code (that is, the annotation is not a comment). Each such annotation is +// attached to a particular instruction and/or to a particular object (address) +// in the program. +// +// The annotations that should be used by users are macros in all upper-case +// (e.g., ANNOTATE_THREAD_NAME). +// +// Actual implementation of these macros may differ depending on the dynamic +// analysis tool being used. +// +// This file supports the following configurations: +// - Dynamic Annotations enabled (with static thread-safety warnings disabled). +// In this case, macros expand to functions implemented by Thread Sanitizer, +// when building with TSan. When not provided an external implementation, +// dynamic_annotations.cc provides no-op implementations. +// +// - Static Clang thread-safety warnings enabled. +// When building with a Clang compiler that supports thread-safety warnings, +// a subset of annotations can be statically-checked at compile-time. We +// expand these macros to static-inline functions that can be analyzed for +// thread-safety, but afterwards elided when building the final binary. +// +// - All annotations are disabled. +// If neither Dynamic Annotations nor Clang thread-safety warnings are +// enabled, then all annotation-macros expand to empty. + +#ifndef ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_ +#define ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_ + +#include + +#include "absl/base/config.h" + +// ------------------------------------------------------------------------- +// Decide which features are enabled + +#ifndef DYNAMIC_ANNOTATIONS_ENABLED +#define DYNAMIC_ANNOTATIONS_ENABLED 0 +#endif + +#if defined(__clang__) && !defined(SWIG) +#define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 1 +#endif + +#if DYNAMIC_ANNOTATIONS_ENABLED != 0 + +#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 1 +#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 1 +#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 1 +#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0 +#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED 1 + +#else + +#define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 0 +#define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 0 +#define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 0 + +// Clang provides limited support for static thread-safety analysis through a +// feature called Annotalysis. We configure macro-definitions according to +// whether Annotalysis support is available. When running in opt-mode, GCC +// will issue a warning, if these attributes are compiled. Only include them +// when compiling using Clang. + +// ANNOTALYSIS_ENABLED == 1 when IGNORE_READ_ATTRIBUTE_ENABLED == 1 +#define ABSL_INTERNAL_ANNOTALYSIS_ENABLED \ + defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) +// Read/write annotations are enabled in Annotalysis mode; disabled otherwise. 
+#define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \ + ABSL_INTERNAL_ANNOTALYSIS_ENABLED +#endif + +// Memory annotations are also made available to LLVM's Memory Sanitizer +#if defined(ABSL_HAVE_MEMORY_SANITIZER) && !defined(__native_client__) +#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 1 +#endif + +#ifndef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED +#define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 0 +#endif + +#ifdef __cplusplus +#define ABSL_INTERNAL_BEGIN_EXTERN_C extern "C" { +#define ABSL_INTERNAL_END_EXTERN_C } // extern "C" +#define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F +#define ABSL_INTERNAL_STATIC_INLINE inline +#else +#define ABSL_INTERNAL_BEGIN_EXTERN_C // empty +#define ABSL_INTERNAL_END_EXTERN_C // empty +#define ABSL_INTERNAL_GLOBAL_SCOPED(F) F +#define ABSL_INTERNAL_STATIC_INLINE static inline +#endif + +// ------------------------------------------------------------------------- +// Define race annotations. + +#if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1 + +// ------------------------------------------------------------- +// Annotations that suppress errors. It is usually better to express the +// program's synchronization using the other annotations, but these can be used +// when all else fails. + +// Report that we may have a benign race at `pointer`, with size +// "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the +// point where `pointer` has been allocated, preferably close to the point +// where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC. +#define ANNOTATE_BENIGN_RACE(pointer, description) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ + (__FILE__, __LINE__, pointer, sizeof(*(pointer)), description) + +// Same as ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to +// the memory range [`address`, `address`+`size`). +#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ + (__FILE__, __LINE__, address, size, description) + +// Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads. +// This annotation could be useful if you want to skip expensive race analysis +// during some period of program execution, e.g. during initialization. +#define ANNOTATE_ENABLE_RACE_DETECTION(enable) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \ + (__FILE__, __LINE__, enable) + +// ------------------------------------------------------------- +// Annotations useful for debugging. + +// Report the current thread `name` to a race detector. +#define ANNOTATE_THREAD_NAME(name) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName)(__FILE__, __LINE__, name) + +// ------------------------------------------------------------- +// Annotations useful when implementing locks. They are not normally needed by +// modules that merely use locks. The `lock` argument is a pointer to the lock +// object. + +// Report that a lock has been created at address `lock`. +#define ANNOTATE_RWLOCK_CREATE(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock) + +// Report that a linker initialized lock has been created at address `lock`. +#ifdef ABSL_HAVE_THREAD_SANITIZER +#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \ + (__FILE__, __LINE__, lock) +#else +#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) ANNOTATE_RWLOCK_CREATE(lock) +#endif + +// Report that the lock at address `lock` is about to be destroyed. 
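+// Usage sketch for the annotations above (g_hit_counter and SpinLock are
+// hypothetical; ANNOTATE_RWLOCK_DESTROY/ACQUIRED/RELEASED are defined just
+// below):
+//
+//   static int g_hit_counter = 0;  // racy on purpose; stats are best-effort
+//   ANNOTATE_BENIGN_RACE(&g_hit_counter, "best-effort hit counter");
+//   ANNOTATE_THREAD_NAME("stats-worker");  // thread name shows up in reports
+//
+//   class SpinLock {
+//    public:
+//     SpinLock() { ANNOTATE_RWLOCK_CREATE(this); }
+//     ~SpinLock() { ANNOTATE_RWLOCK_DESTROY(this); }
+//     void Lock() {
+//       while (flag_.exchange(true, std::memory_order_acquire)) {}
+//       ANNOTATE_RWLOCK_ACQUIRED(this, /*is_w=*/1);
+//     }
+//     void Unlock() {
+//       ANNOTATE_RWLOCK_RELEASED(this, /*is_w=*/1);
+//       flag_.store(false, std::memory_order_release);
+//     }
+//    private:
+//     std::atomic<bool> flag_{false};
+//   };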
+#define ANNOTATE_RWLOCK_DESTROY(lock) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock) + +// Report that the lock at address `lock` has been acquired. +// `is_w`=1 for writer lock, `is_w`=0 for reader lock. +#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \ + (__FILE__, __LINE__, lock, is_w) + +// Report that the lock at address `lock` is about to be released. +// `is_w`=1 for writer lock, `is_w`=0 for reader lock. +#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \ + (__FILE__, __LINE__, lock, is_w) + +// Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable `static_var`. +#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \ + namespace { \ + class static_var##_annotator { \ + public: \ + static_var##_annotator() { \ + ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \ + #static_var ": " description); \ + } \ + }; \ + static static_var##_annotator the##static_var##_annotator; \ + } // namespace + +#else // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 0 + +#define ANNOTATE_RWLOCK_CREATE(lock) // empty +#define ANNOTATE_RWLOCK_CREATE_STATIC(lock) // empty +#define ANNOTATE_RWLOCK_DESTROY(lock) // empty +#define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) // empty +#define ANNOTATE_RWLOCK_RELEASED(lock, is_w) // empty +#define ANNOTATE_BENIGN_RACE(address, description) // empty +#define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) // empty +#define ANNOTATE_THREAD_NAME(name) // empty +#define ANNOTATE_ENABLE_RACE_DETECTION(enable) // empty +#define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) // empty + +#endif // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED + +// ------------------------------------------------------------------------- +// Define memory annotations. + +#if ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 1 + +#include + +#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ + __msan_unpoison(address, size) + +#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ + __msan_allocated_memory(address, size) + +#else // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 0 + +#if DYNAMIC_ANNOTATIONS_ENABLED == 1 +#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ + do { \ + (void)(address); \ + (void)(size); \ + } while (0) +#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ + do { \ + (void)(address); \ + (void)(size); \ + } while (0) +#else +#define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) // empty +#define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) // empty +#endif + +#endif // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED + +// ------------------------------------------------------------------------- +// Define IGNORE_READS_BEGIN/_END attributes. + +#if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) + +#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \ + __attribute((exclusive_lock_function("*"))) +#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \ + __attribute((unlock_function("*"))) + +#else // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) + +#define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE // empty +#define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE // empty + +#endif // defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) + +// ------------------------------------------------------------------------- +// Define IGNORE_READS_BEGIN/_END annotations. 
+ +#if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1 + +// Request the analysis tool to ignore all reads in the current thread until +// ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey +// reads, while still checking other reads and all writes. +// See also ANNOTATE_UNPROTECTED_READ. +#define ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin)(__FILE__, __LINE__) + +// Stop ignoring reads. +#define ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd)(__FILE__, __LINE__) + +#elif defined(ABSL_INTERNAL_ANNOTALYSIS_ENABLED) + +// When Annotalysis is enabled without Dynamic Annotations, the use of +// static-inline functions allows the annotations to be read at compile-time, +// while still letting the compiler elide the functions from the final build. +// +// TODO(delesley) -- The exclusive lock here ignores writes as well, but +// allows IGNORE_READS_AND_WRITES to work properly. + +#define ANNOTATE_IGNORE_READS_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsBegin)() + +#define ANNOTATE_IGNORE_READS_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsEnd)() + +#else + +#define ANNOTATE_IGNORE_READS_BEGIN() // empty +#define ANNOTATE_IGNORE_READS_END() // empty + +#endif + +// ------------------------------------------------------------------------- +// Define IGNORE_WRITES_BEGIN/_END annotations. + +#if ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED == 1 + +// Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead. +#define ANNOTATE_IGNORE_WRITES_BEGIN() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__) + +// Stop ignoring writes. +#define ANNOTATE_IGNORE_WRITES_END() \ + ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__) + +#else + +#define ANNOTATE_IGNORE_WRITES_BEGIN() // empty +#define ANNOTATE_IGNORE_WRITES_END() // empty + +#endif + +// ------------------------------------------------------------------------- +// Define the ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more +// primitive annotations defined above. +// +// Instead of doing +// ANNOTATE_IGNORE_READS_BEGIN(); +// ... = x; +// ANNOTATE_IGNORE_READS_END(); +// one can use +// ... = ANNOTATE_UNPROTECTED_READ(x); + +#if defined(ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED) + +// Start ignoring all memory accesses (both reads and writes). +#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \ + do { \ + ANNOTATE_IGNORE_READS_BEGIN(); \ + ANNOTATE_IGNORE_WRITES_BEGIN(); \ + } while (0) + +// Stop ignoring both reads and writes. +#define ANNOTATE_IGNORE_READS_AND_WRITES_END() \ + do { \ + ANNOTATE_IGNORE_WRITES_END(); \ + ANNOTATE_IGNORE_READS_END(); \ + } while (0) + +#ifdef __cplusplus +// ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads. +#define ANNOTATE_UNPROTECTED_READ(x) \ + absl::base_internal::AnnotateUnprotectedRead(x) + +#endif + +#else + +#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() // empty +#define ANNOTATE_IGNORE_READS_AND_WRITES_END() // empty +#define ANNOTATE_UNPROTECTED_READ(x) (x) + +#endif + +// ------------------------------------------------------------------------- +// Address sanitizer annotations + +#ifdef ABSL_HAVE_ADDRESS_SANITIZER +// Describe the current state of a contiguous container such as e.g. +// std::vector or std::string. For more details see +// sanitizer/common_interface_defs.h, which is provided by the compiler. 
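+// Usage sketch for ANNOTATE_UNPROTECTED_READ above (done_ and mu_ are
+// hypothetical members):
+//
+//   bool done = ANNOTATE_UNPROTECTED_READ(done_);  // racy peek, by design
+//   if (done) return;
+//   absl::MutexLock l(&mu_);  // slow path re-checks under the lock
+//   if (done_) return;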
+#include + +#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \ + __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid) +#define ADDRESS_SANITIZER_REDZONE(name) \ + struct { \ + char x[8] __attribute__((aligned(8))); \ + } name + +#else + +#define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) +#define ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "") + +#endif // ABSL_HAVE_ADDRESS_SANITIZER + +// ------------------------------------------------------------------------- +// Undefine the macros intended only for this file. + +#undef ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_ANNOTALYSIS_ENABLED +#undef ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED +#undef ABSL_INTERNAL_BEGIN_EXTERN_C +#undef ABSL_INTERNAL_END_EXTERN_C +#undef ABSL_INTERNAL_STATIC_INLINE + +#endif // ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_ diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/endian.h b/packages/react-native-executorch/third-party/include/absl/base/internal/endian.h new file mode 100644 index 000000000..4b630e76e --- /dev/null +++ b/packages/react-native-executorch/third-party/include/absl/base/internal/endian.h @@ -0,0 +1,282 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#ifndef ABSL_BASE_INTERNAL_ENDIAN_H_ +#define ABSL_BASE_INTERNAL_ENDIAN_H_ + +#include +#include + +#include "absl/base/casts.h" +#include "absl/base/config.h" +#include "absl/base/internal/unaligned_access.h" +#include "absl/base/nullability.h" +#include "absl/base/port.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +inline uint64_t gbswap_64(uint64_t host_int) { +#if ABSL_HAVE_BUILTIN(__builtin_bswap64) || defined(__GNUC__) + return __builtin_bswap64(host_int); +#elif defined(_MSC_VER) + return _byteswap_uint64(host_int); +#else + return (((host_int & uint64_t{0xFF}) << 56) | + ((host_int & uint64_t{0xFF00}) << 40) | + ((host_int & uint64_t{0xFF0000}) << 24) | + ((host_int & uint64_t{0xFF000000}) << 8) | + ((host_int & uint64_t{0xFF00000000}) >> 8) | + ((host_int & uint64_t{0xFF0000000000}) >> 24) | + ((host_int & uint64_t{0xFF000000000000}) >> 40) | + ((host_int & uint64_t{0xFF00000000000000}) >> 56)); +#endif +} + +inline uint32_t gbswap_32(uint32_t host_int) { +#if ABSL_HAVE_BUILTIN(__builtin_bswap32) || defined(__GNUC__) + return __builtin_bswap32(host_int); +#elif defined(_MSC_VER) + return _byteswap_ulong(host_int); +#else + return (((host_int & uint32_t{0xFF}) << 24) | + ((host_int & uint32_t{0xFF00}) << 8) | + ((host_int & uint32_t{0xFF0000}) >> 8) | + ((host_int & uint32_t{0xFF000000}) >> 24)); +#endif +} + +inline uint16_t gbswap_16(uint16_t host_int) { +#if ABSL_HAVE_BUILTIN(__builtin_bswap16) || defined(__GNUC__) + return __builtin_bswap16(host_int); +#elif defined(_MSC_VER) + return _byteswap_ushort(host_int); +#else + return (((host_int & uint16_t{0xFF}) << 8) | + ((host_int & uint16_t{0xFF00}) >> 8)); +#endif +} + +#ifdef ABSL_IS_LITTLE_ENDIAN + +// Portable definitions for htonl (host-to-network) and friends on little-endian +// architectures. +inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); } +inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); } +inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); } + +#elif defined ABSL_IS_BIG_ENDIAN + +// Portable definitions for htonl (host-to-network) etc on big-endian +// architectures. These definitions are simpler since the host byte order is the +// same as network byte order. +inline uint16_t ghtons(uint16_t x) { return x; } +inline uint32_t ghtonl(uint32_t x) { return x; } +inline uint64_t ghtonll(uint64_t x) { return x; } + +#else +#error "Unsupported byte order: Either ABSL_IS_BIG_ENDIAN or " \ + "ABSL_IS_LITTLE_ENDIAN must be defined" +#endif // byte order + +inline uint16_t gntohs(uint16_t x) { return ghtons(x); } +inline uint32_t gntohl(uint32_t x) { return ghtonl(x); } +inline uint64_t gntohll(uint64_t x) { return ghtonll(x); } + +// Utilities to convert numbers between the current hosts's native byte +// order and little-endian byte order +// +// Load/Store methods are alignment safe +namespace little_endian { +// Conversion functions. 
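+// Worked byte-swap example for the g* helpers above:
+// gbswap_32(0x01020304) == 0x04030201, and on a little-endian host
+// ghtonl(0x01020304) == 0x04030201 as well (network order is big-endian),
+// while on a big-endian host ghtonl is the identity. Round-trips hold on
+// either host:
+//
+//   uint32_t x = 0xDEADBEEF;
+//   assert(gntohl(ghtonl(x)) == x);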
+#ifdef ABSL_IS_LITTLE_ENDIAN + +inline uint16_t FromHost16(uint16_t x) { return x; } +inline uint16_t ToHost16(uint16_t x) { return x; } + +inline uint32_t FromHost32(uint32_t x) { return x; } +inline uint32_t ToHost32(uint32_t x) { return x; } + +inline uint64_t FromHost64(uint64_t x) { return x; } +inline uint64_t ToHost64(uint64_t x) { return x; } + +inline constexpr bool IsLittleEndian() { return true; } + +#elif defined ABSL_IS_BIG_ENDIAN + +inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); } +inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); } + +inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); } +inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); } + +inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); } +inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); } + +inline constexpr bool IsLittleEndian() { return false; } + +#endif /* ENDIAN */ + +inline uint8_t FromHost(uint8_t x) { return x; } +inline uint16_t FromHost(uint16_t x) { return FromHost16(x); } +inline uint32_t FromHost(uint32_t x) { return FromHost32(x); } +inline uint64_t FromHost(uint64_t x) { return FromHost64(x); } +inline uint8_t ToHost(uint8_t x) { return x; } +inline uint16_t ToHost(uint16_t x) { return ToHost16(x); } +inline uint32_t ToHost(uint32_t x) { return ToHost32(x); } +inline uint64_t ToHost(uint64_t x) { return ToHost64(x); } + +inline int8_t FromHost(int8_t x) { return x; } +inline int16_t FromHost(int16_t x) { + return bit_cast(FromHost16(bit_cast(x))); +} +inline int32_t FromHost(int32_t x) { + return bit_cast(FromHost32(bit_cast(x))); +} +inline int64_t FromHost(int64_t x) { + return bit_cast(FromHost64(bit_cast(x))); +} +inline int8_t ToHost(int8_t x) { return x; } +inline int16_t ToHost(int16_t x) { + return bit_cast(ToHost16(bit_cast(x))); +} +inline int32_t ToHost(int32_t x) { + return bit_cast(ToHost32(bit_cast(x))); +} +inline int64_t ToHost(int64_t x) { + return bit_cast(ToHost64(bit_cast(x))); +} + +// Functions to do unaligned loads and stores in little-endian order. 
+inline uint16_t Load16(absl::Nonnull p) { + return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p)); +} + +inline void Store16(absl::Nonnull p, uint16_t v) { + ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v)); +} + +inline uint32_t Load32(absl::Nonnull p) { + return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p)); +} + +inline void Store32(absl::Nonnull p, uint32_t v) { + ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v)); +} + +inline uint64_t Load64(absl::Nonnull p) { + return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p)); +} + +inline void Store64(absl::Nonnull p, uint64_t v) { + ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v)); +} + +} // namespace little_endian + +// Utilities to convert numbers between the current hosts's native byte +// order and big-endian byte order (same as network byte order) +// +// Load/Store methods are alignment safe +namespace big_endian { +#ifdef ABSL_IS_LITTLE_ENDIAN + +inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); } +inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); } + +inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); } +inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); } + +inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); } +inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); } + +inline constexpr bool IsLittleEndian() { return true; } + +#elif defined ABSL_IS_BIG_ENDIAN + +inline uint16_t FromHost16(uint16_t x) { return x; } +inline uint16_t ToHost16(uint16_t x) { return x; } + +inline uint32_t FromHost32(uint32_t x) { return x; } +inline uint32_t ToHost32(uint32_t x) { return x; } + +inline uint64_t FromHost64(uint64_t x) { return x; } +inline uint64_t ToHost64(uint64_t x) { return x; } + +inline constexpr bool IsLittleEndian() { return false; } + +#endif /* ENDIAN */ + +inline uint8_t FromHost(uint8_t x) { return x; } +inline uint16_t FromHost(uint16_t x) { return FromHost16(x); } +inline uint32_t FromHost(uint32_t x) { return FromHost32(x); } +inline uint64_t FromHost(uint64_t x) { return FromHost64(x); } +inline uint8_t ToHost(uint8_t x) { return x; } +inline uint16_t ToHost(uint16_t x) { return ToHost16(x); } +inline uint32_t ToHost(uint32_t x) { return ToHost32(x); } +inline uint64_t ToHost(uint64_t x) { return ToHost64(x); } + +inline int8_t FromHost(int8_t x) { return x; } +inline int16_t FromHost(int16_t x) { + return bit_cast(FromHost16(bit_cast(x))); +} +inline int32_t FromHost(int32_t x) { + return bit_cast(FromHost32(bit_cast(x))); +} +inline int64_t FromHost(int64_t x) { + return bit_cast(FromHost64(bit_cast(x))); +} +inline int8_t ToHost(int8_t x) { return x; } +inline int16_t ToHost(int16_t x) { + return bit_cast(ToHost16(bit_cast(x))); +} +inline int32_t ToHost(int32_t x) { + return bit_cast(ToHost32(bit_cast(x))); +} +inline int64_t ToHost(int64_t x) { + return bit_cast(ToHost64(bit_cast(x))); +} + +// Functions to do unaligned loads and stores in big-endian order. 
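+// Round-trip sketch for the Load/Store helpers (Store32/Load32 are defined
+// just below; the little_endian variants above behave the same way,
+// byte-reversed; buf is deliberately misaligned):
+//
+//   char buf[8];
+//   big_endian::Store32(buf + 1, 0x01020304);  // writes bytes 01 02 03 04
+//   assert(big_endian::Load32(buf + 1) == 0x01020304);
+//
+// This is the usual way to read a length prefix out of a network packet
+// without alignment UB.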
+inline uint16_t Load16(absl::Nonnull p) { + return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p)); +} + +inline void Store16(absl::Nonnull p, uint16_t v) { + ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v)); +} + +inline uint32_t Load32(absl::Nonnull p) { + return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p)); +} + +inline void Store32(absl::Nonnull p, uint32_t v) { + ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v)); +} + +inline uint64_t Load64(absl::Nonnull p) { + return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p)); +} + +inline void Store64(absl::Nonnull p, uint64_t v) { + ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v)); +} + +} // namespace big_endian + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_ENDIAN_H_ diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/errno_saver.h b/packages/react-native-executorch/third-party/include/absl/base/internal/errno_saver.h new file mode 100644 index 000000000..6adacc523 --- /dev/null +++ b/packages/react-native-executorch/third-party/include/absl/base/internal/errno_saver.h @@ -0,0 +1,43 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_ERRNO_SAVER_H_ +#define ABSL_BASE_INTERNAL_ERRNO_SAVER_H_ + +#include + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// `ErrnoSaver` captures the value of `errno` upon construction and restores it +// upon deletion. It is used in low-level code and must be super fast. Do not +// add instrumentation, even in debug modes. +class ErrnoSaver { +public: + ErrnoSaver() : saved_errno_(errno) {} + ~ErrnoSaver() { errno = saved_errno_; } + int operator()() const { return saved_errno_; } + +private: + const int saved_errno_; +}; + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_ERRNO_SAVER_H_ diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/exception_safety_testing.h b/packages/react-native-executorch/third-party/include/absl/base/internal/exception_safety_testing.h new file mode 100644 index 000000000..ff15dd3a4 --- /dev/null +++ b/packages/react-native-executorch/third-party/include/absl/base/internal/exception_safety_testing.h @@ -0,0 +1,1108 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
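+// A typical use of ErrnoSaver (from errno_saver.h above) is keeping errno
+// intact across a signal handler (a sketch; SignalHandler is hypothetical):
+//
+//   void SignalHandler(int signo) {
+//     absl::base_internal::ErrnoSaver errno_saver;
+//     // ... work that may clobber errno ...
+//   }  // errno is restored here; errno_saver() returns the saved value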
+ +// Utilities for testing exception-safety + +#ifndef ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_ +#define ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_ + +#include "absl/base/config.h" + +#ifdef ABSL_HAVE_EXCEPTIONS + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "absl/base/internal/pretty_function.h" +#include "absl/memory/memory.h" +#include "absl/meta/type_traits.h" +#include "absl/strings/string_view.h" +#include "absl/strings/substitute.h" +#include "absl/utility/utility.h" +#include "gtest/gtest.h" + +namespace testing { + +enum class TypeSpec; +enum class AllocSpec; + +constexpr TypeSpec operator|(TypeSpec a, TypeSpec b) { + using T = absl::underlying_type_t; + return static_cast(static_cast(a) | static_cast(b)); +} + +constexpr TypeSpec operator&(TypeSpec a, TypeSpec b) { + using T = absl::underlying_type_t; + return static_cast(static_cast(a) & static_cast(b)); +} + +constexpr AllocSpec operator|(AllocSpec a, AllocSpec b) { + using T = absl::underlying_type_t; + return static_cast(static_cast(a) | static_cast(b)); +} + +constexpr AllocSpec operator&(AllocSpec a, AllocSpec b) { + using T = absl::underlying_type_t; + return static_cast(static_cast(a) & static_cast(b)); +} + +namespace exceptions_internal { + +std::string GetSpecString(TypeSpec); +std::string GetSpecString(AllocSpec); + +struct NoThrowTag {}; +struct StrongGuaranteeTagType {}; + +// A simple exception class. We throw this so that test code can catch +// exceptions specifically thrown by ThrowingValue. +class TestException { +public: + explicit TestException(absl::string_view msg) : msg_(msg) {} + virtual ~TestException() {} + virtual const char *what() const noexcept { return msg_.c_str(); } + +private: + std::string msg_; +}; + +// TestBadAllocException exists because allocation functions must throw an +// exception which can be caught by a handler of std::bad_alloc. We use a child +// class of std::bad_alloc so we can customise the error message, and also +// derive from TestException so we don't accidentally end up catching an actual +// bad_alloc exception in TestExceptionSafety. +class TestBadAllocException : public std::bad_alloc, public TestException { +public: + explicit TestBadAllocException(absl::string_view msg) : TestException(msg) {} + using TestException::what; +}; + +extern int countdown; + +// Allows the countdown variable to be set manually (defaulting to the initial +// value of 0) +inline void SetCountdown(int i = 0) { countdown = i; } +// Sets the countdown to the terminal value -1 +inline void UnsetCountdown() { SetCountdown(-1); } + +void MaybeThrow(absl::string_view msg, bool throw_bad_alloc = false); + +testing::AssertionResult FailureMessage(const TestException &e, + int countdown) noexcept; + +struct TrackedAddress { + bool is_alive; + std::string description; +}; + +// Inspects the constructions and destructions of anything inheriting from +// TrackedObject. This allows us to safely "leak" TrackedObjects, as +// ConstructorTracker will destroy everything left over in its destructor. 
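+// Roughly, how the countdown drives these tests: SetCountdown(2) arms the
+// mechanism so that the first two MaybeThrow() calls return normally and the
+// third throws TestException; UnsetCountdown() (countdown == -1) means
+// "never throw". The test loops below retry an operation with
+// count = 0, 1, 2, ... so every throwing call site is exercised in turn.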
+class ConstructorTracker { +public: + explicit ConstructorTracker(int count) : countdown_(count) { + assert(current_tracker_instance_ == nullptr); + current_tracker_instance_ = this; + } + + ~ConstructorTracker() { + assert(current_tracker_instance_ == this); + current_tracker_instance_ = nullptr; + + for (auto &it : address_map_) { + void *address = it.first; + TrackedAddress &tracked_address = it.second; + if (tracked_address.is_alive) { + ADD_FAILURE() << ErrorMessage(address, tracked_address.description, + countdown_, "Object was not destroyed."); + } + } + } + + static void ObjectConstructed(void *address, std::string description) { + if (!CurrentlyTracking()) + return; + + TrackedAddress &tracked_address = + current_tracker_instance_->address_map_[address]; + if (tracked_address.is_alive) { + ADD_FAILURE() << ErrorMessage( + address, tracked_address.description, + current_tracker_instance_->countdown_, + "Object was re-constructed. Current object was constructed by " + + description); + } + tracked_address = {true, std::move(description)}; + } + + static void ObjectDestructed(void *address) { + if (!CurrentlyTracking()) + return; + + auto it = current_tracker_instance_->address_map_.find(address); + // Not tracked. Ignore. + if (it == current_tracker_instance_->address_map_.end()) + return; + + TrackedAddress &tracked_address = it->second; + if (!tracked_address.is_alive) { + ADD_FAILURE() << ErrorMessage(address, tracked_address.description, + current_tracker_instance_->countdown_, + "Object was re-destroyed."); + } + tracked_address.is_alive = false; + } + +private: + static bool CurrentlyTracking() { + return current_tracker_instance_ != nullptr; + } + + static std::string ErrorMessage(void *address, + const std::string &address_description, + int countdown, + const std::string &error_description) { + return absl::Substitute("With coundtown at $0:\n" + " $1\n" + " Object originally constructed by $2\n" + " Object address: $3\n", + countdown, error_description, address_description, + address); + } + + std::unordered_map address_map_; + int countdown_; + + static ConstructorTracker *current_tracker_instance_; +}; + +class TrackedObject { +public: + TrackedObject(const TrackedObject &) = delete; + TrackedObject(TrackedObject &&) = delete; + +protected: + explicit TrackedObject(std::string description) { + ConstructorTracker::ObjectConstructed(this, std::move(description)); + } + + ~TrackedObject() noexcept { ConstructorTracker::ObjectDestructed(this); } +}; +} // namespace exceptions_internal + +extern exceptions_internal::NoThrowTag nothrow_ctor; + +extern exceptions_internal::StrongGuaranteeTagType strong_guarantee; + +// A test class which is convertible to bool. The conversion can be +// instrumented to throw at a controlled time. +class ThrowingBool { +public: + ThrowingBool(bool b) noexcept : b_(b) {} // NOLINT(runtime/explicit) + operator bool() const { // NOLINT + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return b_; + } + +private: + bool b_; +}; + +/* + * Configuration enum for the ThrowingValue type that defines behavior for the + * lifetime of the instance. Use testing::nothrow_ctor to prevent the integer + * constructor from throwing. 
+ * + * kEverythingThrows: Every operation can throw an exception + * kNoThrowCopy: Copy construction and copy assignment will not throw + * kNoThrowMove: Move construction and move assignment will not throw + * kNoThrowNew: Overloaded operators new and new[] will not throw + */ +enum class TypeSpec { + kEverythingThrows = 0, + kNoThrowCopy = 1, + kNoThrowMove = 1 << 1, + kNoThrowNew = 1 << 2, +}; + +/* + * A testing class instrumented to throw an exception at a controlled time. + * + * ThrowingValue implements a slightly relaxed version of the Regular concept -- + * that is it's a value type with the expected semantics. It also implements + * arithmetic operations. It doesn't implement member and pointer operators + * like operator-> or operator[]. + * + * ThrowingValue can be instrumented to have certain operations be noexcept by + * using compile-time bitfield template arguments. That is, to make an + * ThrowingValue which has noexcept move construction/assignment and noexcept + * copy construction/assignment, use the following: + * ThrowingValue my_thrwr{val}; + */ +template +class ThrowingValue : private exceptions_internal::TrackedObject { + static constexpr bool IsSpecified(TypeSpec spec) { + return static_cast(Spec & spec); + } + + static constexpr int kDefaultValue = 0; + static constexpr int kBadValue = 938550620; + +public: + ThrowingValue() : TrackedObject(GetInstanceString(kDefaultValue)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ = kDefaultValue; + } + + ThrowingValue(const ThrowingValue &other) noexcept( + IsSpecified(TypeSpec::kNoThrowCopy)) + : TrackedObject(GetInstanceString(other.dummy_)) { + if (!IsSpecified(TypeSpec::kNoThrowCopy)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + } + dummy_ = other.dummy_; + } + + ThrowingValue(ThrowingValue &&other) noexcept( + IsSpecified(TypeSpec::kNoThrowMove)) + : TrackedObject(GetInstanceString(other.dummy_)) { + if (!IsSpecified(TypeSpec::kNoThrowMove)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + } + dummy_ = other.dummy_; + } + + explicit ThrowingValue(int i) : TrackedObject(GetInstanceString(i)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ = i; + } + + ThrowingValue(int i, exceptions_internal::NoThrowTag) noexcept + : TrackedObject(GetInstanceString(i)), dummy_(i) {} + + // absl expects nothrow destructors + ~ThrowingValue() noexcept = default; + + ThrowingValue &operator=(const ThrowingValue &other) noexcept( + IsSpecified(TypeSpec::kNoThrowCopy)) { + dummy_ = kBadValue; + if (!IsSpecified(TypeSpec::kNoThrowCopy)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + } + dummy_ = other.dummy_; + return *this; + } + + ThrowingValue &operator=(ThrowingValue &&other) noexcept( + IsSpecified(TypeSpec::kNoThrowMove)) { + dummy_ = kBadValue; + if (!IsSpecified(TypeSpec::kNoThrowMove)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + } + dummy_ = other.dummy_; + return *this; + } + + // Arithmetic Operators + ThrowingValue operator+(const ThrowingValue &other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ + other.dummy_, nothrow_ctor); + } + + ThrowingValue operator+() const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_, nothrow_ctor); + } + + ThrowingValue operator-(const ThrowingValue &other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ - other.dummy_, nothrow_ctor); + } + + ThrowingValue 
operator-() const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(-dummy_, nothrow_ctor); + } + + ThrowingValue &operator++() { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + ++dummy_; + return *this; + } + + ThrowingValue operator++(int) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + auto out = ThrowingValue(dummy_, nothrow_ctor); + ++dummy_; + return out; + } + + ThrowingValue &operator--() { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + --dummy_; + return *this; + } + + ThrowingValue operator--(int) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + auto out = ThrowingValue(dummy_, nothrow_ctor); + --dummy_; + return out; + } + + ThrowingValue operator*(const ThrowingValue &other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ * other.dummy_, nothrow_ctor); + } + + ThrowingValue operator/(const ThrowingValue &other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ / other.dummy_, nothrow_ctor); + } + + ThrowingValue operator%(const ThrowingValue &other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ % other.dummy_, nothrow_ctor); + } + + ThrowingValue operator<<(int shift) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ << shift, nothrow_ctor); + } + + ThrowingValue operator>>(int shift) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ >> shift, nothrow_ctor); + } + + // Comparison Operators + // NOTE: We use `ThrowingBool` instead of `bool` because most STL + // types/containers requires T to be convertible to bool. + friend ThrowingBool operator==(const ThrowingValue &a, + const ThrowingValue &b) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ == b.dummy_; + } + friend ThrowingBool operator!=(const ThrowingValue &a, + const ThrowingValue &b) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ != b.dummy_; + } + friend ThrowingBool operator<(const ThrowingValue &a, + const ThrowingValue &b) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ < b.dummy_; + } + friend ThrowingBool operator<=(const ThrowingValue &a, + const ThrowingValue &b) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ <= b.dummy_; + } + friend ThrowingBool operator>(const ThrowingValue &a, + const ThrowingValue &b) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ > b.dummy_; + } + friend ThrowingBool operator>=(const ThrowingValue &a, + const ThrowingValue &b) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return a.dummy_ >= b.dummy_; + } + + // Logical Operators + ThrowingBool operator!() const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return !dummy_; + } + + ThrowingBool operator&&(const ThrowingValue &other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return dummy_ && other.dummy_; + } + + ThrowingBool operator||(const ThrowingValue &other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return dummy_ || other.dummy_; + } + + // Bitwise Logical Operators + ThrowingValue operator~() const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(~dummy_, nothrow_ctor); + } + + ThrowingValue operator&(const ThrowingValue &other) const { + 
exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ & other.dummy_, nothrow_ctor); + } + + ThrowingValue operator|(const ThrowingValue &other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ | other.dummy_, nothrow_ctor); + } + + ThrowingValue operator^(const ThrowingValue &other) const { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return ThrowingValue(dummy_ ^ other.dummy_, nothrow_ctor); + } + + // Compound Assignment operators + ThrowingValue &operator+=(const ThrowingValue &other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ += other.dummy_; + return *this; + } + + ThrowingValue &operator-=(const ThrowingValue &other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ -= other.dummy_; + return *this; + } + + ThrowingValue &operator*=(const ThrowingValue &other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ *= other.dummy_; + return *this; + } + + ThrowingValue &operator/=(const ThrowingValue &other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ /= other.dummy_; + return *this; + } + + ThrowingValue &operator%=(const ThrowingValue &other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ %= other.dummy_; + return *this; + } + + ThrowingValue &operator&=(const ThrowingValue &other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ &= other.dummy_; + return *this; + } + + ThrowingValue &operator|=(const ThrowingValue &other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ |= other.dummy_; + return *this; + } + + ThrowingValue &operator^=(const ThrowingValue &other) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ ^= other.dummy_; + return *this; + } + + ThrowingValue &operator<<=(int shift) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ <<= shift; + return *this; + } + + ThrowingValue &operator>>=(int shift) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ >>= shift; + return *this; + } + + // Pointer operators + void operator&() const = delete; // NOLINT(runtime/operator) + + // Stream operators + friend std::ostream &operator<<(std::ostream &os, const ThrowingValue &tv) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return os << GetInstanceString(tv.dummy_); + } + + friend std::istream &operator>>(std::istream &is, const ThrowingValue &) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + return is; + } + + // Memory management operators + static void * + operator new(size_t s) noexcept(IsSpecified(TypeSpec::kNoThrowNew)) { + if (!IsSpecified(TypeSpec::kNoThrowNew)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new(s); + } + + static void * + operator new[](size_t s) noexcept(IsSpecified(TypeSpec::kNoThrowNew)) { + if (!IsSpecified(TypeSpec::kNoThrowNew)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new[](s); + } + + template + static void * + operator new(size_t s, + Args &&...args) noexcept(IsSpecified(TypeSpec::kNoThrowNew)) { + if (!IsSpecified(TypeSpec::kNoThrowNew)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new(s, std::forward(args)...); + } + + template + static void * + operator new[](size_t s, + Args &&...args) noexcept(IsSpecified(TypeSpec::kNoThrowNew)) { + if (!IsSpecified(TypeSpec::kNoThrowNew)) { + 
exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); + } + return ::operator new[](s, std::forward(args)...); + } + + // Abseil doesn't support throwing overloaded operator delete. These are + // provided so a throwing operator-new can clean up after itself. + void operator delete(void *p) noexcept { ::operator delete(p); } + + template + void operator delete(void *p, Args &&...args) noexcept { + ::operator delete(p, std::forward(args)...); + } + + void operator delete[](void *p) noexcept { return ::operator delete[](p); } + + template + void operator delete[](void *p, Args &&...args) noexcept { + return ::operator delete[](p, std::forward(args)...); + } + + // Non-standard access to the actual contained value. No need for this to + // throw. + int &Get() noexcept { return dummy_; } + const int &Get() const noexcept { return dummy_; } + +private: + static std::string GetInstanceString(int dummy) { + return absl::StrCat("ThrowingValue<", + exceptions_internal::GetSpecString(Spec), ">(", dummy, + ")"); + } + + int dummy_; +}; +// While not having to do with exceptions, explicitly delete comma operator, to +// make sure we don't use it on user-supplied types. +template +void operator,(const ThrowingValue &, T &&) = delete; +template +void operator,(T &&, const ThrowingValue &) = delete; + +/* + * Configuration enum for the ThrowingAllocator type that defines behavior for + * the lifetime of the instance. + * + * kEverythingThrows: Calls to the member functions may throw + * kNoThrowAllocate: Calls to the member functions will not throw + */ +enum class AllocSpec { + kEverythingThrows = 0, + kNoThrowAllocate = 1, +}; + +/* + * An allocator type which is instrumented to throw at a controlled time, or not + * to throw, using AllocSpec. The supported settings are the default of every + * function which is allowed to throw in a conforming allocator possibly + * throwing, or nothing throws, in line with the ABSL_ALLOCATOR_THROWS + * configuration macro. + */ +template +class ThrowingAllocator : private exceptions_internal::TrackedObject { + static constexpr bool IsSpecified(AllocSpec spec) { + return static_cast(Spec & spec); + } + +public: + using pointer = T *; + using const_pointer = const T *; + using reference = T &; + using const_reference = const T &; + using void_pointer = void *; + using const_void_pointer = const void *; + using value_type = T; + using size_type = size_t; + using difference_type = ptrdiff_t; + + using is_nothrow = + std::integral_constant; + using propagate_on_container_copy_assignment = std::true_type; + using propagate_on_container_move_assignment = std::true_type; + using propagate_on_container_swap = std::true_type; + using is_always_equal = std::false_type; + + ThrowingAllocator() : TrackedObject(GetInstanceString(next_id_)) { + exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); + dummy_ = std::make_shared(next_id_++); + } + + template + ThrowingAllocator(const ThrowingAllocator &other) noexcept // NOLINT + : TrackedObject(GetInstanceString(*other.State())), + dummy_(other.State()) {} + + // According to C++11 standard [17.6.3.5], Table 28, the move/copy ctors of + // allocator shall not exit via an exception, thus they are marked noexcept. 
+ ThrowingAllocator(const ThrowingAllocator &other) noexcept + : TrackedObject(GetInstanceString(*other.State())), + dummy_(other.State()) {} + + template + ThrowingAllocator(ThrowingAllocator &&other) noexcept // NOLINT + : TrackedObject(GetInstanceString(*other.State())), + dummy_(std::move(other.State())) {} + + ThrowingAllocator(ThrowingAllocator &&other) noexcept + : TrackedObject(GetInstanceString(*other.State())), + dummy_(std::move(other.State())) {} + + ~ThrowingAllocator() noexcept = default; + + ThrowingAllocator &operator=(const ThrowingAllocator &other) noexcept { + dummy_ = other.State(); + return *this; + } + + template + ThrowingAllocator & + operator=(const ThrowingAllocator &other) noexcept { + dummy_ = other.State(); + return *this; + } + + template + ThrowingAllocator &operator=(ThrowingAllocator &&other) noexcept { + dummy_ = std::move(other.State()); + return *this; + } + + template struct rebind { + using other = ThrowingAllocator; + }; + + pointer + allocate(size_type n) noexcept(IsSpecified(AllocSpec::kNoThrowAllocate)) { + ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); + return static_cast(::operator new(n * sizeof(T))); + } + + pointer allocate(size_type n, const_void_pointer) noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate)) { + return allocate(n); + } + + void deallocate(pointer ptr, size_type) noexcept { + ReadState(); + ::operator delete(static_cast(ptr)); + } + + template + void + construct(U *ptr, + Args &&...args) noexcept(IsSpecified(AllocSpec::kNoThrowAllocate)) { + ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); + ::new (static_cast(ptr)) U(std::forward(args)...); + } + + template void destroy(U *p) noexcept { + ReadState(); + p->~U(); + } + + size_type max_size() const noexcept { + return (std::numeric_limits::max)() / sizeof(value_type); + } + + ThrowingAllocator select_on_container_copy_construction() noexcept( + IsSpecified(AllocSpec::kNoThrowAllocate)) { + ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); + return *this; + } + + template + bool operator==(const ThrowingAllocator &other) const noexcept { + return dummy_ == other.dummy_; + } + + template + bool operator!=(const ThrowingAllocator &other) const noexcept { + return dummy_ != other.dummy_; + } + + template friend class ThrowingAllocator; + +private: + static std::string GetInstanceString(int dummy) { + return absl::StrCat("ThrowingAllocator<", + exceptions_internal::GetSpecString(Spec), ">(", dummy, + ")"); + } + + const std::shared_ptr &State() const { return dummy_; } + std::shared_ptr &State() { return dummy_; } + + void ReadState() { + // we know that this will never be true, but the compiler doesn't, so this + // should safely force a read of the value. + if (*dummy_ < 0) + std::abort(); + } + + void ReadStateAndMaybeThrow(absl::string_view msg) const { + if (!IsSpecified(AllocSpec::kNoThrowAllocate)) { + exceptions_internal::MaybeThrow( + absl::Substitute("Allocator id $0 threw from $1", *dummy_, msg)); + } + } + + static int next_id_; + std::shared_ptr dummy_; +}; + +template +int ThrowingAllocator::next_id_ = 0; + +// Tests for resource leaks by attempting to construct a T using args repeatedly +// until successful, using the countdown method. Side effects can then be +// tested for resource leaks. 
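+//
+// Usage sketch (the element/allocator combination is illustrative, not the
+// only supported one):
+//
+//   using Thrower = testing::ThrowingValue<testing::TypeSpec::kNoThrowMove>;
+//   using Alloc = testing::ThrowingAllocator<Thrower>;
+//   testing::TestThrowingCtor<std::vector<Thrower, Alloc>>(
+//       10, Thrower(42, testing::nothrow_ctor));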
+template <typename T, typename... Args>
+void TestThrowingCtor(Args &&...args) {
+  struct Cleanup {
+    ~Cleanup() { exceptions_internal::UnsetCountdown(); }
+  } c;
+  for (int count = 0;; ++count) {
+    exceptions_internal::ConstructorTracker ct(count);
+    exceptions_internal::SetCountdown(count);
+    try {
+      T temp(std::forward<Args>(args)...);
+      static_cast<void>(temp);
+      break;
+    } catch (const exceptions_internal::TestException &) {
+    }
+  }
+}
+
+// Tests the nothrow guarantee of the provided nullary operation. If an
+// exception is thrown, the result will be AssertionFailure(). Otherwise, it
+// will be AssertionSuccess().
+template <typename Operation>
+testing::AssertionResult TestNothrowOp(const Operation &operation) {
+  struct Cleanup {
+    Cleanup() { exceptions_internal::SetCountdown(); }
+    ~Cleanup() { exceptions_internal::UnsetCountdown(); }
+  } c;
+  try {
+    operation();
+    return testing::AssertionSuccess();
+  } catch (const exceptions_internal::TestException &) {
+    return testing::AssertionFailure()
+           << "TestException thrown during call to operation() when nothrow "
+              "guarantee was expected.";
+  } catch (...) {
+    return testing::AssertionFailure()
+           << "Unknown exception thrown during call to operation() when "
+              "nothrow guarantee was expected.";
+  }
+}
+
+namespace exceptions_internal {
+
+// Dummy struct for ExceptionSafetyTestBuilder<> partial state.
+struct UninitializedT {};
+
+template <typename T>
+class DefaultFactory {
+public:
+  explicit DefaultFactory(const T &t) : t_(t) {}
+  std::unique_ptr<T> operator()() const { return absl::make_unique<T>(t_); }
+
+private:
+  T t_;
+};
+
+template <size_t LazyContractsCount, typename LazyFactory,
+          typename LazyOperation>
+using EnableIfTestable = typename absl::enable_if_t<
+    LazyContractsCount != 0 &&
+    !std::is_same<LazyFactory, UninitializedT>::value &&
+    !std::is_same<LazyOperation, UninitializedT>::value>;
+
+template <typename Factory = UninitializedT,
+          typename Operation = UninitializedT, typename... Contracts>
+class ExceptionSafetyTestBuilder;
+
+}  // namespace exceptions_internal
+
+/*
+ * Constructs an empty ExceptionSafetyTestBuilder. All
+ * ExceptionSafetyTestBuilder objects are immutable and all With[thing] mutation
+ * methods return new instances of ExceptionSafetyTestBuilder.
+ *
+ * In order to test a T for exception safety, a factory for that T, a testable
+ * operation, and at least one contract callback returning an assertion
+ * result must be applied using the respective methods.
+ */
+exceptions_internal::ExceptionSafetyTestBuilder<> MakeExceptionSafetyTester();
+
+namespace exceptions_internal {
+
+template <typename T>
+struct IsUniquePtr : std::false_type {};
+
+template <typename T, typename D>
+struct IsUniquePtr<std::unique_ptr<T, D>> : std::true_type {};
+
+template <typename Factory>
+struct FactoryPtrTypeHelper {
+  using type = decltype(std::declval<const Factory &>()());
+
+  static_assert(IsUniquePtr<type>::value, "Factories must return a unique_ptr");
+};
+
+template <typename Factory>
+using FactoryPtrType = typename FactoryPtrTypeHelper<Factory>::type;
+
+template <typename Factory>
+using FactoryElementType = typename FactoryPtrType<Factory>::element_type;
+
+template <typename T>
+class ExceptionSafetyTest {
+  using Factory = std::function<std::unique_ptr<T>()>;
+  using Operation = std::function<void(T *)>;
+  using Contract = std::function<AssertionResult(T *)>;
+
+public:
+  template <typename... Contracts>
+  explicit ExceptionSafetyTest(const Factory &f, const Operation &op,
+                               const Contracts &...contracts)
+      : factory_(f), operation_(op), contracts_{WrapContract(contracts)...} {}
+
+  AssertionResult Test() const {
+    for (int count = 0;; ++count) {
+      exceptions_internal::ConstructorTracker ct(count);
+
+      for (const auto &contract : contracts_) {
+        auto t_ptr = factory_();
+        try {
+          SetCountdown(count);
+          operation_(t_ptr.get());
+          // Unset for the case that the operation throws no exceptions, which
+          // would leave the countdown set and break the *next* exception
+          // safety test after this one.
+          UnsetCountdown();
+          return AssertionSuccess();
+        } catch (const exceptions_internal::TestException &e) {
+          if (!contract(t_ptr.get())) {
+            return AssertionFailure() << e.what() << " failed contract check";
+          }
+        }
+      }
+    }
+  }
+
+private:
+  template <typename ContractFn>
+  Contract WrapContract(const ContractFn &contract) {
+    return [contract](T *t_ptr) { return AssertionResult(contract(t_ptr)); };
+  }
+
+  Contract WrapContract(StrongGuaranteeTagType) {
+    return [this](T *t_ptr) { return AssertionResult(*factory_() == *t_ptr); };
+  }
+
+  Factory factory_;
+  Operation operation_;
+  std::vector<Contract> contracts_;
+};
+
+/*
+ * Builds a tester object that tests if performing an operation on a T follows
+ * exception safety guarantees. Verification is done via contract assertion
+ * callbacks applied to T instances post-throw.
+ *
+ * Template parameters for ExceptionSafetyTestBuilder:
+ *
+ * - Factory: The factory object (passed in via tester.WithFactory(...) or
+ *   tester.WithInitialValue(...)) must be invocable with the signature
+ *   `std::unique_ptr<T> operator()() const` where T is the type being tested.
+ *   It is used for reliably creating identical T instances to test on.
+ *
+ * - Operation: The operation object (passed in via tester.WithOperation(...)
+ *   or tester.Test(...)) must be invocable with the signature
+ *   `void operator()(T*) const` where T is the type being tested. It is used
+ *   for performing steps on a T instance that may throw and that need to be
+ *   checked for exception safety. Each call to the operation will receive a
+ *   fresh T instance so it's free to modify and destroy the T instances as it
+ *   pleases.
+ *
+ * - Contracts...: The contract assertion callback objects (passed in via
+ *   tester.WithContracts(...)) must be invocable with the signature
+ *   `testing::AssertionResult operator()(T*) const` where T is the type being
+ *   tested. Contract assertion callbacks are provided T instances post-throw.
+ *   They must return testing::AssertionSuccess when the type contracts of the
+ *   provided T instance hold. If the type contracts of the T instance do not
+ *   hold, they must return testing::AssertionFailure. Execution order of
+ *   Contracts... is unspecified. They will each individually get a fresh T
+ *   instance so they are free to modify and destroy the T instances as they
+ *   please.
+ */
+template <typename Factory, typename Operation, typename... Contracts>
+class ExceptionSafetyTestBuilder {
+public:
+  /*
+   * Returns a new ExceptionSafetyTestBuilder with an included T factory based
+   * on the provided T instance. The existing factory will not be included in
+   * the newly created tester instance. The created factory returns a new T
+   * instance by copy-constructing the provided const T& t.
+   *
+   * Preconditions for tester.WithInitialValue(const T& t):
+   *
+   * - The const T& t object must be copy-constructible where T is the type
+   *   being tested. For non-copy-constructible objects, use the method
+   *   tester.WithFactory(...).
+   */
+  template <typename T>
+  ExceptionSafetyTestBuilder<DefaultFactory<T>, Operation, Contracts...>
+  WithInitialValue(const T &t) const {
+    return WithFactory(DefaultFactory<T>(t));
+  }
+
+  /*
+   * Returns a new ExceptionSafetyTestBuilder with the provided T factory
+   * included. The existing factory will not be included in the newly-created
+   * tester instance. This method is intended for use with types lacking a copy
+   * constructor. Types that can be copy-constructed should instead use the
+   * method tester.WithInitialValue(...).
+   */
+  template <typename NewFactory>
+  ExceptionSafetyTestBuilder<absl::decay_t<NewFactory>, Operation, Contracts...>
+  WithFactory(const NewFactory &new_factory) const {
+    return {new_factory, operation_, contracts_};
+  }
+
+  /*
+   * Returns a new ExceptionSafetyTestBuilder with the provided testable
+   * operation included. The existing operation will not be included in the
+   * newly created tester.
+   */
+  template <typename NewOperation>
+  ExceptionSafetyTestBuilder<Factory, absl::decay_t<NewOperation>, Contracts...>
+  WithOperation(const NewOperation &new_operation) const {
+    return {factory_, new_operation, contracts_};
+  }
+
+  /*
+   * Returns a new ExceptionSafetyTestBuilder with the provided MoreContracts...
+   * combined with the Contracts... that were already included in the instance
+   * on which the method was called. Contracts... cannot be removed or replaced
+   * once added to an ExceptionSafetyTestBuilder instance. A fresh object must
+   * be created in order to get an empty Contracts... list.
+   *
+   * In addition to passing in custom contract assertion callbacks, this method
+   * accepts `testing::strong_guarantee` as an argument which checks T instances
+   * post-throw against freshly created T instances via operator== to verify
+   * that any state changes made during the execution of the operation were
+   * properly rolled back.
+   */
+  template <typename... MoreContracts>
+  ExceptionSafetyTestBuilder<Factory, Operation, Contracts...,
+                             absl::decay_t<MoreContracts>...>
+  WithContracts(const MoreContracts &...more_contracts) const {
+    return {
+        factory_, operation_,
+        std::tuple_cat(contracts_, std::tuple<absl::decay_t<MoreContracts>...>(
+                                       more_contracts...))};
+  }
+
+  /*
+   * Returns a testing::AssertionResult that is the reduced result of the
+   * exception safety algorithm. The algorithm short circuits and returns
+   * AssertionFailure after the first contract callback returns an
+   * AssertionFailure. Otherwise, if all contract callbacks return an
+   * AssertionSuccess, the reduced result is AssertionSuccess.
+   *
+   * The passed-in testable operation will not be saved in a new tester instance
+   * nor will it modify/replace the existing tester instance. This is useful
+   * when each operation being tested is unique and does not need to be reused.
+   *
+   * Preconditions for tester.Test(const NewOperation& new_operation):
+   *
+   * - May only be called after at least one contract assertion callback and a
+   *   factory or initial value have been provided.
+   */
+  template <
+      typename NewOperation,
+      typename = EnableIfTestable<sizeof...(Contracts), Factory, NewOperation>>
+  testing::AssertionResult Test(const NewOperation &new_operation) const {
+    return TestImpl(new_operation, absl::index_sequence_for<Contracts...>());
+  }
+
+  /*
+   * Returns a testing::AssertionResult that is the reduced result of the
+   * exception safety algorithm. The algorithm short circuits and returns
+   * AssertionFailure after the first contract callback returns an
+   * AssertionFailure. Otherwise, if all contract callbacks return an
+   * AssertionSuccess, the reduced result is AssertionSuccess.
+   *
+   * Preconditions for tester.Test():
+   *
+   * - May only be called after at least one contract assertion callback, a
+   *   factory or initial value and a testable operation have been provided.
+   */
+  template <
+      typename LazyOperation = Operation,
+      typename = EnableIfTestable<sizeof...(Contracts), Factory, LazyOperation>>
+  testing::AssertionResult Test() const {
+    return Test(operation_);
+  }
+
+private:
+  template <typename, typename, typename...>
+  friend class ExceptionSafetyTestBuilder;
+
+  friend ExceptionSafetyTestBuilder<> testing::MakeExceptionSafetyTester();
+
+  ExceptionSafetyTestBuilder() {}
+
+  ExceptionSafetyTestBuilder(const Factory &f, const Operation &o,
+                             const std::tuple<Contracts...> &i)
+      : factory_(f), operation_(o), contracts_(i) {}
+
+  template <typename SelectedOperation, size_t... Indices>
+  testing::AssertionResult TestImpl(SelectedOperation selected_operation,
+                                    absl::index_sequence<Indices...>) const {
+    return ExceptionSafetyTest<FactoryElementType<Factory>>(
+               factory_, selected_operation, std::get<Indices>(contracts_)...)
+        .Test();
+  }
+
+  Factory factory_;
+  Operation operation_;
+  std::tuple<Contracts...> contracts_;
+};
+
+} // namespace exceptions_internal
+
+} // namespace testing
+
+#endif // ABSL_HAVE_EXCEPTIONS
+
+#endif // ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_
diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/exception_testing.h b/packages/react-native-executorch/third-party/include/absl/base/internal/exception_testing.h
new file mode 100644
index 000000000..3bf940d8a
--- /dev/null
+++ b/packages/react-native-executorch/third-party/include/absl/base/internal/exception_testing.h
@@ -0,0 +1,42 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Testing utilities for ABSL types which throw exceptions.
+
+#ifndef ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
+#define ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
+
+#include "absl/base/config.h"
+#include "gtest/gtest.h"
+
+// ABSL_BASE_INTERNAL_EXPECT_FAIL tests either for a specified thrown exception
+// if exceptions are enabled, or for death with a specified text in the error
+// message
+#ifdef ABSL_HAVE_EXCEPTIONS
+
+#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
+  EXPECT_THROW(expr, exception_t)
+
+#elif defined(__ANDROID__)
+// Android asserts do not log anywhere that gtest can currently inspect.
+// So we expect exit, but cannot match the message.
+#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
+  EXPECT_DEATH(expr, ".*")
+#else
+#define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \
+  EXPECT_DEATH_IF_SUPPORTED(expr, text)
+
+#endif
+
+#endif // ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_
diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/fast_type_id.h b/packages/react-native-executorch/third-party/include/absl/base/internal/fast_type_id.h
new file mode 100644
index 000000000..b233bd005
--- /dev/null
+++ b/packages/react-native-executorch/third-party/include/absl/base/internal/fast_type_id.h
@@ -0,0 +1,47 @@
+//
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
+#define ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+template <typename Type> struct FastTypeTag {
+  constexpr static char dummy_var = 0;
+};
+
+#ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL
+template <typename Type> constexpr char FastTypeTag<Type>::dummy_var;
+#endif
+
+// FastTypeId<Type>() evaluates at compile/link-time to a unique pointer for the
+// passed-in type. These are meant to be a good match for keys into maps or
+// straight up comparisons.
+using FastTypeIdType = const void *;
+
+template <typename Type> constexpr inline FastTypeIdType FastTypeId() {
+  return &FastTypeTag<Type>::dummy_var;
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_
diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/hide_ptr.h b/packages/react-native-executorch/third-party/include/absl/base/internal/hide_ptr.h
new file mode 100644
index 000000000..400fb388b
--- /dev/null
+++ b/packages/react-native-executorch/third-party/include/absl/base/internal/hide_ptr.h
@@ -0,0 +1,49 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_HIDE_PTR_H_
+#define ABSL_BASE_INTERNAL_HIDE_PTR_H_
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Arbitrary value with high bits set. Xor'ing with it is unlikely
+// to map one valid pointer to another valid pointer.
+constexpr uintptr_t HideMask() {
+  return (uintptr_t{0xF03A5F7BU} << (sizeof(uintptr_t) - 4) * 8) | 0xF03A5F7BU;
+}
+
+// Hide a pointer from the leak checker. For internal use only.
+// Differs from absl::IgnoreLeak(ptr) in that absl::IgnoreLeak(ptr) causes ptr
+// and all objects reachable from ptr to be ignored by the leak checker.
+template <class T> inline uintptr_t HidePtr(T *ptr) {
+  return reinterpret_cast<uintptr_t>(ptr) ^ HideMask();
+}
+
+// Return a pointer that has been hidden from the leak checker.
+// For internal use only.
+template <class T> inline T *UnhidePtr(uintptr_t hidden) {
+  return reinterpret_cast<T *>(hidden ^ HideMask());
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_HIDE_PTR_H_
diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/inline_variable_testing.h b/packages/react-native-executorch/third-party/include/absl/base/internal/inline_variable_testing.h
new file mode 100644
index 000000000..d0a7809a5
--- /dev/null
+++ b/packages/react-native-executorch/third-party/include/absl/base/internal/inline_variable_testing.h
@@ -0,0 +1,46 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_TESTING_H_
+#define ABSL_BASE_INTERNAL_INLINE_VARIABLE_TESTING_H_
+
+#include "absl/base/internal/inline_variable.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace inline_variable_testing_internal {
+
+struct Foo {
+  int value = 5;
+};
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, inline_variable_foo, {});
+ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, other_inline_variable_foo, {});
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(int, inline_variable_int, 5);
+ABSL_INTERNAL_INLINE_CONSTEXPR(int, other_inline_variable_int, 5);
+
+ABSL_INTERNAL_INLINE_CONSTEXPR(void (*)(), inline_variable_fun_ptr, nullptr);
+
+const Foo &get_foo_a();
+const Foo &get_foo_b();
+
+const int &get_int_a();
+const int &get_int_b();
+
+} // namespace inline_variable_testing_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_INLINE_VARIABLE_TESTING_H_
diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/low_level_alloc.h b/packages/react-native-executorch/third-party/include/absl/base/internal/low_level_alloc.h
new file mode 100644
index 000000000..484ea67fb
--- /dev/null
+++ b/packages/react-native-executorch/third-party/include/absl/base/internal/low_level_alloc.h
@@ -0,0 +1,127 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+#ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
+#define ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_
+
+// A simple thread-safe memory allocator that does not depend on
+// mutexes or thread-specific data. It is intended to be used
+// sparingly, and only when malloc() would introduce an unwanted
+// dependency, such as inside the heap-checker, or the Mutex
+// implementation.
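// Illustrative usage sketch for hide_ptr.h above (not part of the patch);
// `HidePtrRoundTrip` and its locals are hypothetical names.
#include <cassert>
#include <cstdint>

#include "absl/base/internal/hide_ptr.h"

void HidePtrRoundTrip() {
  int *p = new int(42);
  // XOR-mask the address so a leak-checker scan of memory no longer sees a
  // live pointer to the allocation.
  uintptr_t hidden = absl::base_internal::HidePtr(p);
  // Recover the original pointer when it is needed again.
  int *recovered = absl::base_internal::UnhidePtr<int>(hidden);
  assert(recovered == p);
  delete recovered;
}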
+
+// IWYU pragma: private, include "base/low_level_alloc.h"
+
+#include <sys/types.h>
+
+#include <cstdint>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+
+// LowLevelAlloc requires that the platform support low-level
+// allocation of virtual memory. Platforms lacking this cannot use
+// LowLevelAlloc.
+#ifdef ABSL_LOW_LEVEL_ALLOC_MISSING
+#error ABSL_LOW_LEVEL_ALLOC_MISSING cannot be directly set
+#elif !defined(ABSL_HAVE_MMAP) && !defined(_WIN32)
+#define ABSL_LOW_LEVEL_ALLOC_MISSING 1
+#endif
+
+// Using LowLevelAlloc with kAsyncSignalSafe isn't supported on Windows or
+// asm.js / WebAssembly.
+// See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html
+// for more information.
+#ifdef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+#error ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING cannot be directly set
+#elif defined(_WIN32) || defined(__asmjs__) || defined(__wasm__) || \
+    defined(__hexagon__)
+#define ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING 1
+#endif
+
+#include <stddef.h>
+
+#include "absl/base/port.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+class LowLevelAlloc {
+public:
+  struct Arena; // an arena from which memory may be allocated
+
+  // Returns a pointer to a block of at least "request" bytes
+  // that have been newly allocated from the specific arena.
+  // For Alloc(), the DefaultArena() is used.
+  // Returns 0 if passed request==0.
+  // Does not return 0 under other circumstances; it crashes if memory
+  // is not available.
+  static void *Alloc(size_t request) ABSL_ATTRIBUTE_SECTION(malloc_hook);
+  static void *AllocWithArena(size_t request, Arena *arena)
+      ABSL_ATTRIBUTE_SECTION(malloc_hook);
+
+  // Deallocates a region of memory that was previously allocated with
+  // Alloc(). Does nothing if passed 0. "s" must be either 0,
+  // or must have been returned from a call to Alloc() and not yet passed to
+  // Free() since that call to Alloc(). The space is returned to the arena
+  // from which it was allocated.
+  static void Free(void *s) ABSL_ATTRIBUTE_SECTION(malloc_hook);
+
+  // ABSL_ATTRIBUTE_SECTION(malloc_hook) for Alloc* and Free
+  // are to put all callers of MallocHook::Invoke* in this module
+  // into a special section,
+  // so that MallocHook::GetCallerStackTrace can function accurately.
+
+  // Create a new arena.
+  // The root metadata for the new arena is allocated in the
+  // meta_data_arena; the DefaultArena() can be passed for meta_data_arena.
+  // These values may be ORed into flags:
+  enum {
+    // Report calls to Alloc() and Free() via the MallocHook interface.
+    // Set in the DefaultArena.
+    kCallMallocHook = 0x0001,
+
+#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
+    // Make calls to Alloc(), Free() be async-signal-safe. Not set in
+    // DefaultArena(). Not supported on all platforms.
+    kAsyncSignalSafe = 0x0002,
+#endif
+  };
+  // Construct a new arena. The allocation of the underlying metadata honors
+  // the provided flags. For example, the call NewArena(kAsyncSignalSafe)
+  // is itself async-signal-safe, as well as generating an arena that provides
+  // async-signal-safe Alloc/Free.
+  static Arena *NewArena(uint32_t flags);
+
+  // Destroys an arena allocated by NewArena and returns true,
+  // provided no allocated blocks remain in the arena.
+  // If allocated blocks remain in the arena, does nothing and
+  // returns false.
+  // It is illegal to attempt to destroy the DefaultArena().
+  static bool DeleteArena(Arena *arena);
+
+  // The default arena that always exists.
+ static Arena *DefaultArena(); + +private: + LowLevelAlloc(); // no instances +}; + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_ diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/per_thread_tls.h b/packages/react-native-executorch/third-party/include/absl/base/internal/per_thread_tls.h new file mode 100644 index 000000000..0dfb94354 --- /dev/null +++ b/packages/react-native-executorch/third-party/include/absl/base/internal/per_thread_tls.h @@ -0,0 +1,52 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_ +#define ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_ + +// This header defines two macros: +// +// If the platform supports thread-local storage: +// +// * ABSL_PER_THREAD_TLS_KEYWORD is the C keyword needed to declare a +// thread-local variable +// * ABSL_PER_THREAD_TLS is 1 +// +// Otherwise: +// +// * ABSL_PER_THREAD_TLS_KEYWORD is empty +// * ABSL_PER_THREAD_TLS is 0 +// +// Microsoft C supports thread-local storage. +// GCC supports it if the appropriate version of glibc is available, +// which the programmer can indicate by defining ABSL_HAVE_TLS + +#include "absl/base/port.h" // For ABSL_HAVE_TLS + +#if defined(ABSL_PER_THREAD_TLS) +#error ABSL_PER_THREAD_TLS cannot be directly set +#elif defined(ABSL_PER_THREAD_TLS_KEYWORD) +#error ABSL_PER_THREAD_TLS_KEYWORD cannot be directly set +#elif defined(ABSL_HAVE_TLS) +#define ABSL_PER_THREAD_TLS_KEYWORD __thread +#define ABSL_PER_THREAD_TLS 1 +#elif defined(_MSC_VER) +#define ABSL_PER_THREAD_TLS_KEYWORD __declspec(thread) +#define ABSL_PER_THREAD_TLS 1 +#else +#define ABSL_PER_THREAD_TLS_KEYWORD +#define ABSL_PER_THREAD_TLS 0 +#endif + +#endif // ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_ diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/poison.h b/packages/react-native-executorch/third-party/include/absl/base/internal/poison.h new file mode 100644 index 000000000..6241cd3f5 --- /dev/null +++ b/packages/react-native-executorch/third-party/include/absl/base/internal/poison.h @@ -0,0 +1,59 @@ +// Copyright 2024 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
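// Illustrative sketch of the per_thread_tls.h macros above (not part of the
// patch); `tls_counter` and `BumpThreadLocalCounter` are hypothetical names.
#include "absl/base/internal/per_thread_tls.h"

#if ABSL_PER_THREAD_TLS
// Expands to `__thread` (GCC/Clang) or `__declspec(thread)` (MSVC), so each
// thread gets its own counter instance.
ABSL_PER_THREAD_TLS_KEYWORD int tls_counter = 0;

int BumpThreadLocalCounter() { return ++tls_counter; }
#endif // ABSL_PER_THREAD_TLS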
+
+#ifndef ABSL_BASE_INTERNAL_POISON_H_
+#define ABSL_BASE_INTERNAL_POISON_H_
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+inline void *GetBadPointerInternal() {
+  // A likely bad pointer. Pointers are required to have high bits that are all
+  // zero or all one for certain 64-bit CPUs. This pointer value will hopefully
+  // cause a crash on dereference and also be clearly recognizable as invalid.
+  constexpr uint64_t kBadPtr = 0xBAD0BAD0BAD0BAD0;
+  auto ret = reinterpret_cast<void *>(static_cast<uintptr_t>(kBadPtr));
+#ifndef _MSC_VER // MSVC doesn't support inline asm with `volatile`.
+  // Try to prevent the compiler from optimizing out the undefined behavior.
+  asm volatile("" : : "r"(ret) :); // NOLINT
+#endif
+  return ret;
+}
+
+void *InitializePoisonedPointerInternal();
+
+inline void *get_poisoned_pointer() {
+#if defined(NDEBUG) && !defined(ABSL_HAVE_ADDRESS_SANITIZER) && \
+    !defined(ABSL_HAVE_MEMORY_SANITIZER)
+  // In optimized non-sanitized builds, avoid the function-local static because
+  // of the codegen and runtime cost.
+  return GetBadPointerInternal();
+#else
+  // Non-optimized builds may use a more robust implementation. Note that we
+  // can't use a static global because Chromium doesn't allow non-constinit
+  // globals.
+  static void *ptr = InitializePoisonedPointerInternal();
+  return ptr;
+#endif
+}
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_POISON_H_
diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/pretty_function.h b/packages/react-native-executorch/third-party/include/absl/base/internal/pretty_function.h
new file mode 100644
index 000000000..639cca141
--- /dev/null
+++ b/packages/react-native-executorch/third-party/include/absl/base/internal/pretty_function.h
@@ -0,0 +1,33 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
+#define ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_
+
+// ABSL_PRETTY_FUNCTION
+//
+// In C++11, __func__ gives the undecorated name of the current function. That
+// is, "main", not "int main()". Various compilers give extra macros to get the
+// decorated function name, including return type and arguments, to
+// differentiate between overload sets. ABSL_PRETTY_FUNCTION is a portable
+// version of these macros which forwards to the correct macro on each compiler.
+#if defined(_MSC_VER) +#define ABSL_PRETTY_FUNCTION __FUNCSIG__ +#elif defined(__GNUC__) +#define ABSL_PRETTY_FUNCTION __PRETTY_FUNCTION__ +#else +#error "Unsupported compiler" +#endif + +#endif // ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_ diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/scoped_set_env.h b/packages/react-native-executorch/third-party/include/absl/base/internal/scoped_set_env.h new file mode 100644 index 000000000..1cf52c41a --- /dev/null +++ b/packages/react-native-executorch/third-party/include/absl/base/internal/scoped_set_env.h @@ -0,0 +1,45 @@ +// +// Copyright 2019 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +#ifndef ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_ +#define ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_ + +#include + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +class ScopedSetEnv { +public: + ScopedSetEnv(const char *var_name, const char *new_value); + ~ScopedSetEnv(); + +private: + std::string var_name_; + std::string old_value_; + + // True if the environment variable was initially not set. + bool was_unset_; +}; + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_ diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/spinlock.h b/packages/react-native-executorch/third-party/include/absl/base/internal/spinlock.h new file mode 100644 index 000000000..d3885177f --- /dev/null +++ b/packages/react-native-executorch/third-party/include/absl/base/internal/spinlock.h @@ -0,0 +1,275 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Most users requiring mutual exclusion should use Mutex. +// SpinLock is provided for use in two situations: +// - for use by Abseil internal code that Mutex itself depends on +// - for async signal safety (see below) + +// SpinLock with a base_internal::SchedulingMode::SCHEDULE_KERNEL_ONLY is async +// signal safe. If a spinlock is used within a signal handler, all code that +// acquires the lock must ensure that the signal cannot arrive while they are +// holding the lock. Typically, this is done by blocking the signal. +// +// Threads waiting on a SpinLock may be woken in an arbitrary order. 
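// Illustrative sketch of ABSL_PRETTY_FUNCTION from pretty_function.h above
// (not part of the patch); `LogCurrentFunction` is a hypothetical name.
#include <cstdio>

#include "absl/base/internal/pretty_function.h"

void LogCurrentFunction(int) {
  // On GCC/Clang this prints the decorated name "void LogCurrentFunction(int)";
  // plain __func__ would print only "LogCurrentFunction".
  std::printf("%s\n", ABSL_PRETTY_FUNCTION);
}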
+ +#ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_ +#define ABSL_BASE_INTERNAL_SPINLOCK_H_ + +#include +#include + +#include "absl/base/attributes.h" +#include "absl/base/const_init.h" +#include "absl/base/dynamic_annotations.h" +#include "absl/base/internal/low_level_scheduling.h" +#include "absl/base/internal/raw_logging.h" +#include "absl/base/internal/scheduling_mode.h" +#include "absl/base/internal/tsan_mutex_interface.h" +#include "absl/base/thread_annotations.h" + +namespace tcmalloc { +namespace tcmalloc_internal { + +class AllocationGuardSpinLockHolder; + +} // namespace tcmalloc_internal +} // namespace tcmalloc + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +class ABSL_LOCKABLE ABSL_ATTRIBUTE_WARN_UNUSED SpinLock { +public: + SpinLock() : lockword_(kSpinLockCooperative) { + ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static); + } + + // Constructors that allow non-cooperative spinlocks to be created for use + // inside thread schedulers. Normal clients should not use these. + explicit SpinLock(base_internal::SchedulingMode mode); + + // Constructor for global SpinLock instances. See absl/base/const_init.h. + constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode) + : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {} + + // For global SpinLock instances prefer trivial destructor when possible. + // Default but non-trivial destructor in some build configurations causes an + // extra static initializer. +#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE + ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); } +#else + ~SpinLock() = default; +#endif + + // Acquire this SpinLock. + inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { + ABSL_TSAN_MUTEX_PRE_LOCK(this, 0); + if (!TryLockImpl()) { + SlowLock(); + } + ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0); + } + + // Try to acquire this SpinLock without blocking and return true if the + // acquisition was successful. If the lock was not acquired, false is + // returned. If this SpinLock is free at the time of the call, TryLock + // will return true with high probability. + ABSL_MUST_USE_RESULT inline bool TryLock() + ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) { + ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock); + bool res = TryLockImpl(); + ABSL_TSAN_MUTEX_POST_LOCK( + this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed), + 0); + return res; + } + + // Release this SpinLock, which must be held by the calling thread. + inline void Unlock() ABSL_UNLOCK_FUNCTION() { + ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0); + uint32_t lock_value = lockword_.load(std::memory_order_relaxed); + lock_value = lockword_.exchange(lock_value & kSpinLockCooperative, + std::memory_order_release); + + if ((lock_value & kSpinLockDisabledScheduling) != 0) { + base_internal::SchedulingGuard::EnableRescheduling(true); + } + if ((lock_value & kWaitTimeMask) != 0) { + // Collect contentionz profile info, and speed the wakeup of any waiter. + // The wait_cycles value indicates how long this thread spent waiting + // for the lock. + SlowUnlock(lock_value); + } + ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0); + } + + // Determine if the lock is held. When the lock is held by the invoking + // thread, true will always be returned. Intended to be used as + // CHECK(lock.IsHeld()). + ABSL_MUST_USE_RESULT inline bool IsHeld() const { + return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0; + } + + // Return immediately if this thread holds the SpinLock exclusively. 
+ // Otherwise, report an error by crashing with a diagnostic. + inline void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK() { + if (!IsHeld()) { + ABSL_RAW_LOG(FATAL, "thread should hold the lock on SpinLock"); + } + } + +protected: + // These should not be exported except for testing. + + // Store number of cycles between wait_start_time and wait_end_time in a + // lock value. + static uint32_t EncodeWaitCycles(int64_t wait_start_time, + int64_t wait_end_time); + + // Extract number of wait cycles in a lock value. + static int64_t DecodeWaitCycles(uint32_t lock_value); + + // Provide access to protected method above. Use for testing only. + friend struct SpinLockTest; + friend class tcmalloc::tcmalloc_internal::AllocationGuardSpinLockHolder; + +private: + // lockword_ is used to store the following: + // + // bit[0] encodes whether a lock is being held. + // bit[1] encodes whether a lock uses cooperative scheduling. + // bit[2] encodes whether the current lock holder disabled scheduling when + // acquiring the lock. Only set when kSpinLockHeld is also set. + // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int. + // This is set by the lock holder to indicate how long it waited on + // the lock before eventually acquiring it. The number of cycles is + // encoded as a 29-bit unsigned int, or in the case that the current + // holder did not wait but another waiter is queued, the LSB + // (kSpinLockSleeper) is set. The implementation does not explicitly + // track the number of queued waiters beyond this. It must always be + // assumed that waiters may exist if the current holder was required to + // queue. + // + // Invariant: if the lock is not held, the value is either 0 or + // kSpinLockCooperative. + static constexpr uint32_t kSpinLockHeld = 1; + static constexpr uint32_t kSpinLockCooperative = 2; + static constexpr uint32_t kSpinLockDisabledScheduling = 4; + static constexpr uint32_t kSpinLockSleeper = 8; + // Includes kSpinLockSleeper. + static constexpr uint32_t kWaitTimeMask = + ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling); + + // Returns true if the provided scheduling mode is cooperative. + static constexpr bool + IsCooperative(base_internal::SchedulingMode scheduling_mode) { + return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL; + } + + bool IsCooperative() const { + return lockword_.load(std::memory_order_relaxed) & kSpinLockCooperative; + } + + uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles); + void SlowLock() ABSL_ATTRIBUTE_COLD; + void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD; + uint32_t SpinLoop(); + + inline bool TryLockImpl() { + uint32_t lock_value = lockword_.load(std::memory_order_relaxed); + return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0; + } + + std::atomic lockword_; + + SpinLock(const SpinLock &) = delete; + SpinLock &operator=(const SpinLock &) = delete; +}; + +// Corresponding locker object that arranges to acquire a spinlock for +// the duration of a C++ scope. +// +// TODO(b/176172494): Use only [[nodiscard]] when baseline is raised. +// TODO(b/6695610): Remove forward declaration when #ifdef is no longer needed. 
+#if ABSL_HAVE_CPP_ATTRIBUTE(nodiscard) +class [[nodiscard]] SpinLockHolder; +#else +class ABSL_MUST_USE_RESULT ABSL_ATTRIBUTE_TRIVIAL_ABI SpinLockHolder; +#endif + +class ABSL_SCOPED_LOCKABLE SpinLockHolder { +public: + inline explicit SpinLockHolder(SpinLock *l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l) + : lock_(l) { + l->Lock(); + } + inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_->Unlock(); } + + SpinLockHolder(const SpinLockHolder &) = delete; + SpinLockHolder &operator=(const SpinLockHolder &) = delete; + +private: + SpinLock *lock_; +}; + +// Register a hook for profiling support. +// +// The function pointer registered here will be called whenever a spinlock is +// contended. The callback is given an opaque handle to the contended spinlock +// and the number of wait cycles. This is thread-safe, but only a single +// profiler can be registered. It is an error to call this function multiple +// times with different arguments. +void RegisterSpinLockProfiler(void (*fn)(const void *lock, + int64_t wait_cycles)); + +//------------------------------------------------------------------------------ +// Public interface ends here. +//------------------------------------------------------------------------------ + +// If (result & kSpinLockHeld) == 0, then *this was successfully locked. +// Otherwise, returns last observed value for lockword_. +inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value, + uint32_t wait_cycles) { + if ((lock_value & kSpinLockHeld) != 0) { + return lock_value; + } + + uint32_t sched_disabled_bit = 0; + if ((lock_value & kSpinLockCooperative) == 0) { + // For non-cooperative locks we must make sure we mark ourselves as + // non-reschedulable before we attempt to CompareAndSwap. + if (base_internal::SchedulingGuard::DisableRescheduling()) { + sched_disabled_bit = kSpinLockDisabledScheduling; + } + } + + if (!lockword_.compare_exchange_strong( + lock_value, + kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit, + std::memory_order_acquire, std::memory_order_relaxed)) { + base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0); + } + + return lock_value; +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_SPINLOCK_H_ diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/spinlock_akaros.inc b/packages/react-native-executorch/third-party/include/absl/base/internal/spinlock_akaros.inc new file mode 100644 index 000000000..7b0cada4f --- /dev/null +++ b/packages/react-native-executorch/third-party/include/absl/base/internal/spinlock_akaros.inc @@ -0,0 +1,35 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
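// Illustrative sketch of the SpinLock / SpinLockHolder API above (not part of
// the patch); `g_counter_lock` and `IncrementCounter` are hypothetical names.
#include "absl/base/attributes.h"
#include "absl/base/const_init.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/internal/spinlock.h"

namespace {
// The constexpr (kConstInit) constructor avoids a dynamic static initializer.
ABSL_CONST_INIT absl::base_internal::SpinLock g_counter_lock(
    absl::kConstInit, absl::base_internal::SCHEDULE_KERNEL_ONLY);
int g_counter = 0;

int IncrementCounter() {
  // SpinLockHolder locks in its constructor and unlocks in its destructor,
  // so g_counter_lock is held exactly for this scope.
  absl::base_internal::SpinLockHolder holder(&g_counter_lock);
  return ++g_counter;
}
} // namespace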
+//
+// This file is an Akaros-specific part of spinlock_wait.cc
+
+#include <atomic>
+
+#include "absl/base/internal/scheduling_mode.h"
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+    std::atomic<uint32_t>* /* lock_word */, uint32_t /* value */,
+    int /* loop */, absl::base_internal::SchedulingMode /* mode */) {
+  // In Akaros, one must take care not to call anything that could cause a
+  // malloc(), a blocking system call, or a uthread_yield() while holding a
+  // spinlock. Our callers assume we will not call into libraries or other
+  // arbitrary code.
+}
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+    std::atomic<uint32_t>* /* lock_word */, bool /* all */) {}
+
+} // extern "C"
diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/spinlock_linux.inc b/packages/react-native-executorch/third-party/include/absl/base/internal/spinlock_linux.inc
new file mode 100644
index 000000000..fe8ba674f
--- /dev/null
+++ b/packages/react-native-executorch/third-party/include/absl/base/internal/spinlock_linux.inc
@@ -0,0 +1,71 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file is a Linux-specific part of spinlock_wait.cc
+
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#include <atomic>
+#include <climits>
+#include <cstdint>
+#include <ctime>
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/errno_saver.h"
+
+// The SpinLock lockword is `std::atomic<uint32_t>`. Here we assert that
+// `std::atomic<uint32_t>` is bitwise equivalent to the `int` expected
+// by SYS_futex. We also assume that reads/writes done to the lockword
+// by SYS_futex have rational semantics with regard to the
+// std::atomic<> API. C++ provides no guarantees of these assumptions,
+// but they are believed to hold in practice.
+static_assert(sizeof(std::atomic<uint32_t>) == sizeof(int),
+              "SpinLock lockword has the wrong size for a futex");
+
+// Some Android headers are missing these definitions even though they
+// support these futex operations.
+#ifdef __BIONIC__
+#ifndef SYS_futex
+#define SYS_futex __NR_futex
+#endif
+#ifndef FUTEX_PRIVATE_FLAG
+#define FUTEX_PRIVATE_FLAG 128
+#endif
+#endif
+
+#if defined(__NR_futex_time64) && !defined(SYS_futex_time64)
+#define SYS_futex_time64 __NR_futex_time64
+#endif
+
+#if defined(SYS_futex_time64) && !defined(SYS_futex)
+#define SYS_futex SYS_futex_time64
+#endif
+
+extern "C" {
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)(
+    std::atomic<uint32_t> *w, uint32_t value, int,
+    absl::base_internal::SchedulingMode) {
+  absl::base_internal::ErrnoSaver errno_saver;
+  syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, nullptr);
+}
+
+ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(
+    std::atomic<uint32_t> *w, bool all) {
+  syscall(SYS_futex, w, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, all ?
INT_MAX : 1, 0); +} + +} // extern "C" diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/spinlock_posix.inc b/packages/react-native-executorch/third-party/include/absl/base/internal/spinlock_posix.inc new file mode 100644 index 000000000..4f6f887d9 --- /dev/null +++ b/packages/react-native-executorch/third-party/include/absl/base/internal/spinlock_posix.inc @@ -0,0 +1,46 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file is a Posix-specific part of spinlock_wait.cc + +#include + +#include +#include + +#include "absl/base/internal/errno_saver.h" +#include "absl/base/internal/scheduling_mode.h" +#include "absl/base/port.h" + +extern "C" { + +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( + std::atomic* /* lock_word */, uint32_t /* value */, int loop, + absl::base_internal::SchedulingMode /* mode */) { + absl::base_internal::ErrnoSaver errno_saver; + if (loop == 0) { + } else if (loop == 1) { + sched_yield(); + } else { + struct timespec tm; + tm.tv_sec = 0; + tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop); + nanosleep(&tm, nullptr); + } +} + +ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)( + std::atomic* /* lock_word */, bool /* all */) {} + +} // extern "C" diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/spinlock_win32.inc b/packages/react-native-executorch/third-party/include/absl/base/internal/spinlock_win32.inc new file mode 100644 index 000000000..934c2016f --- /dev/null +++ b/packages/react-native-executorch/third-party/include/absl/base/internal/spinlock_win32.inc @@ -0,0 +1,40 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file is a Win32-specific part of spinlock_wait.cc + +#include +#include +#include "absl/base/internal/scheduling_mode.h" + +extern "C" { + +void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( + std::atomic* /* lock_word */, uint32_t /* value */, int loop, + absl::base_internal::SchedulingMode /* mode */) { + if (loop == 0) { + } else if (loop == 1) { + Sleep(0); + } else { + // SpinLockSuggestedDelayNS() always returns a positive integer, so this + // static_cast is safe. 
+ Sleep(static_cast( + absl::base_internal::SpinLockSuggestedDelayNS(loop) / 1000000)); + } +} + +void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)( + std::atomic* /* lock_word */, bool /* all */) {} + +} // extern "C" diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/strerror.h b/packages/react-native-executorch/third-party/include/absl/base/internal/strerror.h new file mode 100644 index 000000000..693a98587 --- /dev/null +++ b/packages/react-native-executorch/third-party/include/absl/base/internal/strerror.h @@ -0,0 +1,39 @@ +// Copyright 2020 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_STRERROR_H_ +#define ABSL_BASE_INTERNAL_STRERROR_H_ + +#include + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// A portable and thread-safe alternative to C89's `strerror`. +// +// The C89 specification of `strerror` is not suitable for use in a +// multi-threaded application as the returned string may be changed by calls to +// `strerror` from another thread. The many non-stdlib alternatives differ +// enough in their names, availability, and semantics to justify this wrapper +// around them. `errno` will not be modified by a call to `absl::StrError`. +std::string StrError(int errnum); + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_STRERROR_H_ diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/sysinfo.h b/packages/react-native-executorch/third-party/include/absl/base/internal/sysinfo.h new file mode 100644 index 000000000..3a20dee52 --- /dev/null +++ b/packages/react-native-executorch/third-party/include/absl/base/internal/sysinfo.h @@ -0,0 +1,74 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file includes routines to find out characteristics +// of the machine a program is running on. It is undoubtedly +// system-dependent. + +// Functions listed here that accept a pid_t as an argument act on the +// current process if the pid_t argument is 0 +// All functions here are thread-hostile due to file caching unless +// commented otherwise. 
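// Illustrative sketch of absl::base_internal::StrError from strerror.h above
// (not part of the patch); `PrintLastError` is a hypothetical name.
#include <cerrno>
#include <cstdio>
#include <string>

#include "absl/base/internal/strerror.h"

void PrintLastError() {
  // Unlike C89 strerror(), StrError returns an owned std::string and leaves
  // errno untouched, so it is safe to call concurrently from many threads.
  std::string message = absl::base_internal::StrError(errno);
  std::printf("errno=%d: %s\n", errno, message.c_str());
}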
+
+#ifndef ABSL_BASE_INTERNAL_SYSINFO_H_
+#define ABSL_BASE_INTERNAL_SYSINFO_H_
+
+#ifndef _WIN32
+#include <sys/types.h>
+#endif
+
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/base/port.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace base_internal {
+
+// Nominal core processor cycles per second of each processor. This is _not_
+// necessarily the frequency of the CycleClock counter (see cycleclock.h).
+// Thread-safe.
+double NominalCPUFrequency();
+
+// Number of logical processors (hyperthreads) in system. Thread-safe.
+int NumCPUs();
+
+// Return the thread id of the current thread, as told by the system.
+// No two currently-live threads implemented by the OS shall have the same ID.
+// Thread ids of exited threads may be reused. Multiple user-level threads
+// may have the same thread ID if multiplexed on the same OS thread.
+//
+// On Linux, you may send a signal to the resulting ID with kill(). However,
+// it is recommended for portability that you use pthread_kill() instead.
+#ifdef _WIN32
+// On Windows, process id and thread id are of the same type: the return
+// types of GetProcessId() and GetThreadId() are both DWORD, an unsigned
+// 32-bit type.
+using pid_t = uint32_t;
+#endif
+pid_t GetTID();
+
+// Like GetTID(), but caches the result in thread-local storage in order
+// to avoid unnecessary system calls. Note that there are some cases where
+// one must call through to GetTID directly, which is why this exists as a
+// separate function. For example, GetCachedTID() is not safe to call in
+// an asynchronous signal-handling context nor right after a call to fork().
+pid_t GetCachedTID();
+
+} // namespace base_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_BASE_INTERNAL_SYSINFO_H_
diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/thread_identity.h b/packages/react-native-executorch/third-party/include/absl/base/internal/thread_identity.h
new file mode 100644
index 000000000..8018c79e1
--- /dev/null
+++ b/packages/react-native-executorch/third-party/include/absl/base/internal/thread_identity.h
@@ -0,0 +1,273 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Each active thread has a ThreadIdentity that may represent the thread in
+// interfaces at various levels. ThreadIdentity objects are never deallocated.
+// When a thread terminates, its ThreadIdentity object may be reused for a
+// thread created later.
+
+#ifndef ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
+#define ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_
+
+#ifndef _WIN32
+#include <pthread.h>
+// Defines __GOOGLE_GRTE_VERSION__ (via glibc-specific features.h) when
+// supported.
+#include <unistd.h>
+#endif
+
+#include <atomic>
+#include <cstdint>
+
+#include "absl/base/config.h"
+#include "absl/base/internal/per_thread_tls.h"
+#include "absl/base/optimization.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+struct SynchLocksHeld;
+struct SynchWaitParams;
+
+namespace base_internal {
+
+class SpinLock;
+struct ThreadIdentity;
+
+// Used by the implementation of absl::Mutex and absl::CondVar.
+struct PerThreadSynch {
+  // The internal representation of absl::Mutex and absl::CondVar relies
+  // on the alignment of PerThreadSynch. Both store the address of the
+  // PerThreadSynch in the high-order bits of their internal state,
+  // which means the low kLowZeroBits of the address of PerThreadSynch
+  // must be zero.
+  static constexpr int kLowZeroBits = 8;
+  static constexpr int kAlignment = 1 << kLowZeroBits;
+
+  // Returns the associated ThreadIdentity.
+  // This can be implemented as a cast because we guarantee
+  // PerThreadSynch is the first element of ThreadIdentity.
+  ThreadIdentity *thread_identity() {
+    return reinterpret_cast<ThreadIdentity *>(this);
+  }
+
+  PerThreadSynch *next; // Circular waiter queue; initialized to 0.
+  PerThreadSynch *skip; // If non-zero, all entries in Mutex queue
+                        // up to and including "skip" have same
+                        // condition as this, and will be woken later
+  bool may_skip;        // if false while on mutex queue, a mutex unlocker
+                        // is using this PerThreadSynch as a terminator. Its
+                        // skip field must not be filled in because the loop
+                        // might then skip over the terminator.
+  bool wake;            // This thread is to be woken from a Mutex.
+  // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the
+  // waiter is waiting on the mutex as part of a CV Wait or Mutex Await.
+  //
+  // The value of "x->cond_waiter" is meaningless if "x" is not on a
+  // Mutex waiter list.
+  bool cond_waiter;
+  bool maybe_unlocking; // Valid at head of Mutex waiter queue;
+                        // true if UnlockSlow could be searching
+                        // for a waiter to wake. Used for an optimization
+                        // in Enqueue(). true is always a valid value.
+                        // Can be reset to false when the unlocker or any
+                        // writer releases the lock, or a reader fully
+                        // releases the lock. It may not be set to false
+                        // by a reader that decrements the count to
+                        // non-zero. protected by mutex spinlock
+  bool suppress_fatal_errors; // If true, try to proceed even in the face
+                              // of broken invariants. This is used within
+                              // fatal signal handlers to improve the
+                              // chances of debug logging information being
+                              // output successfully.
+  int priority;               // Priority of thread (updated every so often).
+
+  // State values:
+  //   kAvailable: This PerThreadSynch is available.
+  //   kQueued: This PerThreadSynch is unavailable, it's currently queued on a
+  //            Mutex or CondVar waiter list.
+  //
+  // Transitions from kQueued to kAvailable require a release
+  // barrier. This is needed as a waiter may use "state" to
+  // independently observe that it's no longer queued.
+  //
+  // Transitions from kAvailable to kQueued require no barrier, they
+  // are externally ordered by the Mutex.
+  enum State { kAvailable, kQueued };
+  std::atomic<State> state;
+
+  // The wait parameters of the current wait. waitp is null if the
+  // thread is not waiting. Transitions from null to non-null must
+  // occur before the enqueue commit point (state = kQueued in
+  // Enqueue() and CondVarEnqueue()). Transitions from non-null to
+  // null must occur after the wait is finished (state = kAvailable in
+  // Mutex::Block() and CondVar::WaitCommon()).
This field may be + // changed only by the thread that describes this PerThreadSynch. A + // special case is Fer(), which calls Enqueue() on another thread, + // but with an identical SynchWaitParams pointer, thus leaving the + // pointer unchanged. + SynchWaitParams *waitp; + + intptr_t readers; // Number of readers in mutex. + + // When priority will next be read (cycles). + int64_t next_priority_read_cycles; + + // Locks held; used during deadlock detection. + // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity(). + SynchLocksHeld *all_locks; +}; + +// The instances of this class are allocated in NewThreadIdentity() with an +// alignment of PerThreadSynch::kAlignment and never destroyed. Initialization +// should happen in OneTimeInitThreadIdentity(). +// +// Instances may be reused by new threads - fields should be reset in +// ResetThreadIdentityBetweenReuse(). +// +// NOTE: The layout of fields in this structure is critical, please do not +// add, remove, or modify the field placements without fully auditing the +// layout. +struct ThreadIdentity { + // Must be the first member. The Mutex implementation requires that + // the PerThreadSynch object associated with each thread is + // PerThreadSynch::kAlignment aligned. We provide this alignment on + // ThreadIdentity itself. + PerThreadSynch per_thread_synch; + + // Private: Reserved for absl::synchronization_internal::Waiter. + struct WaiterState { + alignas(void *) char data[256]; + } waiter_state; + + // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter(). + std::atomic *blocked_count_ptr; + + // The following variables are mostly read/written just by the + // thread itself. The only exception is that these are read by + // a ticker thread as a hint. + std::atomic ticker; // Tick counter, incremented once per second. + std::atomic wait_start; // Ticker value when thread started waiting. + std::atomic is_idle; // Has thread become idle yet? + + ThreadIdentity *next; +}; + +// Returns the ThreadIdentity object representing the calling thread; guaranteed +// to be unique for its lifetime. The returned object will remain valid for the +// program's lifetime; although it may be re-assigned to a subsequent thread. +// If one does not exist, return nullptr instead. +// +// Does not malloc(*), and is async-signal safe. +// [*] Technically pthread_setspecific() does malloc on first use; however this +// is handled internally within tcmalloc's initialization already. Note that +// darwin does *not* use tcmalloc, so this can catch you if using MallocHooks +// on Apple platforms. Whatever function is calling your MallocHooks will need +// to watch for recursion on Apple platforms. +// +// New ThreadIdentity objects can be constructed and associated with a thread +// by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h. +ThreadIdentity *CurrentThreadIdentityIfPresent(); + +using ThreadIdentityReclaimerFunction = void (*)(void *); + +// Sets the current thread identity to the given value. 'reclaimer' is a +// pointer to the global function for cleaning up instances on thread +// destruction. +void SetCurrentThreadIdentity(ThreadIdentity *identity, + ThreadIdentityReclaimerFunction reclaimer); + +// Removes the currently associated ThreadIdentity from the running thread. +// This must be called from inside the ThreadIdentityReclaimerFunction, and only +// from that function. 
+void ClearCurrentThreadIdentity(); + +// May be chosen at compile time via: -DABSL_FORCE_THREAD_IDENTITY_MODE= +#ifdef ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC +#error ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC cannot be directly set +#else +#define ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC 0 +#endif + +#ifdef ABSL_THREAD_IDENTITY_MODE_USE_TLS +#error ABSL_THREAD_IDENTITY_MODE_USE_TLS cannot be directly set +#else +#define ABSL_THREAD_IDENTITY_MODE_USE_TLS 1 +#endif + +#ifdef ABSL_THREAD_IDENTITY_MODE_USE_CPP11 +#error ABSL_THREAD_IDENTITY_MODE_USE_CPP11 cannot be directly set +#else +#define ABSL_THREAD_IDENTITY_MODE_USE_CPP11 2 +#endif + +#ifdef ABSL_THREAD_IDENTITY_MODE +#error ABSL_THREAD_IDENTITY_MODE cannot be directly set +#elif defined(ABSL_FORCE_THREAD_IDENTITY_MODE) +#define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE +#elif defined(_WIN32) && !defined(__MINGW32__) +#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11 +#elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL) +#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11 +#elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \ + (__GOOGLE_GRTE_VERSION__ >= 20140228L) +// Support for async-safe TLS was specifically added in GRTEv4. It's not +// present in the upstream eglibc. +// Note: Current default for production systems. +#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_TLS +#else +#define ABSL_THREAD_IDENTITY_MODE \ + ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC +#endif + +#if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \ + ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11 + +#if ABSL_PER_THREAD_TLS +ABSL_CONST_INIT extern ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity + *thread_identity_ptr; +#elif defined(ABSL_HAVE_THREAD_LOCAL) +ABSL_CONST_INIT extern thread_local ThreadIdentity *thread_identity_ptr; +#else +#error Thread-local storage not detected on this platform +#endif + +// thread_local variables cannot be in headers exposed by DLLs or in certain +// build configurations on Apple platforms. However, it is important for +// performance reasons in general that `CurrentThreadIdentityIfPresent` be +// inlined. In the other cases we opt to have the function not be inlined. Note +// that `CurrentThreadIdentityIfPresent` is declared above so we can exclude +// this entire inline definition. +#if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL) && \ + !defined(ABSL_CONSUME_DLL) +#define ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT 1 +#endif + +#ifdef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT +inline ThreadIdentity *CurrentThreadIdentityIfPresent() { + return thread_identity_ptr; +} +#endif + +#elif ABSL_THREAD_IDENTITY_MODE != \ + ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC +#error Unknown ABSL_THREAD_IDENTITY_MODE +#endif + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_ diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/tracing.h b/packages/react-native-executorch/third-party/include/absl/base/internal/tracing.h new file mode 100644 index 000000000..a99baefda --- /dev/null +++ b/packages/react-native-executorch/third-party/include/absl/base/internal/tracing.h @@ -0,0 +1,81 @@ +// Copyright 2024 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_TRACING_H_ +#define ABSL_BASE_INTERNAL_TRACING_H_ + +#include "absl/base/config.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// Well known Abseil object types that have causality. +enum class ObjectKind { kUnknown, kBlockingCounter, kNotification }; + +// `TraceWait` and `TraceContinue` record the start and end of a potentially +// blocking wait operation on `object`. `object` typically represents a higher +// level synchronization object such as `absl::Notification`. +void TraceWait(const void *object, ObjectKind kind); +void TraceContinue(const void *object, ObjectKind kind); + +// `TraceSignal` records a signal on `object`. +void TraceSignal(const void *object, ObjectKind kind); + +// `TraceObserved` records the non-blocking observation of a signaled object. +void TraceObserved(const void *object, ObjectKind kind); + +// --------------------------------------------------------------------------- +// Weak implementation detail: +// +// We define the weak API as extern "C": in some build configurations we pass +// `--detect-odr-violations` to the gold linker. This causes it to flag weak +// symbol overrides as ODR violations. Because ODR only applies to C++ and not +// C, `--detect-odr-violations` ignores symbols not mangled with C++ names. +// By changing our extension points to be extern "C", we dodge this check. +// --------------------------------------------------------------------------- +extern "C" { + +void ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceWait)(const void *object, + ObjectKind kind); +void ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceContinue)(const void *object, + ObjectKind kind); +void ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceSignal)(const void *object, + ObjectKind kind); +void ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceObserved)(const void *object, + ObjectKind kind); + +} // extern "C" + +inline void TraceWait(const void *object, ObjectKind kind) { + ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceWait)(object, kind); +} + +inline void TraceContinue(const void *object, ObjectKind kind) { + ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceContinue)(object, kind); +} + +inline void TraceSignal(const void *object, ObjectKind kind) { + ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceSignal)(object, kind); +} + +inline void TraceObserved(const void *object, ObjectKind kind) { + ABSL_INTERNAL_C_SYMBOL(AbslInternalTraceObserved)(object, kind); +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_INTERNAL_TRACING_H_ diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/tsan_mutex_interface.h b/packages/react-native-executorch/third-party/include/absl/base/internal/tsan_mutex_interface.h new file mode 100644 index 000000000..957c08278 --- /dev/null +++ b/packages/react-native-executorch/third-party/include/absl/base/internal/tsan_mutex_interface.h @@ -0,0 +1,68 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// This file is intended solely for spinlock.h. +// It provides ThreadSanitizer annotations for custom mutexes. +// See for meaning of these annotations. + +#ifndef ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_ +#define ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_ + +#include "absl/base/config.h" + +// ABSL_INTERNAL_HAVE_TSAN_INTERFACE +// Macro intended only for internal use. +// +// Checks whether LLVM Thread Sanitizer interfaces are available. +// First made available in LLVM 5.0 (Sep 2017). +#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE +#error "ABSL_INTERNAL_HAVE_TSAN_INTERFACE cannot be directly set." +#endif + +#if defined(ABSL_HAVE_THREAD_SANITIZER) && defined(__has_include) +#if __has_include() +#define ABSL_INTERNAL_HAVE_TSAN_INTERFACE 1 +#endif +#endif + +#ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE +#include + +#define ABSL_TSAN_MUTEX_CREATE __tsan_mutex_create +#define ABSL_TSAN_MUTEX_DESTROY __tsan_mutex_destroy +#define ABSL_TSAN_MUTEX_PRE_LOCK __tsan_mutex_pre_lock +#define ABSL_TSAN_MUTEX_POST_LOCK __tsan_mutex_post_lock +#define ABSL_TSAN_MUTEX_PRE_UNLOCK __tsan_mutex_pre_unlock +#define ABSL_TSAN_MUTEX_POST_UNLOCK __tsan_mutex_post_unlock +#define ABSL_TSAN_MUTEX_PRE_SIGNAL __tsan_mutex_pre_signal +#define ABSL_TSAN_MUTEX_POST_SIGNAL __tsan_mutex_post_signal +#define ABSL_TSAN_MUTEX_PRE_DIVERT __tsan_mutex_pre_divert +#define ABSL_TSAN_MUTEX_POST_DIVERT __tsan_mutex_post_divert + +#else + +#define ABSL_TSAN_MUTEX_CREATE(...) +#define ABSL_TSAN_MUTEX_DESTROY(...) +#define ABSL_TSAN_MUTEX_PRE_LOCK(...) +#define ABSL_TSAN_MUTEX_POST_LOCK(...) +#define ABSL_TSAN_MUTEX_PRE_UNLOCK(...) +#define ABSL_TSAN_MUTEX_POST_UNLOCK(...) +#define ABSL_TSAN_MUTEX_PRE_SIGNAL(...) +#define ABSL_TSAN_MUTEX_POST_SIGNAL(...) +#define ABSL_TSAN_MUTEX_PRE_DIVERT(...) +#define ABSL_TSAN_MUTEX_POST_DIVERT(...) + +#endif + +#endif // ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_ diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/unaligned_access.h b/packages/react-native-executorch/third-party/include/absl/base/internal/unaligned_access.h new file mode 100644 index 000000000..597cce3c7 --- /dev/null +++ b/packages/react-native-executorch/third-party/include/absl/base/internal/unaligned_access.h @@ -0,0 +1,89 @@ +// +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +#ifndef ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_ +#define ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_ + +#include + +#include + +#include "absl/base/attributes.h" +#include "absl/base/config.h" +#include "absl/base/nullability.h" + +// unaligned APIs + +// Portable handling of unaligned loads, stores, and copies. + +// The unaligned API is C++ only. The declarations use C++ features +// (namespaces, inline) which are absent or incompatible in C. +#if defined(__cplusplus) +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +inline uint16_t UnalignedLoad16(absl::Nonnull p) { + uint16_t t; + memcpy(&t, p, sizeof t); + return t; +} + +inline uint32_t UnalignedLoad32(absl::Nonnull p) { + uint32_t t; + memcpy(&t, p, sizeof t); + return t; +} + +inline uint64_t UnalignedLoad64(absl::Nonnull p) { + uint64_t t; + memcpy(&t, p, sizeof t); + return t; +} + +inline void UnalignedStore16(absl::Nonnull p, uint16_t v) { + memcpy(p, &v, sizeof v); +} + +inline void UnalignedStore32(absl::Nonnull p, uint32_t v) { + memcpy(p, &v, sizeof v); +} + +inline void UnalignedStore64(absl::Nonnull p, uint64_t v) { + memcpy(p, &v, sizeof v); +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \ + (absl::base_internal::UnalignedLoad16(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \ + (absl::base_internal::UnalignedLoad32(_p)) +#define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \ + (absl::base_internal::UnalignedLoad64(_p)) + +#define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \ + (absl::base_internal::UnalignedStore16(_p, _val)) +#define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \ + (absl::base_internal::UnalignedStore32(_p, _val)) +#define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \ + (absl::base_internal::UnalignedStore64(_p, _val)) + +#endif // defined(__cplusplus), end of unaligned API + +#endif // ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_ diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/unscaledcycleclock.h b/packages/react-native-executorch/third-party/include/absl/base/internal/unscaledcycleclock.h new file mode 100644 index 000000000..2cbda0903 --- /dev/null +++ b/packages/react-native-executorch/third-party/include/absl/base/internal/unscaledcycleclock.h @@ -0,0 +1,96 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// UnscaledCycleClock +// An UnscaledCycleClock yields the value and frequency of a cycle counter +// that increments at a rate that is approximately constant. +// This class is for internal use only, you should consider using CycleClock +// instead. +// +// Notes: +// The cycle counter frequency is not necessarily the core clock frequency. +// That is, CycleCounter cycles are not necessarily "CPU cycles". +// +// An arbitrary offset may have been added to the counter at power on. +// +// On some platforms, the rate and offset of the counter may differ +// slightly when read from different CPUs of a multiprocessor. 
Usually, +// we try to ensure that the operating system adjusts values periodically +// so that values agree approximately. If you need stronger guarantees, +// consider using alternate interfaces. +// +// The CPU is not required to maintain the ordering of a cycle counter read +// with respect to surrounding instructions. + +#ifndef ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_ +#define ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_ + +#include + +#if defined(__APPLE__) +#include +#endif + +#include "absl/base/config.h" +#include "absl/base/internal/unscaledcycleclock_config.h" + +#if ABSL_USE_UNSCALED_CYCLECLOCK + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace time_internal { +class UnscaledCycleClockWrapperForGetCurrentTime; +} // namespace time_internal + +namespace base_internal { +class CycleClock; +class UnscaledCycleClockWrapperForInitializeFrequency; + +class UnscaledCycleClock { +private: + UnscaledCycleClock() = delete; + + // Return the value of a cycle counter that counts at a rate that is + // approximately constant. + static int64_t Now(); + + // Return the how much UnscaledCycleClock::Now() increases per second. + // This is not necessarily the core CPU clock frequency. + // It may be the nominal value report by the kernel, rather than a measured + // value. + static double Frequency(); + + // Allowed users + friend class base_internal::CycleClock; + friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime; + friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency; +}; + +#if defined(__x86_64__) + +inline int64_t UnscaledCycleClock::Now() { + uint64_t low, high; + __asm__ volatile("rdtsc" : "=a"(low), "=d"(high)); + return static_cast((high << 32) | low); +} + +#endif + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_USE_UNSCALED_CYCLECLOCK + +#endif // ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_ diff --git a/packages/react-native-executorch/third-party/include/absl/base/internal/unscaledcycleclock_config.h b/packages/react-native-executorch/third-party/include/absl/base/internal/unscaledcycleclock_config.h new file mode 100644 index 000000000..9a65178cd --- /dev/null +++ b/packages/react-native-executorch/third-party/include/absl/base/internal/unscaledcycleclock_config.h @@ -0,0 +1,62 @@ +// Copyright 2022 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_CONFIG_H_ +#define ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_CONFIG_H_ + +#if defined(__APPLE__) +#include +#endif + +// The following platforms have an implementation of a hardware counter. 
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \ + defined(__powerpc__) || defined(__ppc__) || defined(_M_IX86) || \ + (defined(_M_X64) && !defined(_M_ARM64EC)) +#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1 +#else +#define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0 +#endif + +// The following platforms often disable access to the hardware +// counter (through a sandbox) even if the underlying hardware has a +// usable counter. The CycleTimer interface also requires a *scaled* +// CycleClock that runs at atleast 1 MHz. We've found some Android +// ARM64 devices where this is not the case, so we disable it by +// default on Android ARM64. +#if defined(__native_client__) || (defined(__APPLE__)) || \ + (defined(__ANDROID__) && defined(__aarch64__)) +#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 0 +#else +#define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 1 +#endif + +// UnscaledCycleClock is an optional internal feature. +// Use "#if ABSL_USE_UNSCALED_CYCLECLOCK" to test for its presence. +// Can be overridden at compile-time via -DABSL_USE_UNSCALED_CYCLECLOCK=0|1 +#if !defined(ABSL_USE_UNSCALED_CYCLECLOCK) +#define ABSL_USE_UNSCALED_CYCLECLOCK \ + (ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION && \ + ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT) +#endif + +#if ABSL_USE_UNSCALED_CYCLECLOCK +// This macro can be used to test if UnscaledCycleClock::Frequency() +// is NominalCPUFrequency() on a particular platform. +#if (defined(__i386__) || defined(__x86_64__) || defined(_M_IX86) || \ + defined(_M_X64)) +#define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY +#endif +#endif + +#endif // ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_CONFIG_H_ diff --git a/packages/react-native-executorch/third-party/include/absl/base/no_destructor.h b/packages/react-native-executorch/third-party/include/absl/base/no_destructor.h new file mode 100644 index 000000000..9242d10bd --- /dev/null +++ b/packages/react-native-executorch/third-party/include/absl/base/no_destructor.h @@ -0,0 +1,208 @@ +// Copyright 2023 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: no_destructor.h +// ----------------------------------------------------------------------------- +// +// This header file defines the absl::NoDestructor wrapper for defining a +// static type that does not need to be destructed upon program exit. Instead, +// such an object survives during program exit (and can be safely accessed at +// any time). +// +// absl::NoDestructor is useful when when a variable has static storage +// duration but its type has a non-trivial destructor. Global constructors are +// not recommended because of the C++'s static initialization order fiasco (See +// https://en.cppreference.com/w/cpp/language/siof). Global destructors are not +// allowed due to similar concerns about destruction ordering. 
Using +// absl::NoDestructor as a function-local static prevents both of these +// issues. +// +// See below for complete details. + +#ifndef ABSL_BASE_NO_DESTRUCTOR_H_ +#define ABSL_BASE_NO_DESTRUCTOR_H_ + +#include +#include +#include + +#include "absl/base/config.h" +#include "absl/base/nullability.h" + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// absl::NoDestructor +// +// NoDestructor is a wrapper around an object of type T that behaves as an +// object of type T but never calls T's destructor. NoDestructor makes it +// safer and/or more efficient to use such objects in static storage contexts, +// ideally as function scope static variables. +// +// An instance of absl::NoDestructor has similar type semantics to an +// instance of T: +// +// * Constructs in the same manner as an object of type T through perfect +// forwarding. +// * Provides pointer/reference semantic access to the object of type T via +// `->`, `*`, and `get()`. +// (Note that `const NoDestructor` works like a pointer to const `T`.) +// +// Additionally, NoDestructor provides the following benefits: +// +// * Never calls T's destructor for the object +// * If the object is a function-local static variable, the type can be +// lazily constructed. +// +// An object of type NoDestructor is "trivially destructible" in the notion +// that its destructor is never run. +// +// Usage as Function Scope Static Variables +// +// Function static objects will be lazily initialized within static storage: +// +// // Function scope. +// const std::string& MyString() { +// static const absl::NoDestructor x("foo"); +// return *x; +// } +// +// For function static variables, NoDestructor avoids heap allocation and can be +// inlined in static storage, resulting in exactly-once, thread-safe +// construction of an object, and very fast access thereafter (the cost is a few +// extra cycles). +// +// Using NoDestructor in this manner is generally better than other patterns +// which require pointer chasing: +// +// // Prefer using absl::NoDestructor instead for the static variable. +// const std::string& MyString() { +// static const std::string* x = new std::string("foo"); +// return *x; +// } +// +// Usage as Global Static Variables +// +// NoDestructor allows declaration of a global object of type T that has a +// non-trivial destructor since its destructor is never run. However, such +// objects still need to worry about initialization order, so such use is not +// recommended, strongly discouraged by the Google C++ Style Guide, and outright +// banned in Chromium. +// See +// https://google.github.io/styleguide/cppguide.html#Static_and_Global_Variables +// +// // Global or namespace scope. +// absl::NoDestructor reg{"foo", "bar", 8008}; +// +// Note that if your object already has a trivial destructor, you don't need to +// use NoDestructor. +// +template class NoDestructor { +public: + // Forwards arguments to the T's constructor: calls T(args...). + template &...), + void(NoDestructor &)>::value, + int>::type = 0> + explicit constexpr NoDestructor(Ts &&...args) + : impl_(std::forward(args)...) {} + + // Forwards copy and move construction for T. Enables usage like this: + // static NoDestructor> x{{{"1", "2", "3"}}}; + // static NoDestructor> x{{1, 2, 3}}; + explicit constexpr NoDestructor(const T &x) : impl_(x) {} + explicit constexpr NoDestructor(T &&x) : impl_(std::move(x)) {} + + // No copying. 
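+  // (Deleting the copy operations also suppresses the implicit move
+  // operations, so the wrapped T is constructed in place exactly once and
+  // never transferred.)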
+ NoDestructor(const NoDestructor &) = delete; + NoDestructor &operator=(const NoDestructor &) = delete; + + // Pretend to be a smart pointer to T with deep constness. + // Never returns a null pointer. + T &operator*() { return *get(); } + absl::Nonnull operator->() { return get(); } + absl::Nonnull get() { return impl_.get(); } + const T &operator*() const { return *get(); } + absl::Nonnull operator->() const { return get(); } + absl::Nonnull get() const { return impl_.get(); } + +private: + class DirectImpl { + public: + template + explicit constexpr DirectImpl(Args &&...args) + : value_(std::forward(args)...) {} + absl::Nonnull get() const { return &value_; } + absl::Nonnull get() { return &value_; } + + private: + T value_; + }; + + class PlacementImpl { + public: + template explicit PlacementImpl(Args &&...args) { + new (&space_) T(std::forward(args)...); + } + absl::Nonnull get() const { + return Launder(reinterpret_cast(&space_)); + } + absl::Nonnull get() { return Launder(reinterpret_cast(&space_)); } + + private: + template + static absl::Nonnull
<P *> Launder(absl::Nonnull<P *>
p) { +#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606L + return std::launder(p); +#elif ABSL_HAVE_BUILTIN(__builtin_launder) + return __builtin_launder(p); +#else + // When `std::launder` or equivalent are not available, we rely on + // undefined behavior, which works as intended on Abseil's officially + // supported platforms as of Q3 2023. +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif + return p; +#if defined(__GNUC__) && !defined(__clang__) +#pragma GCC diagnostic pop +#endif +#endif + } + + alignas(T) unsigned char space_[sizeof(T)]; + }; + + // If the object is trivially destructible we use a member directly to avoid + // potential once-init runtime initialization. It somewhat defeats the + // purpose of NoDestructor in this case, but this makes the class more + // friendly to generic code. + std::conditional_t::value, DirectImpl, + PlacementImpl> + impl_; +}; + +#ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION +// Provide 'Class Template Argument Deduction': the type of NoDestructor's T +// will be the same type as the argument passed to NoDestructor's constructor. +template NoDestructor(T) -> NoDestructor; +#endif // ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_NO_DESTRUCTOR_H_ diff --git a/packages/react-native-executorch/third-party/include/absl/base/options.h b/packages/react-native-executorch/third-party/include/absl/base/options.h index 76daa90cd..8b2549d01 100644 --- a/packages/react-native-executorch/third-party/include/absl/base/options.h +++ b/packages/react-native-executorch/third-party/include/absl/base/options.h @@ -94,7 +94,7 @@ // User code should not inspect this macro. To check in the preprocessor if // absl::any is a typedef of std::any, use the feature macro ABSL_USES_STD_ANY. -#define ABSL_OPTION_USE_STD_ANY 2 +#define ABSL_OPTION_USE_STD_ANY 1 // ABSL_OPTION_USE_STD_OPTIONAL // @@ -120,7 +120,7 @@ // absl::optional is a typedef of std::optional, use the feature macro // ABSL_USES_STD_OPTIONAL. -#define ABSL_OPTION_USE_STD_OPTIONAL 2 +#define ABSL_OPTION_USE_STD_OPTIONAL 1 // ABSL_OPTION_USE_STD_STRING_VIEW // @@ -146,7 +146,7 @@ // absl::string_view is a typedef of std::string_view, use the feature macro // ABSL_USES_STD_STRING_VIEW. -#define ABSL_OPTION_USE_STD_STRING_VIEW 2 +#define ABSL_OPTION_USE_STD_STRING_VIEW 1 // ABSL_OPTION_USE_STD_VARIANT // @@ -172,7 +172,7 @@ // absl::variant is a typedef of std::variant, use the feature macro // ABSL_USES_STD_VARIANT. -#define ABSL_OPTION_USE_STD_VARIANT 2 +#define ABSL_OPTION_USE_STD_VARIANT 1 // ABSL_OPTION_USE_STD_ORDERING // @@ -199,7 +199,7 @@ // the ordering types are aliases of std:: ordering types, use the feature macro // ABSL_USES_STD_ORDERING. -#define ABSL_OPTION_USE_STD_ORDERING 2 +#define ABSL_OPTION_USE_STD_ORDERING 0 // ABSL_OPTION_USE_INLINE_NAMESPACE // ABSL_OPTION_INLINE_NAMESPACE_NAME diff --git a/packages/react-native-executorch/third-party/include/absl/base/prefetch.h b/packages/react-native-executorch/third-party/include/absl/base/prefetch.h new file mode 100644 index 000000000..4e50178c1 --- /dev/null +++ b/packages/react-native-executorch/third-party/include/absl/base/prefetch.h @@ -0,0 +1,209 @@ +// Copyright 2023 The Abseil Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: prefetch.h +// ----------------------------------------------------------------------------- +// +// This header file defines prefetch functions to prefetch memory contents +// into the first level cache (L1) for the current CPU. The prefetch logic +// offered in this header is limited to prefetching first level cachelines +// only, and is aimed at relatively 'simple' prefetching logic. +// +#ifndef ABSL_BASE_PREFETCH_H_ +#define ABSL_BASE_PREFETCH_H_ + +#include "absl/base/attributes.h" +#include "absl/base/config.h" + +#if defined(ABSL_INTERNAL_HAVE_SSE) +#include +#endif + +#if defined(_MSC_VER) +#include +#if defined(ABSL_INTERNAL_HAVE_SSE) +#pragma intrinsic(_mm_prefetch) +#endif +#endif + +namespace absl { +ABSL_NAMESPACE_BEGIN + +// Moves data into the L1 cache before it is read, or "prefetches" it. +// +// The value of `addr` is the address of the memory to prefetch. If +// the target and compiler support it, data prefetch instructions are +// generated. If the prefetch is done some time before the memory is +// read, it may be in the cache by the time the read occurs. +// +// This method prefetches data with the highest degree of temporal locality; +// data is prefetched where possible into all levels of the cache. +// +// Incorrect or gratuitous use of this function can degrade performance. +// Use this function only when representative benchmarks show an improvement. +// +// Example: +// +// // Computes incremental checksum for `data`. +// int ComputeChecksum(int sum, absl::string_view data); +// +// // Computes cumulative checksum for all values in `data` +// int ComputeChecksum(absl::Span data) { +// int sum = 0; +// auto it = data.begin(); +// auto pit = data.begin(); +// auto end = data.end(); +// for (int dist = 8; dist > 0 && pit != data.end(); --dist, ++pit) { +// absl::PrefetchToLocalCache(pit->data()); +// } +// for (; pit != end; ++pit, ++it) { +// sum = ComputeChecksum(sum, *it); +// absl::PrefetchToLocalCache(pit->data()); +// } +// for (; it != end; ++it) { +// sum = ComputeChecksum(sum, *it); +// } +// return sum; +// } +// +void PrefetchToLocalCache(const void *addr); + +// Moves data into the L1 cache before it is read, or "prefetches" it. +// +// This function is identical to `PrefetchToLocalCache()` except that it has +// non-temporal locality: the fetched data should not be left in any of the +// cache tiers. This is useful for cases where the data is used only once / +// short term, for example, invoking a destructor on an object. +// +// Incorrect or gratuitous use of this function can degrade performance. +// Use this function only when representative benchmarks show an improvement. 
+// +// Example: +// +// template +// void DestroyPointers(Iterator begin, Iterator end) { +// size_t distance = std::min(8U, bars.size()); +// +// int dist = 8; +// auto prefetch_it = begin; +// while (prefetch_it != end && --dist;) { +// absl::PrefetchToLocalCacheNta(*prefetch_it++); +// } +// while (prefetch_it != end) { +// delete *begin++; +// absl::PrefetchToLocalCacheNta(*prefetch_it++); +// } +// while (begin != end) { +// delete *begin++; +// } +// } +// +void PrefetchToLocalCacheNta(const void *addr); + +// Moves data into the L1 cache with the intent to modify it. +// +// This function is similar to `PrefetchToLocalCache()` except that it +// prefetches cachelines with an 'intent to modify' This typically includes +// invalidating cache entries for this address in all other cache tiers, and an +// exclusive access intent. +// +// Incorrect or gratuitous use of this function can degrade performance. As this +// function can invalidate cached cachelines on other caches and computer cores, +// incorrect usage of this function can have an even greater negative impact +// than incorrect regular prefetches. +// Use this function only when representative benchmarks show an improvement. +// +// Example: +// +// void* Arena::Allocate(size_t size) { +// void* ptr = AllocateBlock(size); +// absl::PrefetchToLocalCacheForWrite(ptr); +// return ptr; +// } +// +void PrefetchToLocalCacheForWrite(const void *addr); + +#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__) + +#define ABSL_HAVE_PREFETCH 1 + +// See __builtin_prefetch: +// https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html. +// +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void +PrefetchToLocalCache(const void *addr) { + __builtin_prefetch(addr, 0, 3); +} + +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void +PrefetchToLocalCacheNta(const void *addr) { + __builtin_prefetch(addr, 0, 0); +} + +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void +PrefetchToLocalCacheForWrite(const void *addr) { + // [x86] gcc/clang don't generate PREFETCHW for __builtin_prefetch(.., 1) + // unless -march=broadwell or newer; this is not generally the default, so we + // manually emit prefetchw. PREFETCHW is recognized as a no-op on older Intel + // processors and has been present on AMD processors since the K6-2. +#if defined(__x86_64__) && !defined(__PRFCHW__) + asm("prefetchw %0" : : "m"(*reinterpret_cast(addr))); +#else + __builtin_prefetch(addr, 1, 3); +#endif +} + +#elif defined(ABSL_INTERNAL_HAVE_SSE) + +#define ABSL_HAVE_PREFETCH 1 + +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void +PrefetchToLocalCache(const void *addr) { + _mm_prefetch(reinterpret_cast(addr), _MM_HINT_T0); +} + +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void +PrefetchToLocalCacheNta(const void *addr) { + _mm_prefetch(reinterpret_cast(addr), _MM_HINT_NTA); +} + +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void +PrefetchToLocalCacheForWrite(const void *addr) { +#if defined(_MM_HINT_ET0) + _mm_prefetch(reinterpret_cast(addr), _MM_HINT_ET0); +#elif !defined(_MSC_VER) && defined(__x86_64__) + // _MM_HINT_ET0 is not universally supported. As we commented further + // up, PREFETCHW is recognized as a no-op on older Intel processors + // and has been present on AMD processors since the K6-2. We have this + // disabled for MSVC compilers as this miscompiles on older MSVC compilers. 
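+  // The "m" constraint passes *addr as a memory operand, letting the
+  // compiler pick the addressing mode; prefetchw is only a cache hint and
+  // has no architecturally visible effect.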
+ asm("prefetchw %0" : : "m"(*reinterpret_cast(addr))); +#endif +} + +#else + +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void +PrefetchToLocalCache(const void *addr) {} +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void +PrefetchToLocalCacheNta(const void *addr) {} +ABSL_ATTRIBUTE_ALWAYS_INLINE inline void +PrefetchToLocalCacheForWrite(const void *addr) {} + +#endif + +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_PREFETCH_H_ diff --git a/packages/react-native-executorch/third-party/include/absl/base/thread_annotations.h b/packages/react-native-executorch/third-party/include/absl/base/thread_annotations.h new file mode 100644 index 000000000..a67384419 --- /dev/null +++ b/packages/react-native-executorch/third-party/include/absl/base/thread_annotations.h @@ -0,0 +1,333 @@ +// Copyright 2017 The Abseil Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ----------------------------------------------------------------------------- +// File: thread_annotations.h +// ----------------------------------------------------------------------------- +// +// This header file contains macro definitions for thread safety annotations +// that allow developers to document the locking policies of multi-threaded +// code. The annotations can also help program analysis tools to identify +// potential thread safety issues. +// +// These annotations are implemented using compiler attributes. Using the macros +// defined here instead of raw attributes allow for portability and future +// compatibility. +// +// When referring to mutexes in the arguments of the attributes, you should +// use variable names or more complex expressions (e.g. my_object->mutex_) +// that evaluate to a concrete mutex object whenever possible. If the mutex +// you want to refer to is not in scope, you may use a member pointer +// (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object. + +#ifndef ABSL_BASE_THREAD_ANNOTATIONS_H_ +#define ABSL_BASE_THREAD_ANNOTATIONS_H_ + +#include "absl/base/attributes.h" +#include "absl/base/config.h" + +// ABSL_GUARDED_BY() +// +// Documents if a shared field or global variable needs to be protected by a +// mutex. ABSL_GUARDED_BY() allows the user to specify a particular mutex that +// should be held when accessing the annotated variable. +// +// Although this annotation (and ABSL_PT_GUARDED_BY, below) cannot be applied to +// local variables, a local variable and its associated mutex can often be +// combined into a small class or struct, thereby allowing the annotation. +// +// Example: +// +// class Foo { +// Mutex mu_; +// int p1_ ABSL_GUARDED_BY(mu_); +// ... +// }; +#if ABSL_HAVE_ATTRIBUTE(guarded_by) +#define ABSL_GUARDED_BY(x) __attribute__((guarded_by(x))) +#else +#define ABSL_GUARDED_BY(x) +#endif + +// ABSL_PT_GUARDED_BY() +// +// Documents if the memory location pointed to by a pointer should be guarded +// by a mutex when dereferencing the pointer. +// +// Example: +// class Foo { +// Mutex mu_; +// int *p1_ ABSL_PT_GUARDED_BY(mu_); +// ... 
+// }; +// +// Note that a pointer variable to a shared memory location could itself be a +// shared variable. +// +// Example: +// +// // `q_`, guarded by `mu1_`, points to a shared memory location that is +// // guarded by `mu2_`: +// int *q_ ABSL_GUARDED_BY(mu1_) ABSL_PT_GUARDED_BY(mu2_); +#if ABSL_HAVE_ATTRIBUTE(pt_guarded_by) +#define ABSL_PT_GUARDED_BY(x) __attribute__((pt_guarded_by(x))) +#else +#define ABSL_PT_GUARDED_BY(x) +#endif + +// ABSL_ACQUIRED_AFTER() / ABSL_ACQUIRED_BEFORE() +// +// Documents the acquisition order between locks that can be held +// simultaneously by a thread. For any two locks that need to be annotated +// to establish an acquisition order, only one of them needs the annotation. +// (i.e. You don't have to annotate both locks with both ABSL_ACQUIRED_AFTER +// and ABSL_ACQUIRED_BEFORE.) +// +// As with ABSL_GUARDED_BY, this is only applicable to mutexes that are shared +// fields or global variables. +// +// Example: +// +// Mutex m1_; +// Mutex m2_ ABSL_ACQUIRED_AFTER(m1_); +#if ABSL_HAVE_ATTRIBUTE(acquired_after) +#define ABSL_ACQUIRED_AFTER(...) __attribute__((acquired_after(__VA_ARGS__))) +#else +#define ABSL_ACQUIRED_AFTER(...) +#endif + +#if ABSL_HAVE_ATTRIBUTE(acquired_before) +#define ABSL_ACQUIRED_BEFORE(...) __attribute__((acquired_before(__VA_ARGS__))) +#else +#define ABSL_ACQUIRED_BEFORE(...) +#endif + +// ABSL_EXCLUSIVE_LOCKS_REQUIRED() / ABSL_SHARED_LOCKS_REQUIRED() +// +// Documents a function that expects a mutex to be held prior to entry. +// The mutex is expected to be held both on entry to, and exit from, the +// function. +// +// An exclusive lock allows read-write access to the guarded data member(s), and +// only one thread can acquire a lock exclusively at any one time. A shared lock +// allows read-only access, and any number of threads can acquire a shared lock +// concurrently. +// +// Generally, non-const methods should be annotated with +// ABSL_EXCLUSIVE_LOCKS_REQUIRED, while const methods should be annotated with +// ABSL_SHARED_LOCKS_REQUIRED. +// +// Example: +// +// Mutex mu1, mu2; +// int a ABSL_GUARDED_BY(mu1); +// int b ABSL_GUARDED_BY(mu2); +// +// void foo() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... } +// void bar() const ABSL_SHARED_LOCKS_REQUIRED(mu1, mu2) { ... } +#if ABSL_HAVE_ATTRIBUTE(exclusive_locks_required) +#define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...) \ + __attribute__((exclusive_locks_required(__VA_ARGS__))) +#else +#define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...) +#endif + +#if ABSL_HAVE_ATTRIBUTE(shared_locks_required) +#define ABSL_SHARED_LOCKS_REQUIRED(...) \ + __attribute__((shared_locks_required(__VA_ARGS__))) +#else +#define ABSL_SHARED_LOCKS_REQUIRED(...) +#endif + +// ABSL_LOCKS_EXCLUDED() +// +// Documents the locks that cannot be held by callers of this function, as they +// might be acquired by this function (Abseil's `Mutex` locks are +// non-reentrant). +#if ABSL_HAVE_ATTRIBUTE(locks_excluded) +#define ABSL_LOCKS_EXCLUDED(...) __attribute__((locks_excluded(__VA_ARGS__))) +#else +#define ABSL_LOCKS_EXCLUDED(...) +#endif + +// ABSL_LOCK_RETURNED() +// +// Documents a function that returns a mutex without acquiring it. For example, +// a public getter method that returns a pointer to a private mutex should +// be annotated with ABSL_LOCK_RETURNED. +#if ABSL_HAVE_ATTRIBUTE(lock_returned) +#define ABSL_LOCK_RETURNED(x) __attribute__((lock_returned(x))) +#else +#define ABSL_LOCK_RETURNED(x) +#endif + +// ABSL_LOCKABLE +// +// Documents if a class/type is a lockable type (such as the `Mutex` class). 
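+//
+// Example (a sketch; `MyMutex` is an illustrative custom lockable type,
+// annotated with macros defined later in this header):
+//
+//   class ABSL_LOCKABLE MyMutex {
+//    public:
+//     void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION();
+//     void Unlock() ABSL_UNLOCK_FUNCTION();
+//   };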
+#if ABSL_HAVE_ATTRIBUTE(lockable) +#define ABSL_LOCKABLE __attribute__((lockable)) +#else +#define ABSL_LOCKABLE +#endif + +// ABSL_SCOPED_LOCKABLE +// +// Documents if a class does RAII locking (such as the `MutexLock` class). +// The constructor should use `LOCK_FUNCTION()` to specify the mutex that is +// acquired, and the destructor should use `UNLOCK_FUNCTION()` with no +// arguments; the analysis will assume that the destructor unlocks whatever the +// constructor locked. +#if ABSL_HAVE_ATTRIBUTE(scoped_lockable) +#define ABSL_SCOPED_LOCKABLE __attribute__((scoped_lockable)) +#else +#define ABSL_SCOPED_LOCKABLE +#endif + +// ABSL_EXCLUSIVE_LOCK_FUNCTION() +// +// Documents functions that acquire a lock in the body of a function, and do +// not release it. +#if ABSL_HAVE_ATTRIBUTE(exclusive_lock_function) +#define ABSL_EXCLUSIVE_LOCK_FUNCTION(...) \ + __attribute__((exclusive_lock_function(__VA_ARGS__))) +#else +#define ABSL_EXCLUSIVE_LOCK_FUNCTION(...) +#endif + +// ABSL_SHARED_LOCK_FUNCTION() +// +// Documents functions that acquire a shared (reader) lock in the body of a +// function, and do not release it. +#if ABSL_HAVE_ATTRIBUTE(shared_lock_function) +#define ABSL_SHARED_LOCK_FUNCTION(...) \ + __attribute__((shared_lock_function(__VA_ARGS__))) +#else +#define ABSL_SHARED_LOCK_FUNCTION(...) +#endif + +// ABSL_UNLOCK_FUNCTION() +// +// Documents functions that expect a lock to be held on entry to the function, +// and release it in the body of the function. +#if ABSL_HAVE_ATTRIBUTE(unlock_function) +#define ABSL_UNLOCK_FUNCTION(...) __attribute__((unlock_function(__VA_ARGS__))) +#else +#define ABSL_UNLOCK_FUNCTION(...) +#endif + +// ABSL_EXCLUSIVE_TRYLOCK_FUNCTION() / ABSL_SHARED_TRYLOCK_FUNCTION() +// +// Documents functions that try to acquire a lock, and return success or failure +// (or a non-boolean value that can be interpreted as a boolean). +// The first argument should be `true` for functions that return `true` on +// success, or `false` for functions that return `false` on success. The second +// argument specifies the mutex that is locked on success. If unspecified, this +// mutex is assumed to be `this`. +#if ABSL_HAVE_ATTRIBUTE(exclusive_trylock_function) +#define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...) \ + __attribute__((exclusive_trylock_function(__VA_ARGS__))) +#else +#define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...) +#endif + +#if ABSL_HAVE_ATTRIBUTE(shared_trylock_function) +#define ABSL_SHARED_TRYLOCK_FUNCTION(...) \ + __attribute__((shared_trylock_function(__VA_ARGS__))) +#else +#define ABSL_SHARED_TRYLOCK_FUNCTION(...) +#endif + +// ABSL_ASSERT_EXCLUSIVE_LOCK() / ABSL_ASSERT_SHARED_LOCK() +// +// Documents functions that dynamically check to see if a lock is held, and fail +// if it is not held. +#if ABSL_HAVE_ATTRIBUTE(assert_exclusive_lock) +#define ABSL_ASSERT_EXCLUSIVE_LOCK(...) \ + __attribute__((assert_exclusive_lock(__VA_ARGS__))) +#else +#define ABSL_ASSERT_EXCLUSIVE_LOCK(...) +#endif + +#if ABSL_HAVE_ATTRIBUTE(assert_shared_lock) +#define ABSL_ASSERT_SHARED_LOCK(...) \ + __attribute__((assert_shared_lock(__VA_ARGS__))) +#else +#define ABSL_ASSERT_SHARED_LOCK(...) +#endif + +// ABSL_NO_THREAD_SAFETY_ANALYSIS +// +// Turns off thread safety checking within the body of a particular function. +// This annotation is used to mark functions that are known to be correct, but +// the locking behavior is more complicated than the analyzer can handle. 
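+//
+// Example (a sketch; the conditional lock hand-off shown is illustrative):
+//
+//   // Correct, but the conditional transfer is beyond the analysis.
+//   void Transfer(Mutex *from, Mutex *to) ABSL_NO_THREAD_SAFETY_ANALYSIS {
+//     from->Unlock();
+//     if (to != nullptr) to->Lock();
+//   }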
+#if ABSL_HAVE_ATTRIBUTE(no_thread_safety_analysis) +#define ABSL_NO_THREAD_SAFETY_ANALYSIS \ + __attribute__((no_thread_safety_analysis)) +#else +#define ABSL_NO_THREAD_SAFETY_ANALYSIS +#endif + +//------------------------------------------------------------------------------ +// Tool-Supplied Annotations +//------------------------------------------------------------------------------ + +// ABSL_TS_UNCHECKED should be placed around lock expressions that are not valid +// C++ syntax, but which are present for documentation purposes. These +// annotations will be ignored by the analysis. +#define ABSL_TS_UNCHECKED(x) "" + +// ABSL_TS_FIXME is used to mark lock expressions that are not valid C++ syntax. +// It is used by automated tools to mark and disable invalid expressions. +// The annotation should either be fixed, or changed to ABSL_TS_UNCHECKED. +#define ABSL_TS_FIXME(x) "" + +// Like ABSL_NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body +// of a particular function. However, this attribute is used to mark functions +// that are incorrect and need to be fixed. It is used by automated tools to +// avoid breaking the build when the analysis is updated. +// Code owners are expected to eventually fix the routine. +#define ABSL_NO_THREAD_SAFETY_ANALYSIS_FIXME ABSL_NO_THREAD_SAFETY_ANALYSIS + +// Similar to ABSL_NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks a +// ABSL_GUARDED_BY annotation that needs to be fixed, because it is producing +// thread safety warning. It disables the ABSL_GUARDED_BY. +#define ABSL_GUARDED_BY_FIXME(x) + +// Disables warnings for a single read operation. This can be used to avoid +// warnings when it is known that the read is not actually involved in a race, +// but the compiler cannot confirm that. +#define ABSL_TS_UNCHECKED_READ(x) absl::base_internal::ts_unchecked_read(x) + +namespace absl { +ABSL_NAMESPACE_BEGIN +namespace base_internal { + +// Takes a reference to a guarded data member, and returns an unguarded +// reference. +// Do not use this function directly, use ABSL_TS_UNCHECKED_READ instead. +template +inline const T &ts_unchecked_read(const T &v) ABSL_NO_THREAD_SAFETY_ANALYSIS { + return v; +} + +template +inline T &ts_unchecked_read(T &v) ABSL_NO_THREAD_SAFETY_ANALYSIS { + return v; +} + +} // namespace base_internal +ABSL_NAMESPACE_END +} // namespace absl + +#endif // ABSL_BASE_THREAD_ANNOTATIONS_H_ diff --git a/packages/react-native-executorch/third-party/include/absl/memory/memory.h b/packages/react-native-executorch/third-party/include/absl/memory/memory.h deleted file mode 100644 index 72ee30f23..000000000 --- a/packages/react-native-executorch/third-party/include/absl/memory/memory.h +++ /dev/null @@ -1,275 +0,0 @@ -// Copyright 2017 The Abseil Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// ----------------------------------------------------------------------------- -// File: memory.h -// ----------------------------------------------------------------------------- -// -// This header file contains utility functions for managing the creation and -// conversion of smart pointers. This file is an extension to the C++ -// standard library header file. - -#ifndef ABSL_MEMORY_MEMORY_H_ -#define ABSL_MEMORY_MEMORY_H_ - -#include -#include -#include -#include -#include -#include - -#include "absl/base/macros.h" -#include "absl/meta/type_traits.h" - -namespace absl { -ABSL_NAMESPACE_BEGIN - -// ----------------------------------------------------------------------------- -// Function Template: WrapUnique() -// ----------------------------------------------------------------------------- -// -// Adopts ownership from a raw pointer and transfers it to the returned -// `std::unique_ptr`, whose type is deduced. Because of this deduction, *do not* -// specify the template type `T` when calling `WrapUnique`. -// -// Example: -// X* NewX(int, int); -// auto x = WrapUnique(NewX(1, 2)); // 'x' is std::unique_ptr. -// -// Do not call WrapUnique with an explicit type, as in -// `WrapUnique(NewX(1, 2))`. The purpose of WrapUnique is to automatically -// deduce the pointer type. If you wish to make the type explicit, just use -// `std::unique_ptr` directly. -// -// auto x = std::unique_ptr(NewX(1, 2)); -// - or - -// std::unique_ptr x(NewX(1, 2)); -// -// While `absl::WrapUnique` is useful for capturing the output of a raw -// pointer factory, prefer 'absl::make_unique(args...)' over -// 'absl::WrapUnique(new T(args...))'. -// -// auto x = WrapUnique(new X(1, 2)); // works, but nonideal. -// auto x = make_unique(1, 2); // safer, standard, avoids raw 'new'. -// -// Note that `absl::WrapUnique(p)` is valid only if `delete p` is a valid -// expression. In particular, `absl::WrapUnique()` cannot wrap pointers to -// arrays, functions or void, and it must not be used to capture pointers -// obtained from array-new expressions (even though that would compile!). -template std::unique_ptr WrapUnique(T *ptr) { - static_assert(!std::is_array::value, "array types are unsupported"); - static_assert(std::is_object::value, "non-object types are unsupported"); - return std::unique_ptr(ptr); -} - -// ----------------------------------------------------------------------------- -// Function Template: make_unique() -// ----------------------------------------------------------------------------- -// -// Creates a `std::unique_ptr<>`, while avoiding issues creating temporaries -// during the construction process. `absl::make_unique<>` also avoids redundant -// type declarations, by avoiding the need to explicitly use the `new` operator. -// -// https://en.cppreference.com/w/cpp/memory/unique_ptr/make_unique -// -// For more background on why `std::unique_ptr(new T(a,b))` is problematic, -// see Herb Sutter's explanation on -// (Exception-Safe Function Calls)[https://herbsutter.com/gotw/_102/]. -// (In general, reviewers should treat `new T(a,b)` with scrutiny.) -// -// Historical note: Abseil once provided a C++11 compatible implementation of -// the C++14's `std::make_unique`. Now that C++11 support has been sunsetted, -// `absl::make_unique` simply uses the STL-provided implementation. New code -// should use `std::make_unique`. 
-using std::make_unique; - -// ----------------------------------------------------------------------------- -// Function Template: RawPtr() -// ----------------------------------------------------------------------------- -// -// Extracts the raw pointer from a pointer-like value `ptr`. `absl::RawPtr` is -// useful within templates that need to handle a complement of raw pointers, -// `std::nullptr_t`, and smart pointers. -template auto RawPtr(T &&ptr) -> decltype(std::addressof(*ptr)) { - // ptr is a forwarding reference to support Ts with non-const operators. - return (ptr != nullptr) ? std::addressof(*ptr) : nullptr; -} -inline std::nullptr_t RawPtr(std::nullptr_t) { return nullptr; } - -// ----------------------------------------------------------------------------- -// Function Template: ShareUniquePtr() -// ----------------------------------------------------------------------------- -// -// Adopts a `std::unique_ptr` rvalue and returns a `std::shared_ptr` of deduced -// type. Ownership (if any) of the held value is transferred to the returned -// shared pointer. -// -// Example: -// -// auto up = absl::make_unique(10); -// auto sp = absl::ShareUniquePtr(std::move(up)); // shared_ptr -// CHECK_EQ(*sp, 10); -// CHECK(up == nullptr); -// -// Note that this conversion is correct even when T is an array type, and more -// generally it works for *any* deleter of the `unique_ptr` (single-object -// deleter, array deleter, or any custom deleter), since the deleter is adopted -// by the shared pointer as well. The deleter is copied (unless it is a -// reference). -// -// Implements the resolution of [LWG 2415](http://wg21.link/lwg2415), by which a -// null shared pointer does not attempt to call the deleter. -template -std::shared_ptr ShareUniquePtr(std::unique_ptr &&ptr) { - return ptr ? std::shared_ptr(std::move(ptr)) : std::shared_ptr(); -} - -// ----------------------------------------------------------------------------- -// Function Template: WeakenPtr() -// ----------------------------------------------------------------------------- -// -// Creates a weak pointer associated with a given shared pointer. The returned -// value is a `std::weak_ptr` of deduced type. -// -// Example: -// -// auto sp = std::make_shared(10); -// auto wp = absl::WeakenPtr(sp); -// CHECK_EQ(sp.get(), wp.lock().get()); -// sp.reset(); -// CHECK(wp.lock() == nullptr); -// -template -std::weak_ptr WeakenPtr(const std::shared_ptr &ptr) { - return std::weak_ptr(ptr); -} - -// ----------------------------------------------------------------------------- -// Class Template: pointer_traits -// ----------------------------------------------------------------------------- -// -// Historical note: Abseil once provided an implementation of -// `std::pointer_traits` for platforms that had not yet provided it. Those -// platforms are no longer supported. New code should simply use -// `std::pointer_traits`. -using std::pointer_traits; - -// ----------------------------------------------------------------------------- -// Class Template: allocator_traits -// ----------------------------------------------------------------------------- -// -// Historical note: Abseil once provided an implementation of -// `std::allocator_traits` for platforms that had not yet provided it. Those -// platforms are no longer supported. New code should simply use -// `std::allocator_traits`. -using std::allocator_traits; - -namespace memory_internal { - -// ExtractOr::type evaluates to E if possible. Otherwise, D. -template