| File: | out/../deps/v8/src/libplatform/default-job.cc |
| Warning: | line 80, column 62 The result of the left shift is undefined due to shifting by '32', which is greater or equal to the width of type 'uint32_t' |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
| 1 | // Copyright 2020 the V8 project authors. All rights reserved. | |||
| 2 | // Use of this source code is governed by a BSD-style license that can be | |||
| 3 | // found in the LICENSE file. | |||
| 4 | ||||
| 5 | #include "src/libplatform/default-job.h" | |||
| 6 | ||||
| 7 | #include "src/base/bits.h" | |||
| 8 | #include "src/base/macros.h" | |||
| 9 | ||||
| 10 | namespace v8 { | |||
| 11 | namespace platform { | |||
namespace {

// Capped to allow assigning task_ids from a bitfield: one bit per worker in
// a 32-bit atomic (see DefaultJobState::AcquireTaskId()).
constexpr size_t kMaxWorkersPerJob = 32;

}  // namespace
| 18 | ||||
// Returns the cached task id (if one was acquired via GetTaskId()) back to
// the job so another worker can reuse it.
DefaultJobState::JobDelegate::~JobDelegate() {
  static_assert(kInvalidTaskId >= kMaxWorkersPerJob,
                "kInvalidTaskId must be outside of the range of valid task_ids "
                "[0, kMaxWorkersPerJob)");
  // Only release if a task id was actually acquired.
  if (task_id_ != kInvalidTaskId) outer_->ReleaseTaskId(task_id_);
}
| 25 | ||||
// Lazily acquires a task id from the job on first call and caches it so
// repeated calls from the same worker return the same id.
uint8_t DefaultJobState::JobDelegate::GetTaskId() {
  if (task_id_ == kInvalidTaskId) task_id_ = outer_->AcquireTaskId();
  return task_id_;
}
| 30 | ||||
// The worker count is clamped to kMaxWorkersPerJob so that task ids can be
// tracked in the 32-bit |assigned_task_ids_| bitfield (see AcquireTaskId()).
DefaultJobState::DefaultJobState(Platform* platform,
                                 std::unique_ptr<JobTask> job_task,
                                 TaskPriority priority,
                                 size_t num_worker_threads)
    : platform_(platform),
      job_task_(std::move(job_task)),
      priority_(priority),
      num_worker_threads_(std::min(num_worker_threads, kMaxWorkersPerJob)) {}
| 39 | ||||
| 40 | DefaultJobState::~DefaultJobState() { DCHECK_EQ(0U, active_workers_)((void) 0); } | |||
| 41 | ||||
// Posts enough worker tasks to bring the worker count (active plus already
// posted-but-not-started) up to the current capped max concurrency.
void DefaultJobState::NotifyConcurrencyIncrease() {
  if (is_canceled_.load(std::memory_order_relaxed)) return;

  size_t num_tasks_to_post = 0;
  TaskPriority priority;
  {
    base::MutexGuard guard(&mutex_);
    const size_t max_concurrency = CappedMaxConcurrency(active_workers_);
    // Consider |pending_tasks_| to avoid posting too many tasks.
    if (max_concurrency > (active_workers_ + pending_tasks_)) {
      num_tasks_to_post = max_concurrency - active_workers_ - pending_tasks_;
      pending_tasks_ += num_tasks_to_post;
    }
    priority = priority_;
  }
  // Post additional worker tasks to reach |max_concurrency|. Done outside the
  // critical section so |mutex_| is not held while calling into the platform.
  for (size_t i = 0; i < num_tasks_to_post; ++i) {
    CallOnWorkerThread(priority, std::make_unique<DefaultJobWorker>(
                                     shared_from_this(), job_task_.get()));
  }
}
| 63 | ||||
| 64 | uint8_t DefaultJobState::AcquireTaskId() { | |||
| 65 | static_assert(kMaxWorkersPerJob <= sizeof(assigned_task_ids_) * 8, | |||
| 66 | "TaskId bitfield isn't big enough to fit kMaxWorkersPerJob."); | |||
| 67 | uint32_t assigned_task_ids = | |||
| 68 | assigned_task_ids_.load(std::memory_order_relaxed); | |||
| 69 | DCHECK_LE(v8::base::bits::CountPopulation(assigned_task_ids) + 1,((void) 0) | |||
| 70 | kMaxWorkersPerJob)((void) 0); | |||
| 71 | uint32_t new_assigned_task_ids = 0; | |||
| 72 | uint8_t task_id = 0; | |||
| 73 | // memory_order_acquire on success, matched with memory_order_release in | |||
| 74 | // ReleaseTaskId() so that operations done by previous threads that had | |||
| 75 | // the same task_id become visible to the current thread. | |||
| 76 | do { | |||
| 77 | // Count trailing one bits. This is the id of the right-most 0-bit in | |||
| 78 | // |assigned_task_ids|. | |||
| 79 | task_id = v8::base::bits::CountTrailingZeros32(~assigned_task_ids); | |||
| 80 | new_assigned_task_ids = assigned_task_ids | (uint32_t(1) << task_id); | |||
| ||||
| 81 | } while (!assigned_task_ids_.compare_exchange_weak( | |||
| 82 | assigned_task_ids, new_assigned_task_ids, std::memory_order_acquire, | |||
| 83 | std::memory_order_relaxed)); | |||
| 84 | return task_id; | |||
| 85 | } | |||
| 86 | ||||
// Returns |task_id| to the pool by atomically clearing its bit.
void DefaultJobState::ReleaseTaskId(uint8_t task_id) {
  // memory_order_release to match AcquireTaskId().
  uint32_t previous_task_ids = assigned_task_ids_.fetch_and(
      ~(uint32_t(1) << task_id), std::memory_order_release);
  // The bit must have been set, i.e. the id was actually assigned.
  DCHECK(previous_task_ids & (uint32_t(1) << task_id));
  USE(previous_task_ids);
}
| 94 | ||||
// Contributes the calling thread to the job until no work remains. Raises the
// job priority to kUserBlocking for the duration of the join.
void DefaultJobState::Join() {
  bool can_run = false;
  {
    base::MutexGuard guard(&mutex_);
    priority_ = TaskPriority::kUserBlocking;
    // Reserve a worker for the joining thread. GetMaxConcurrency() is ignored
    // here, but WaitForParticipationOpportunityLockRequired() waits for
    // workers to return if necessary so we don't exceed GetMaxConcurrency().
    num_worker_threads_ = platform_->NumberOfWorkerThreads() + 1;
    ++active_workers_;
    can_run = WaitForParticipationOpportunityLockRequired();
  }
  // The delegate releases this thread's task id (if acquired) on destruction.
  DefaultJobState::JobDelegate delegate(this, true);
  while (can_run) {
    // Run() is called without holding |mutex_|; the lock is re-taken only to
    // re-evaluate whether this thread may continue participating.
    job_task_->Run(&delegate);
    base::MutexGuard guard(&mutex_);
    can_run = WaitForParticipationOpportunityLockRequired();
  }
}
| 114 | ||||
// Cancels the job and blocks until every active worker has returned.
void DefaultJobState::CancelAndWait() {
  {
    base::MutexGuard guard(&mutex_);
    is_canceled_.store(true, std::memory_order_relaxed);
    // Workers signal |worker_released_condition_| as they leave.
    while (active_workers_ > 0) {
      worker_released_condition_.Wait(&mutex_);
    }
  }
}
| 124 | ||||
// Cancels the job without waiting; workers observe the flag and bail out.
void DefaultJobState::CancelAndDetach() {
  is_canceled_.store(true, std::memory_order_relaxed);
}
| 128 | ||||
// A job is active while the task still reports work to do or workers are
// still running. GetMaxConcurrency() is queried first (short-circuit order
// is intentional: the task's answer may depend on current worker count).
bool DefaultJobState::IsActive() {
  base::MutexGuard guard(&mutex_);
  return job_task_->GetMaxConcurrency(active_workers_) != 0 ||
         active_workers_ != 0;
}
| 134 | ||||
// Called when a posted worker task starts. Returns true if the worker may run
// the job (it is then counted as active); false if it should return
// immediately because the job was canceled or concurrency is saturated.
bool DefaultJobState::CanRunFirstTask() {
  base::MutexGuard guard(&mutex_);
  // This posted task is no longer pending, regardless of the outcome below.
  --pending_tasks_;
  if (is_canceled_.load(std::memory_order_relaxed)) return false;
  if (active_workers_ >= std::min(job_task_->GetMaxConcurrency(active_workers_),
                                  num_worker_threads_)) {
    return false;
  }
  // Acquire current worker.
  ++active_workers_;
  return true;
}
| 147 | ||||
// Called by a worker after each Run() invocation. Returns true if the worker
// should run again, false if it should return. May post additional worker
// tasks when max concurrency has increased.
bool DefaultJobState::DidRunTask() {
  size_t num_tasks_to_post = 0;
  TaskPriority priority;
  {
    base::MutexGuard guard(&mutex_);
    // Exclude this worker from the count when querying max concurrency, since
    // it is about to either continue or leave.
    const size_t max_concurrency = CappedMaxConcurrency(active_workers_ - 1);
    if (is_canceled_.load(std::memory_order_relaxed) ||
        active_workers_ > max_concurrency) {
      // Release current worker and notify.
      --active_workers_;
      worker_released_condition_.NotifyOne();
      return false;
    }
    // Consider |pending_tasks_| to avoid posting too many tasks.
    if (max_concurrency > active_workers_ + pending_tasks_) {
      num_tasks_to_post = max_concurrency - active_workers_ - pending_tasks_;
      pending_tasks_ += num_tasks_to_post;
    }
    priority = priority_;
  }
  // Post additional worker tasks to reach |max_concurrency| in the case that
  // max concurrency increased. This is not strictly necessary, since
  // NotifyConcurrencyIncrease() should eventually be invoked. However, some
  // users of PostJob() batch work and tend to call NotifyConcurrencyIncrease()
  // late. Posting here allows us to spawn new workers sooner.
  for (size_t i = 0; i < num_tasks_to_post; ++i) {
    CallOnWorkerThread(priority, std::make_unique<DefaultJobWorker>(
                                     shared_from_this(), job_task_.get()));
  }
  return true;
}
| 179 | ||||
// Blocks until this thread may participate (returns true) or the job is done
// (returns false, and the job is marked canceled). |mutex_| must be held.
bool DefaultJobState::WaitForParticipationOpportunityLockRequired() {
  size_t max_concurrency = CappedMaxConcurrency(active_workers_ - 1);
  // Wait until this worker fits under the concurrency cap. Never wait as the
  // sole remaining worker; that terminal case is resolved below.
  while (active_workers_ > max_concurrency && active_workers_ > 1) {
    worker_released_condition_.Wait(&mutex_);
    max_concurrency = CappedMaxConcurrency(active_workers_ - 1);
  }
  if (active_workers_ <= max_concurrency) return true;
  // This thread is the last worker and the task reports no more work left:
  // finish the job.
  DCHECK_EQ(1U, active_workers_);
  DCHECK_EQ(0U, max_concurrency);
  active_workers_ = 0;
  is_canceled_.store(true, std::memory_order_relaxed);
  return false;
}
| 193 | ||||
| 194 | size_t DefaultJobState::CappedMaxConcurrency(size_t worker_count) const { | |||
| 195 | return std::min(job_task_->GetMaxConcurrency(worker_count), | |||
| 196 | num_worker_threads_); | |||
| 197 | } | |||
| 198 | ||||
| 199 | void DefaultJobState::CallOnWorkerThread(TaskPriority priority, | |||
| 200 | std::unique_ptr<Task> task) { | |||
| 201 | switch (priority) { | |||
| 202 | case TaskPriority::kBestEffort: | |||
| 203 | return platform_->CallLowPriorityTaskOnWorkerThread(std::move(task)); | |||
| 204 | case TaskPriority::kUserVisible: | |||
| 205 | return platform_->CallOnWorkerThread(std::move(task)); | |||
| 206 | case TaskPriority::kUserBlocking: | |||
| 207 | return platform_->CallBlockingTaskOnWorkerThread(std::move(task)); | |||
| 208 | } | |||
| 209 | } | |||
| 210 | ||||
// Updates the priority used for worker tasks posted after this call.
void DefaultJobState::UpdatePriority(TaskPriority priority) {
  base::MutexGuard guard(&mutex_);
  priority_ = priority;
}
| 215 | ||||
// Takes shared ownership of the job state and immediately schedules workers.
DefaultJobHandle::DefaultJobHandle(std::shared_ptr<DefaultJobState> state)
    : state_(std::move(state)) {
  state_->NotifyConcurrencyIncrease();
}

// The handle must be joined, canceled, or detached before destruction.
DefaultJobHandle::~DefaultJobHandle() { DCHECK_EQ(nullptr, state_); }
| 222 | ||||
// Joins the job on this thread, then releases the job state reference.
void DefaultJobHandle::Join() {
  state_->Join();
  state_ = nullptr;
}
// Cancels the job and waits for workers, then releases the state reference.
void DefaultJobHandle::Cancel() {
  state_->CancelAndWait();
  state_ = nullptr;
}
| 231 | ||||
// Cancels the job without waiting, then releases the state reference.
void DefaultJobHandle::CancelAndDetach() {
  state_->CancelAndDetach();
  state_ = nullptr;
}
| 236 | ||||
| 237 | bool DefaultJobHandle::IsActive() { return state_->IsActive(); } | |||
| 238 | ||||
// Forwards the new priority to the shared job state.
void DefaultJobHandle::UpdatePriority(TaskPriority priority) {
  state_->UpdatePriority(priority);
}
| 242 | ||||
| 243 | } // namespace platform | |||
| 244 | } // namespace v8 |
| 1 | // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 | // Use of this source code is governed by a BSD-style license that can be |
| 3 | // found in the LICENSE file. |
| 4 | |
| 5 | #ifndef V8_BASE_BITS_H_ |
| 6 | #define V8_BASE_BITS_H_ |
| 7 | |
| 8 | #include <stdint.h> |
| 9 | #include <type_traits> |
| 10 | |
| 11 | #include "src/base/base-export.h" |
| 12 | #include "src/base/macros.h" |
| 13 | #if V8_CC_MSVC |
| 14 | #include <intrin.h> |
| 15 | #endif |
| 16 | #if V8_OS_WIN32 |
| 17 | #include "src/base/win32-headers.h" |
| 18 | #endif |
| 19 | |
| 20 | namespace v8 { |
| 21 | namespace base { |
| 22 | namespace bits { |
| 23 | |
// CountPopulation(value) returns the number of bits set in |value|.
template <typename T>
constexpr inline
    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
                            unsigned>::type
    CountPopulation(T value) {
  STATIC_ASSERT(sizeof(T) <= 8);
#if V8_HAS_BUILTIN_POPCOUNT
  return sizeof(T) == 8 ? __builtin_popcountll(static_cast<uint64_t>(value))
                        : __builtin_popcount(static_cast<uint32_t>(value));
#else
  // Fall back to divide-and-conquer popcount (see "Hacker's Delight" by Henry
  // S. Warren, Jr.), chapter 5-1.
  constexpr uint64_t mask[] = {0x5555555555555555, 0x3333333333333333,
                               0x0f0f0f0f0f0f0f0f};
  // Start with 64 buckets of 1 bits, holding values from [0,1].
  value = ((value >> 1) & mask[0]) + (value & mask[0]);
  // Having 32 buckets of 2 bits, holding values from [0,2] now.
  value = ((value >> 2) & mask[1]) + (value & mask[1]);
  // Having 16 buckets of 4 bits, holding values from [0,4] now.
  value = ((value >> 4) & mask[2]) + (value & mask[2]);
  // Having 8 buckets of 8 bits, holding values from [0,8] now.
  // From this point on, the buckets are bigger than the number of bits
  // required to hold the values, and the buckets are bigger the maximum
  // result, so there's no need to mask value anymore, since there's no
  // more risk of overflow between buckets.
  // The |sizeof(T) > N ? shift : 0| form keeps the shift amount legal for
  // narrow T even though the branch is only taken for wide T.
  if (sizeof(T) > 1) value = (value >> (sizeof(T) > 1 ? 8 : 0)) + value;
  // Having 4 buckets of 16 bits, holding values from [0,16] now.
  if (sizeof(T) > 2) value = (value >> (sizeof(T) > 2 ? 16 : 0)) + value;
  // Having 2 buckets of 32 bits, holding values from [0,32] now.
  if (sizeof(T) > 4) value = (value >> (sizeof(T) > 4 ? 32 : 0)) + value;
  // Having 1 buckets of 64 bits, holding values from [0,64] now.
  return static_cast<unsigned>(value & 0xff);
#endif
}
| 59 | |
// ReverseBits(value) returns |value| in reverse bit order.
template <typename T>
T ReverseBits(T value) {
  static_assert((sizeof(value) == 1) || (sizeof(value) == 2) ||
                    (sizeof(value) == 4) || (sizeof(value) == 8),
                "invalid type size");
  // Shift bits out of |value| from the low end and into |reversed| from the
  // low end, one per iteration, until every bit has been transferred.
  T reversed = 0;
  unsigned bits_left = sizeof(value) * 8;
  while (bits_left-- > 0) {
    reversed = static_cast<T>(reversed << 1) | (value & T{1});
    value = static_cast<T>(value >> 1);
  }
  return reversed;
}
| 72 | |
// CountLeadingZeros(value) returns the number of zero bits following the most
// significant 1 bit in |value| if |value| is non-zero, otherwise it returns
// {sizeof(T) * 8}.
template <typename T, unsigned bits = sizeof(T) * 8>
inline constexpr
    typename std::enable_if<std::is_unsigned<T>::value && sizeof(T) <= 8,
                            unsigned>::type
    CountLeadingZeros(T value) {
  static_assert(bits > 0, "invalid instantiation");
#if V8_HAS_BUILTIN_CLZ
  // __builtin_clz/clzll are undefined for 0, hence the explicit check. For
  // widths below 32, subtract the extra leading zeros the 32-bit builtin
  // reports.
  return value == 0
             ? bits
             : bits == 64
                   ? __builtin_clzll(static_cast<uint64_t>(value))
                   : __builtin_clz(static_cast<uint32_t>(value)) - (32 - bits);
#else
  // Binary search algorithm taken from "Hacker's Delight" (by Henry S. Warren,
  // Jr.), figures 5-11 and 5-12.
  if (bits == 1) return static_cast<unsigned>(value) ^ 1;
  T upper_half = value >> (bits / 2);
  T next_value = upper_half != 0 ? upper_half : value;
  unsigned add = upper_half != 0 ? 0 : bits / 2;
  constexpr unsigned next_bits = bits == 1 ? 1 : bits / 2;
  return CountLeadingZeros<T, next_bits>(next_value) + add;
#endif
}
| 99 | |
// Convenience wrappers for the common 32- and 64-bit cases.
inline constexpr unsigned CountLeadingZeros32(uint32_t value) {
  return CountLeadingZeros(value);
}
inline constexpr unsigned CountLeadingZeros64(uint64_t value) {
  return CountLeadingZeros(value);
}
| 106 | |
// CountTrailingZeros(value) returns the number of zero bits preceding the
// least significant 1 bit in |value| if |value| is non-zero, otherwise it
// returns {sizeof(T) * 8}.
// See CountTrailingZerosNonZero for an optimized version for the case that
// |value| is guaranteed to be non-zero.
template <typename T, unsigned bits = sizeof(T) * 8>
inline constexpr
    typename std::enable_if<std::is_integral<T>::value && sizeof(T) <= 8,
                            unsigned>::type
    CountTrailingZeros(T value) {
#if V8_HAS_BUILTIN_CTZ
  // __builtin_ctz/ctzll are undefined for 0, hence the explicit check.
  return value == 0 ? bits
                    : bits == 64 ? __builtin_ctzll(static_cast<uint64_t>(value))
                                 : __builtin_ctz(static_cast<uint32_t>(value));
#else
  // Fall back to popcount (see "Hacker's Delight" by Henry S. Warren, Jr.),
  // chapter 5-4. On x64, since is faster than counting in a loop and faster
  // than doing binary search.
  using U = typename std::make_unsigned<T>::type;
  U u = value;
  // ~u & (u - 1) sets exactly the bits below the lowest set bit of u (all
  // bits if u == 0), so its popcount is the trailing-zero count.
  return CountPopulation(static_cast<U>(~u & (u - 1u)));
#endif
}
| 130 | |
// Convenience wrappers for the common 32- and 64-bit cases.
inline constexpr unsigned CountTrailingZeros32(uint32_t value) {
  return CountTrailingZeros(value);
}
inline constexpr unsigned CountTrailingZeros64(uint64_t value) {
  return CountTrailingZeros(value);
}
| 137 | |
// CountTrailingZerosNonZero(value) returns the number of zero bits preceding
// the least significant 1 bit in |value| if |value| is non-zero, otherwise the
// behavior is undefined.
// See CountTrailingZeros for an alternative version that allows |value| == 0.
template <typename T, unsigned bits = sizeof(T) * 8>
inline constexpr
    typename std::enable_if<std::is_integral<T>::value && sizeof(T) <= 8,
                            unsigned>::type
    CountTrailingZerosNonZero(T value) {
  DCHECK_NE(0, value);
#if V8_HAS_BUILTIN_CTZ
  // Skips the zero check that CountTrailingZeros performs; callers guarantee
  // value != 0.
  return bits == 64 ? __builtin_ctzll(static_cast<uint64_t>(value))
                    : __builtin_ctz(static_cast<uint32_t>(value));
#else
  return CountTrailingZeros<T, bits>(value);
#endif
}
| 155 | |
// Returns true iff |value| is a power of 2.
template <typename T,
          typename = typename std::enable_if<std::is_integral<T>::value ||
                                             std::is_enum<T>::value>::type>
constexpr inline bool IsPowerOfTwo(T value) {
  // Zero and negatives are never powers of two.
  if (!(value > 0)) return false;
  // A positive power of two has exactly one bit set, so clearing the lowest
  // set bit must leave zero.
  return (value & (value - 1)) == 0;
}
| 163 | |
// Identical to {CountTrailingZeros}, but only works for powers of 2.
template <typename T,
          typename = typename std::enable_if<std::is_integral<T>::value>::type>
inline constexpr int WhichPowerOfTwo(T value) {
  DCHECK(IsPowerOfTwo(value));
#if V8_HAS_BUILTIN_CTZ
  STATIC_ASSERT(sizeof(T) <= 8);
  return sizeof(T) == 8 ? __builtin_ctzll(static_cast<uint64_t>(value))
                        : __builtin_ctz(static_cast<uint32_t>(value));
#else
  // Fall back to popcount (see "Hacker's Delight" by Henry S. Warren, Jr.),
  // chapter 5-4. On x64, since is faster than counting in a loop and faster
  // than doing binary search.
  // For a power of two, value - 1 sets exactly the bits below the single set
  // bit, so its popcount equals the exponent.
  using U = typename std::make_unsigned<T>::type;
  U u = value;
  return CountPopulation(static_cast<U>(u - 1));
#endif
}
| 182 | |
| 183 | // RoundUpToPowerOfTwo32(value) returns the smallest power of two which is |
| 184 | // greater than or equal to |value|. If you pass in a |value| that is already a |
| 185 | // power of two, it is returned as is. |value| must be less than or equal to |
| 186 | // 0x80000000u. Uses computation based on leading zeros if we have compiler |
| 187 | // support for that. Falls back to the implementation from "Hacker's Delight" by |
| 188 | // Henry S. Warren, Jr., figure 3-3, page 48, where the function is called clp2. |
| 189 | V8_BASE_EXPORT uint32_t RoundUpToPowerOfTwo32(uint32_t value); |
| 190 | // Same for 64 bit integers. |value| must be <= 2^63 |
| 191 | V8_BASE_EXPORT uint64_t RoundUpToPowerOfTwo64(uint64_t value); |
| 192 | // Same for size_t integers. |
// Same as RoundUpToPowerOfTwo32/64, dispatching on the width of size_t.
inline size_t RoundUpToPowerOfTwo(size_t value) {
  if (sizeof(size_t) == sizeof(uint64_t)) {
    return RoundUpToPowerOfTwo64(value);
  } else {
    // Without windows.h included this line triggers a truncation warning on
    // 64-bit builds. Presumably windows.h disables the relevant warning.
    return RoundUpToPowerOfTwo32(static_cast<uint32_t>(value));
  }
}
| 202 | |
// RoundDownToPowerOfTwo32(value) returns the greatest power of two which is
// less than or equal to |value|. If you pass in a |value| that is already a
// power of two, it is returned as is.
inline uint32_t RoundDownToPowerOfTwo32(uint32_t value) {
  // 2^31 is the largest uint32_t power of two; everything above rounds down
  // to it.
  if (value > 0x80000000u) return 0x80000000u;
  // Round up, then halve if that overshot (i.e. value was not a power of two).
  uint32_t result = RoundUpToPowerOfTwo32(value);
  if (result > value) result >>= 1;
  return result;
}
| 212 | |
| 213 | |
// Precondition: 0 <= shift < 32
inline constexpr uint32_t RotateRight32(uint32_t value, uint32_t shift) {
  // Masking keeps the complementary shift in [0, 31], so shift == 0 does not
  // produce an (undefined) 32-bit shift.
  const uint32_t opposite = (32 - shift) & 31;
  return (value >> shift) | (value << opposite);
}
| 218 | |
// Precondition: 0 <= shift < 32
inline constexpr uint32_t RotateLeft32(uint32_t value, uint32_t shift) {
  // Masking keeps the complementary shift in [0, 31], so shift == 0 does not
  // produce an (undefined) 32-bit shift.
  const uint32_t opposite = (32 - shift) & 31;
  return (value << shift) | (value >> opposite);
}
| 223 | |
// Precondition: 0 <= shift < 64
inline constexpr uint64_t RotateRight64(uint64_t value, uint64_t shift) {
  // Masking keeps the complementary shift in [0, 63], so shift == 0 does not
  // produce an (undefined) 64-bit shift.
  const uint64_t opposite = (64 - shift) & 63;
  return (value >> shift) | (value << opposite);
}
| 228 | |
// Precondition: 0 <= shift < 64
inline constexpr uint64_t RotateLeft64(uint64_t value, uint64_t shift) {
  // Masking keeps the complementary shift in [0, 63], so shift == 0 does not
  // produce an (undefined) 64-bit shift.
  const uint64_t opposite = (64 - shift) & 63;
  return (value << shift) | (value >> opposite);
}
| 233 | |
// SignedAddOverflow32(lhs,rhs,val) performs a signed summation of |lhs| and
// |rhs| and stores the result into the variable pointed to by |val| and
// returns true if the signed summation resulted in an overflow.
inline bool SignedAddOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
#if V8_HAS_BUILTIN_SADD_OVERFLOW
  return __builtin_sadd_overflow(lhs, rhs, val);
#else
  // Add in the unsigned domain (wraparound is well-defined there), then
  // detect overflow: it occurred iff the result's sign differs from both
  // operands' signs.
  uint32_t res = static_cast<uint32_t>(lhs) + static_cast<uint32_t>(rhs);
  *val = bit_cast<int32_t>(res);
  return ((res ^ lhs) & (res ^ rhs) & (1U << 31)) != 0;
#endif
}
| 246 | |
| 247 | |
// SignedSubOverflow32(lhs,rhs,val) performs a signed subtraction of |lhs| and
// |rhs| and stores the result into the variable pointed to by |val| and
// returns true if the signed subtraction resulted in an overflow.
inline bool SignedSubOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
#if V8_HAS_BUILTIN_SSUB_OVERFLOW
  return __builtin_ssub_overflow(lhs, rhs, val);
#else
  // Subtract in the unsigned domain, then detect overflow: it occurred iff
  // the result's sign differs from |lhs| and matches |-rhs| (~rhs here).
  uint32_t res = static_cast<uint32_t>(lhs) - static_cast<uint32_t>(rhs);
  *val = bit_cast<int32_t>(res);
  return ((res ^ lhs) & (res ^ ~rhs) & (1U << 31)) != 0;
#endif
}
| 260 | |
| 261 | // SignedMulOverflow32(lhs,rhs,val) performs a signed multiplication of |lhs| |
| 262 | // and |rhs| and stores the result into the variable pointed to by |val| and |
| 263 | // returns true if the signed multiplication resulted in an overflow. |
| 264 | V8_BASE_EXPORT bool SignedMulOverflow32(int32_t lhs, int32_t rhs, int32_t* val); |
| 265 | |
// SignedAddOverflow64(lhs,rhs,val) performs a signed summation of |lhs| and
// |rhs| and stores the result into the variable pointed to by |val| and
// returns true if the signed summation resulted in an overflow.
inline bool SignedAddOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
  // Same sign-bit overflow test as SignedAddOverflow32, in 64 bits.
  uint64_t res = static_cast<uint64_t>(lhs) + static_cast<uint64_t>(rhs);
  *val = bit_cast<int64_t>(res);
  return ((res ^ lhs) & (res ^ rhs) & (1ULL << 63)) != 0;
}
| 274 | |
| 275 | |
// SignedSubOverflow64(lhs,rhs,val) performs a signed subtraction of |lhs| and
// |rhs| and stores the result into the variable pointed to by |val| and
// returns true if the signed subtraction resulted in an overflow.
inline bool SignedSubOverflow64(int64_t lhs, int64_t rhs, int64_t* val) {
  // Same sign-bit overflow test as SignedSubOverflow32, in 64 bits.
  uint64_t res = static_cast<uint64_t>(lhs) - static_cast<uint64_t>(rhs);
  *val = bit_cast<int64_t>(res);
  return ((res ^ lhs) & (res ^ ~rhs) & (1ULL << 63)) != 0;
}
| 284 | |
| 285 | // SignedMulHigh32(lhs, rhs) multiplies two signed 32-bit values |lhs| and |
| 286 | // |rhs|, extracts the most significant 32 bits of the result, and returns |
| 287 | // those. |
| 288 | V8_BASE_EXPORT int32_t SignedMulHigh32(int32_t lhs, int32_t rhs); |
| 289 | |
| 290 | // SignedMulHighAndAdd32(lhs, rhs, acc) multiplies two signed 32-bit values |
| 291 | // |lhs| and |rhs|, extracts the most significant 32 bits of the result, and |
| 292 | // adds the accumulate value |acc|. |
| 293 | V8_BASE_EXPORT int32_t SignedMulHighAndAdd32(int32_t lhs, int32_t rhs, |
| 294 | int32_t acc); |
| 295 | |
| 296 | // SignedDiv32(lhs, rhs) divides |lhs| by |rhs| and returns the quotient |
| 297 | // truncated to int32. If |rhs| is zero, then zero is returned. If |lhs| |
| 298 | // is minint and |rhs| is -1, it returns minint. |
| 299 | V8_BASE_EXPORT int32_t SignedDiv32(int32_t lhs, int32_t rhs); |
| 300 | |
| 301 | // SignedMod32(lhs, rhs) divides |lhs| by |rhs| and returns the remainder |
| 302 | // truncated to int32. If either |rhs| is zero or |lhs| is minint and |rhs| |
| 303 | // is -1, it returns zero. |
| 304 | V8_BASE_EXPORT int32_t SignedMod32(int32_t lhs, int32_t rhs); |
| 305 | |
// UnsignedAddOverflow32(lhs,rhs,val) performs an unsigned summation of |lhs|
// and |rhs| and stores the result into the variable pointed to by |val| and
// returns true if the unsigned summation resulted in an overflow.
inline bool UnsignedAddOverflow32(uint32_t lhs, uint32_t rhs, uint32_t* val) {
#if V8_HAS_BUILTIN_SADD_OVERFLOW
  // NOTE(review): guarded by the *signed* add-overflow builtin macro but
  // calls __builtin_uadd_overflow — presumably both builtins are available
  // together on supported compilers; confirm.
  return __builtin_uadd_overflow(lhs, rhs, val);
#else
  *val = lhs + rhs;
  // Unsigned overflow wrapped, so the sum is smaller than at least one
  // operand; (lhs | rhs) is a cheap lower bound covering both.
  return *val < (lhs | rhs);
#endif
}
| 317 | |
| 318 | |
// UnsignedDiv32(lhs, rhs) divides |lhs| by |rhs| and returns the quotient
// truncated to uint32. If |rhs| is zero, then zero is returned.
inline uint32_t UnsignedDiv32(uint32_t lhs, uint32_t rhs) {
  // Guard against division by zero instead of trapping.
  if (rhs == 0) return 0u;
  return lhs / rhs;
}
| 324 | |
| 325 | |
// UnsignedMod32(lhs, rhs) divides |lhs| by |rhs| and returns the remainder
// truncated to uint32. If |rhs| is zero, then zero is returned.
inline uint32_t UnsignedMod32(uint32_t lhs, uint32_t rhs) {
  // Guard against division by zero instead of trapping.
  if (rhs == 0) return 0u;
  return lhs % rhs;
}
| 331 | |
| 332 | // Wraparound integer arithmetic without undefined behavior. |
| 333 | |
// Two's-complement addition with well-defined wraparound (signed overflow is
// UB, unsigned overflow is not, so the arithmetic is done unsigned).
inline int32_t WraparoundAdd32(int32_t lhs, int32_t rhs) {
  const uint32_t wrapped_sum =
      static_cast<uint32_t>(lhs) + static_cast<uint32_t>(rhs);
  return static_cast<int32_t>(wrapped_sum);
}
| 338 | |
// Two's-complement negation with well-defined wraparound; -INT32_MIN would be
// UB in signed arithmetic, so negate in the unsigned domain.
inline int32_t WraparoundNeg32(int32_t x) {
  const uint32_t negated = 0u - static_cast<uint32_t>(x);
  return static_cast<int32_t>(negated);
}
| 342 | |
| 343 | // SignedSaturatedAdd64(lhs, rhs) adds |lhs| and |rhs|, |
| 344 | // checks and returns the result. |
| 345 | V8_BASE_EXPORT int64_t SignedSaturatedAdd64(int64_t lhs, int64_t rhs); |
| 346 | |
| 347 | // SignedSaturatedSub64(lhs, rhs) subtracts |lhs| by |rhs|, |
| 348 | // checks and returns the result. |
| 349 | V8_BASE_EXPORT int64_t SignedSaturatedSub64(int64_t lhs, int64_t rhs); |
| 350 | |
| 351 | } // namespace bits |
| 352 | } // namespace base |
| 353 | } // namespace v8 |
| 354 | |
| 355 | #endif // V8_BASE_BITS_H_ |