File: | out/../deps/v8/src/heap/new-spaces.cc |
Warning: | line 656, column 5 Value stored to 'high' is never read |
1 | // Copyright 2020 the V8 project authors. All rights reserved. |
2 | // Use of this source code is governed by a BSD-style license that can be |
3 | // found in the LICENSE file. |
4 | |
5 | #include "src/heap/new-spaces.h" |
6 | |
7 | #include "src/common/globals.h" |
8 | #include "src/heap/array-buffer-sweeper.h" |
9 | #include "src/heap/heap-inl.h" |
10 | #include "src/heap/incremental-marking.h" |
11 | #include "src/heap/mark-compact.h" |
12 | #include "src/heap/memory-allocator.h" |
13 | #include "src/heap/paged-spaces.h" |
14 | #include "src/heap/safepoint.h" |
15 | #include "src/heap/spaces-inl.h" |
16 | #include "src/heap/spaces.h" |
17 | |
18 | namespace v8 { |
19 | namespace internal { |
20 | |
21 | Page* SemiSpace::InitializePage(MemoryChunk* chunk) { |
22 | bool in_to_space = (id() != kFromSpace); |
23 | chunk->SetFlag(in_to_space ? MemoryChunk::TO_PAGE : MemoryChunk::FROM_PAGE); |
24 | Page* page = static_cast<Page*>(chunk); |
25 | page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking()); |
26 | page->list_node().Initialize(); |
27 | if (FLAG_minor_mc) { |
28 | page->AllocateYoungGenerationBitmap(); |
29 | heap() |
30 | ->minor_mark_compact_collector() |
31 | ->non_atomic_marking_state() |
32 | ->ClearLiveness(page); |
33 | } |
34 | page->InitializationMemoryFence(); |
35 | return page; |
36 | } |
37 | |
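| // Brings the number of committed pages in line with target_capacity_: |
| // surplus pages past the expected count are freed back to the pool, and |
| // missing pages are committed and initialized with filler objects. |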
38 | bool SemiSpace::EnsureCurrentCapacity() { |
39 | if (IsCommitted()) { |
40 | const int expected_pages = |
41 | static_cast<int>(target_capacity_ / Page::kPageSize); |
42 | // `target_capacity_` is a multiple of `Page::kPageSize`. |
43 | DCHECK_EQ(target_capacity_, expected_pages * Page::kPageSize); |
44 | MemoryChunk* current_page = first_page(); |
45 | int actual_pages = 0; |
46 | |
47 | // First iterate through the page list, counting up to expected_pages |
48 | // pages if that many exist. |
49 | while (current_page != nullptr && actual_pages < expected_pages) { |
50 | actual_pages++; |
51 | current_page = current_page->list_node().next(); |
52 | } |
53 | |
54 | DCHECK_LE(actual_pages, expected_pages); |
55 | |
56 | // Free all overallocated pages which are behind current_page. |
57 | while (current_page) { |
58 | DCHECK_EQ(actual_pages, expected_pages); |
59 | MemoryChunk* next_current = current_page->list_node().next(); |
60 | // Promoted pages contain live objects and should not be discarded. |
61 | DCHECK(!current_page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)); |
62 | // `current_page_` contains the current allocation area. Thus, we should |
63 | // never free the `current_page_`. Furthermore, live objects generally |
64 | // reside before the current allocation area, so `current_page_` also |
65 | // serves as a guard against freeing pages with live objects on them. |
66 | DCHECK_NE(current_page, current_page_); |
67 | AccountUncommitted(Page::kPageSize); |
68 | DecrementCommittedPhysicalMemory(current_page->CommittedPhysicalMemory()); |
69 | memory_chunk_list_.Remove(current_page); |
70 | // Clear new space flags to avoid this page being treated as a new |
71 | // space page that is potentially being swept. |
72 | current_page->ClearFlags(Page::kIsInYoungGenerationMask); |
73 | heap()->memory_allocator()->Free( |
74 | MemoryAllocator::FreeMode::kConcurrentlyAndPool, current_page); |
75 | current_page = next_current; |
76 | } |
77 | |
78 | // Add more pages if we have fewer than expected_pages. |
79 | IncrementalMarking::NonAtomicMarkingState* marking_state = |
80 | heap()->incremental_marking()->non_atomic_marking_state(); |
81 | while (actual_pages < expected_pages) { |
82 | actual_pages++; |
83 | current_page = heap()->memory_allocator()->AllocatePage( |
84 | MemoryAllocator::AllocationMode::kUsePool, this, NOT_EXECUTABLE); |
85 | if (current_page == nullptr) return false; |
86 | DCHECK_NOT_NULL(current_page); |
87 | AccountCommitted(Page::kPageSize); |
88 | IncrementCommittedPhysicalMemory(current_page->CommittedPhysicalMemory()); |
89 | memory_chunk_list_.PushBack(current_page); |
90 | marking_state->ClearLiveness(current_page); |
91 | current_page->SetFlags(first_page()->GetFlags()); |
92 | heap()->CreateFillerObjectAt(current_page->area_start(), |
93 | static_cast<int>(current_page->area_size()), |
94 | ClearRecordedSlots::kNo); |
95 | } |
96 | DCHECK_EQ(expected_pages, actual_pages); |
97 | } |
98 | return true; |
99 | } |
100 | |
101 | // ----------------------------------------------------------------------------- |
102 | // SemiSpace implementation |
103 | |
104 | void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) { |
105 | DCHECK_GE(maximum_capacity, static_cast<size_t>(Page::kPageSize)); |
106 | minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize); |
107 | target_capacity_ = minimum_capacity_; |
108 | maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize); |
109 | } |
110 | |
111 | void SemiSpace::TearDown() { |
112 | // Properly uncommit memory to keep the allocator counters in sync. |
113 | if (IsCommitted()) { |
114 | Uncommit(); |
115 | } |
116 | target_capacity_ = maximum_capacity_ = 0; |
117 | } |
118 | |
119 | bool SemiSpace::Commit() { |
120 | DCHECK(!IsCommitted()); |
121 | DCHECK_EQ(CommittedMemory(), size_t(0)); |
122 | const int num_pages = static_cast<int>(target_capacity_ / Page::kPageSize); |
123 | DCHECK(num_pages); |
124 | for (int pages_added = 0; pages_added < num_pages; pages_added++) { |
125 | // Pages in the new spaces can be moved to the old space by the full |
126 | // collector. Therefore, they must be initialized with the same FreeList as |
127 | // old pages. |
128 | Page* new_page = heap()->memory_allocator()->AllocatePage( |
129 | MemoryAllocator::AllocationMode::kUsePool, this, NOT_EXECUTABLE); |
130 | if (new_page == nullptr) { |
131 | if (pages_added) RewindPages(pages_added); |
132 | DCHECK(!IsCommitted()); |
133 | return false; |
134 | } |
135 | memory_chunk_list_.PushBack(new_page); |
136 | IncrementCommittedPhysicalMemory(new_page->CommittedPhysicalMemory()); |
137 | } |
138 | Reset(); |
139 | AccountCommitted(target_capacity_); |
140 | if (age_mark_ == kNullAddress) { |
141 | age_mark_ = first_page()->area_start(); |
142 | } |
143 | DCHECK(IsCommitted()); |
144 | return true; |
145 | } |
146 | |
147 | bool SemiSpace::Uncommit() { |
148 | DCHECK(IsCommitted()); |
149 | int actual_pages = 0; |
150 | while (!memory_chunk_list_.Empty()) { |
151 | actual_pages++; |
152 | MemoryChunk* chunk = memory_chunk_list_.front(); |
153 | DecrementCommittedPhysicalMemory(chunk->CommittedPhysicalMemory()); |
154 | memory_chunk_list_.Remove(chunk); |
155 | heap()->memory_allocator()->Free( |
156 | MemoryAllocator::FreeMode::kConcurrentlyAndPool, chunk); |
157 | } |
158 | current_page_ = nullptr; |
159 | current_capacity_ = 0; |
160 | size_t removed_page_size = |
161 | static_cast<size_t>(actual_pages * Page::kPageSize); |
162 | DCHECK_EQ(CommittedMemory(), removed_page_size); |
163 | DCHECK_EQ(CommittedPhysicalMemory(), 0); |
164 | AccountUncommitted(removed_page_size); |
165 | heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); |
166 | DCHECK(!IsCommitted()); |
167 | return true; |
168 | } |
169 | |
170 | size_t SemiSpace::CommittedPhysicalMemory() const { |
171 | if (!IsCommitted()) return 0; |
172 | if (!base::OS::HasLazyCommits()) return CommittedMemory(); |
173 | return committed_physical_memory_; |
174 | } |
175 | |
176 | bool SemiSpace::GrowTo(size_t new_capacity) { |
177 | if (!IsCommitted()) { |
178 | if (!Commit()) return false; |
179 | } |
180 | DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u); |
181 | DCHECK_LE(new_capacity, maximum_capacity_); |
182 | DCHECK_GT(new_capacity, target_capacity_); |
183 | const size_t delta = new_capacity - target_capacity_; |
184 | DCHECK(IsAligned(delta, AllocatePageSize())); |
185 | const int delta_pages = static_cast<int>(delta / Page::kPageSize); |
186 | DCHECK(last_page()); |
187 | IncrementalMarking::NonAtomicMarkingState* marking_state = |
188 | heap()->incremental_marking()->non_atomic_marking_state(); |
189 | for (int pages_added = 0; pages_added < delta_pages; pages_added++) { |
190 | Page* new_page = heap()->memory_allocator()->AllocatePage( |
191 | MemoryAllocator::AllocationMode::kUsePool, this, NOT_EXECUTABLE); |
192 | if (new_page == nullptr) { |
193 | if (pages_added) RewindPages(pages_added); |
194 | return false; |
195 | } |
196 | memory_chunk_list_.PushBack(new_page); |
197 | marking_state->ClearLiveness(new_page); |
198 | IncrementCommittedPhysicalMemory(new_page->CommittedPhysicalMemory()); |
199 | // Duplicate the flags that were set on the old page. |
200 | new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask); |
201 | } |
202 | AccountCommitted(delta); |
203 | target_capacity_ = new_capacity; |
204 | return true; |
205 | } |
206 | |
207 | void SemiSpace::RewindPages(int num_pages) { |
208 | DCHECK_GT(num_pages, 0); |
209 | DCHECK(last_page()); |
210 | while (num_pages > 0) { |
211 | MemoryChunk* last = last_page(); |
212 | memory_chunk_list_.Remove(last); |
213 | DecrementCommittedPhysicalMemory(last->CommittedPhysicalMemory()); |
214 | heap()->memory_allocator()->Free( |
215 | MemoryAllocator::FreeMode::kConcurrentlyAndPool, last); |
216 | num_pages--; |
217 | } |
218 | } |
219 | |
220 | void SemiSpace::ShrinkTo(size_t new_capacity) { |
221 | DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u); |
222 | DCHECK_GE(new_capacity, minimum_capacity_); |
223 | DCHECK_LT(new_capacity, target_capacity_); |
224 | if (IsCommitted()) { |
225 | const size_t delta = target_capacity_ - new_capacity; |
226 | DCHECK(IsAligned(delta, Page::kPageSize)); |
227 | int delta_pages = static_cast<int>(delta / Page::kPageSize); |
228 | RewindPages(delta_pages); |
229 | AccountUncommitted(delta); |
230 | heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); |
231 | } |
232 | target_capacity_ = new_capacity; |
233 | } |
234 | |
235 | void SemiSpace::FixPagesFlags(Page::MainThreadFlags flags, |
236 | Page::MainThreadFlags mask) { |
237 | for (Page* page : *this) { |
238 | page->set_owner(this); |
239 | page->SetFlags(flags, mask); |
240 | if (id_ == kToSpace) { |
241 | page->ClearFlag(MemoryChunk::FROM_PAGE); |
242 | page->SetFlag(MemoryChunk::TO_PAGE); |
243 | page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); |
244 | heap()->incremental_marking()->non_atomic_marking_state()->SetLiveBytes( |
245 | page, 0); |
246 | } else { |
247 | page->SetFlag(MemoryChunk::FROM_PAGE); |
248 | page->ClearFlag(MemoryChunk::TO_PAGE); |
249 | } |
250 | DCHECK(page->InYoungGeneration()); |
251 | } |
252 | } |
253 | |
254 | void SemiSpace::Reset() { |
255 | DCHECK(first_page()); |
256 | DCHECK(last_page()); |
257 | current_page_ = first_page(); |
258 | current_capacity_ = Page::kPageSize; |
259 | } |
260 | |
261 | void SemiSpace::RemovePage(Page* page) { |
262 | if (current_page_ == page) { |
263 | if (page->prev_page()) { |
264 | current_page_ = page->prev_page(); |
265 | } |
266 | } |
267 | memory_chunk_list_.Remove(page); |
268 | AccountUncommitted(Page::kPageSize); |
269 | DecrementCommittedPhysicalMemory(page->CommittedPhysicalMemory()); |
270 | for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) { |
271 | ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i); |
272 | DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t)); |
273 | } |
274 | } |
275 | |
276 | void SemiSpace::PrependPage(Page* page) { |
277 | page->SetFlags(current_page()->GetFlags()); |
278 | page->set_owner(this); |
279 | memory_chunk_list_.PushFront(page); |
280 | current_capacity_ += Page::kPageSize; |
281 | AccountCommitted(Page::kPageSize); |
282 | IncrementCommittedPhysicalMemory(page->CommittedPhysicalMemory()); |
283 | for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) { |
284 | ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i); |
285 | IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t)); |
286 | } |
287 | } |
288 | |
289 | void SemiSpace::MovePageToTheEnd(Page* page) { |
290 | DCHECK_EQ(page->owner(), this); |
291 | memory_chunk_list_.Remove(page); |
292 | memory_chunk_list_.PushBack(page); |
293 | current_page_ = page; |
294 | } |
295 | |
296 | void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) { |
297 | // We won't be swapping semispaces without data in them. |
298 | DCHECK(from->first_page()); |
299 | DCHECK(to->first_page()); |
300 | |
301 | auto saved_to_space_flags = to->current_page()->GetFlags(); |
302 | |
303 | // We swap all properties but id_. |
304 | std::swap(from->target_capacity_, to->target_capacity_); |
305 | std::swap(from->maximum_capacity_, to->maximum_capacity_); |
306 | std::swap(from->minimum_capacity_, to->minimum_capacity_); |
307 | std::swap(from->age_mark_, to->age_mark_); |
308 | std::swap(from->memory_chunk_list_, to->memory_chunk_list_); |
309 | std::swap(from->current_page_, to->current_page_); |
310 | std::swap(from->external_backing_store_bytes_, |
311 | to->external_backing_store_bytes_); |
312 | std::swap(from->committed_physical_memory_, to->committed_physical_memory_); |
313 | |
314 | to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask); |
315 | from->FixPagesFlags(Page::NO_FLAGS, Page::NO_FLAGS); |
316 | } |
317 | |
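| // The committed_physical_memory_ counter is only maintained on platforms |
| // with lazy commits; elsewhere CommittedPhysicalMemory() simply returns |
| // CommittedMemory() (see above), so the updates below are skipped. |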
318 | void SemiSpace::IncrementCommittedPhysicalMemory(size_t increment_value) { |
319 | if (!base::OS::HasLazyCommits()) return; |
320 | DCHECK_LE(committed_physical_memory_, |
321 | committed_physical_memory_ + increment_value); |
322 | committed_physical_memory_ += increment_value; |
323 | } |
324 | |
325 | void SemiSpace::DecrementCommittedPhysicalMemory(size_t decrement_value) { |
326 | if (!base::OS::HasLazyCommits()) return; |
327 | DCHECK_LE(decrement_value, committed_physical_memory_); |
328 | committed_physical_memory_ -= decrement_value; |
329 | } |
330 | |
331 | void SemiSpace::AddRangeToActiveSystemPages(Address start, Address end) { |
332 | Page* page = current_page(); |
333 | |
334 | DCHECK_LE(page->address(), start); |
335 | DCHECK_LT(start, end); |
336 | DCHECK_LE(end, page->address() + Page::kPageSize); |
337 | |
338 | const size_t added_pages = page->active_system_pages()->Add( |
339 | start - page->address(), end - page->address(), |
340 | MemoryAllocator::GetCommitPageSizeBits()); |
341 | IncrementCommittedPhysicalMemory(added_pages * |
342 | MemoryAllocator::GetCommitPageSize()); |
343 | } |
344 | |
345 | void SemiSpace::set_age_mark(Address mark) { |
346 | DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this); |
347 | age_mark_ = mark; |
348 | // Mark all pages up to the one containing mark. |
349 | for (Page* p : PageRange(space_start(), mark)) { |
350 | p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); |
351 | } |
352 | } |
353 | |
354 | std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator(Heap* heap) { |
355 | // Use the NewSpace::NewObjectIterator to iterate the ToSpace. |
356 | UNREACHABLE(); |
357 | } |
358 | |
359 | #ifdef DEBUG |
360 | void SemiSpace::Print() {} |
361 | #endif |
362 | |
363 | #ifdef VERIFY_HEAP |
364 | void SemiSpace::Verify() const { |
365 | bool is_from_space = (id_ == kFromSpace); |
366 | size_t external_backing_store_bytes[kNumTypes]; |
367 | |
368 | for (int i = 0; i < kNumTypes; i++) { |
369 | external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0; |
370 | } |
371 | |
372 | int actual_pages = 0; |
373 | size_t computed_committed_physical_memory = 0; |
374 | |
375 | for (const Page* page : *this) { |
376 | CHECK_EQ(page->owner(), this); |
377 | CHECK(page->InNewSpace()); |
378 | CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::FROM_PAGE |
379 | : MemoryChunk::TO_PAGE)); |
380 | CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::TO_PAGE |
381 | : MemoryChunk::FROM_PAGE)); |
382 | CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING)); |
383 | if (!is_from_space) { |
384 | // The pointers-from-here-are-interesting flag isn't updated dynamically |
385 | // on from-space pages, so it might be out of sync with the marking state. |
386 | if (page->heap()->incremental_marking()->IsMarking()) { |
387 | CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING)); |
388 | } else { |
389 | CHECK( |
390 | !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING)); |
391 | } |
392 | } |
393 | for (int i = 0; i < kNumTypes; i++) { |
394 | ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i); |
395 | external_backing_store_bytes[t] += page->ExternalBackingStoreBytes(t); |
396 | } |
397 | |
398 | computed_committed_physical_memory += page->CommittedPhysicalMemory(); |
399 | |
400 | CHECK_IMPLIES(page->list_node().prev(), |
401 | page->list_node().prev()->list_node().next() == page); |
402 | actual_pages++; |
403 | } |
404 | CHECK_EQ(actual_pages * size_t(Page::kPageSize), CommittedMemory()); |
405 | CHECK_EQ(computed_committed_physical_memory, CommittedPhysicalMemory()); |
406 | |
407 | for (int i = 0; i < kNumTypes; i++) { |
408 | ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i); |
409 | CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t)); |
410 | } |
411 | } |
412 | #endif |
413 | |
414 | #ifdef DEBUG |
415 | void SemiSpace::AssertValidRange(Address start, Address end) { |
416 | // Addresses belong to the same semi-space. |
417 | Page* page = Page::FromAllocationAreaAddress(start); |
418 | Page* end_page = Page::FromAllocationAreaAddress(end); |
419 | SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner()); |
420 | DCHECK_EQ(space, end_page->owner()); |
421 | // Start address is before end address, either on same page, |
422 | // or end address is on a later page in the linked list of |
423 | // semi-space pages. |
424 | if (page == end_page) { |
425 | DCHECK_LE(start, end); |
426 | } else { |
427 | while (page != end_page) { |
428 | page = page->next_page(); |
429 | } |
430 | DCHECK(page); |
431 | } |
432 | } |
433 | #endif |
434 | |
435 | // ----------------------------------------------------------------------------- |
436 | // SemiSpaceObjectIterator implementation. |
437 | |
438 | SemiSpaceObjectIterator::SemiSpaceObjectIterator(const NewSpace* space) { |
439 | Initialize(space->first_allocatable_address(), space->top()); |
440 | } |
441 | |
442 | void SemiSpaceObjectIterator::Initialize(Address start, Address end) { |
443 | SemiSpace::AssertValidRange(start, end); |
444 | current_ = start; |
445 | limit_ = end; |
446 | } |
447 | |
448 | size_t NewSpace::CommittedPhysicalMemory() const { |
449 | if (!base::OS::HasLazyCommits()) return CommittedMemory(); |
450 | BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top()); |
451 | size_t size = to_space_.CommittedPhysicalMemory(); |
452 | if (from_space_.IsCommitted()) { |
453 | size += from_space_.CommittedPhysicalMemory(); |
454 | } |
455 | return size; |
456 | } |
457 | |
458 | // ----------------------------------------------------------------------------- |
459 | // NewSpace implementation |
460 | |
461 | NewSpace::NewSpace(Heap* heap, v8::PageAllocator* page_allocator, |
462 | size_t initial_semispace_capacity, |
463 | size_t max_semispace_capacity, |
464 | LinearAllocationArea* allocation_info) |
465 | : SpaceWithLinearArea(heap, NEW_SPACE, new NoFreeList(), allocation_info), |
466 | to_space_(heap, kToSpace), |
467 | from_space_(heap, kFromSpace) { |
468 | DCHECK(initial_semispace_capacity <= max_semispace_capacity); |
469 | |
470 | to_space_.SetUp(initial_semispace_capacity, max_semispace_capacity); |
471 | from_space_.SetUp(initial_semispace_capacity, max_semispace_capacity); |
472 | if (!to_space_.Commit()) { |
473 | V8::FatalProcessOutOfMemory(heap->isolate(), "New space setup"); |
474 | } |
475 | DCHECK(!from_space_.IsCommitted()); // No need to use memory yet. |
476 | ResetLinearAllocationArea(); |
477 | } |
478 | |
479 | NewSpace::~NewSpace() { |
480 | // Tears down the space. Heap memory was not allocated by the space, so it |
481 | // is not deallocated here. |
482 | allocation_info_->Reset(kNullAddress, kNullAddress); |
483 | |
484 | to_space_.TearDown(); |
485 | from_space_.TearDown(); |
486 | } |
487 | |
488 | void NewSpace::ResetParkedAllocationBuffers() { |
489 | parked_allocation_buffers_.clear(); |
490 | } |
491 | |
492 | void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); } |
493 | |
494 | void NewSpace::Grow() { |
495 | heap()->safepoint()->AssertActive(); |
496 | // Double the semispace size but only up to maximum capacity. |
497 | DCHECK(TotalCapacity() < MaximumCapacity()); |
498 | size_t new_capacity = std::min( |
499 | MaximumCapacity(), |
500 | static_cast<size_t>(FLAG_semi_space_growth_factor) * TotalCapacity()); |
501 | if (to_space_.GrowTo(new_capacity)) { |
502 | // Only grow from space if we managed to grow to-space. |
503 | if (!from_space_.GrowTo(new_capacity)) { |
504 | // If we managed to grow to-space but couldn't grow from-space, |
505 | // attempt to shrink to-space. |
506 | to_space_.ShrinkTo(from_space_.target_capacity()); |
507 | } |
508 | } |
509 | DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
510 | } |
511 | |
512 | void NewSpace::Shrink() { |
513 | size_t new_capacity = std::max(InitialTotalCapacity(), 2 * Size()); |
514 | size_t rounded_new_capacity = ::RoundUp(new_capacity, Page::kPageSize); |
515 | if (rounded_new_capacity < TotalCapacity()) { |
516 | to_space_.ShrinkTo(rounded_new_capacity); |
517 | // Only shrink from-space if we managed to shrink to-space. |
518 | if (from_space_.IsCommitted()) from_space_.Reset(); |
519 | from_space_.ShrinkTo(rounded_new_capacity); |
520 | } |
521 | DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
522 | } |
523 | |
524 | bool NewSpace::Rebalance() { |
525 | // Order here is important to make use of the page pool. |
526 | return to_space_.EnsureCurrentCapacity() && |
527 | from_space_.EnsureCurrentCapacity(); |
528 | } |
529 | |
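| // Points the LAB at the current to-space page (or at known_top if given) |
| // and publishes the new top/limit for concurrent markers. |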
530 | void NewSpace::UpdateLinearAllocationArea(Address known_top) { |
531 | AdvanceAllocationObservers(); |
532 | |
533 | Address new_top = known_top == 0 ? to_space_.page_low() : known_top; |
534 | BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top()); |
535 | allocation_info_->Reset(new_top, to_space_.page_high()); |
536 | // The order of the following two stores is important. |
537 | // See the corresponding loads in ConcurrentMarking::Run. |
538 | { |
539 | base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_); |
540 | original_limit_.store(limit(), std::memory_order_relaxed); |
541 | original_top_.store(top(), std::memory_order_release); |
542 | } |
543 | |
544 | to_space_.AddRangeToActiveSystemPages(top(), limit()); |
545 | DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
546 | |
547 | UpdateInlineAllocationLimit(0); |
548 | } |
549 | |
550 | void NewSpace::ResetLinearAllocationArea() { |
551 | to_space_.Reset(); |
552 | UpdateLinearAllocationArea(); |
553 | // Clear all mark-bits in the to-space. |
554 | IncrementalMarking::NonAtomicMarkingState* marking_state = |
555 | heap()->incremental_marking()->non_atomic_marking_state(); |
556 | for (Page* p : to_space_) { |
557 | marking_state->ClearLiveness(p); |
558 | // Concurrent marking may have local live bytes for this page. |
559 | heap()->concurrent_marking()->ClearMemoryChunkData(p); |
560 | } |
561 | } |
562 | |
563 | void NewSpace::UpdateInlineAllocationLimit(size_t min_size) { |
564 | Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size); |
565 | DCHECK_LE(top(), new_limit); |
566 | DCHECK_LE(new_limit, to_space_.page_high()); |
567 | allocation_info_->SetLimit(new_limit); |
568 | DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
569 | |
570 | #if DEBUG |
571 | VerifyTop(); |
572 | #endif |
573 | } |
574 | |
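| // Advances the allocation area to the next to-space page, first filling |
| // the tail of the current page so the heap stays iterable. Returns false |
| // when to-space has no page left to advance to. |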
575 | bool NewSpace::AddFreshPage() { |
576 | Address top = allocation_info_->top(); |
577 | DCHECK(!OldSpace::IsAtPageStart(top)); |
578 | |
579 | // Clear remainder of current page. |
580 | Address limit = Page::FromAllocationAreaAddress(top)->area_end(); |
581 | int remaining_in_page = static_cast<int>(limit - top); |
582 | heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo); |
583 | |
584 | if (!to_space_.AdvancePage()) { |
585 | // No more pages left to advance. |
586 | return false; |
587 | } |
588 | |
589 | // We park unused allocation buffer space of allocations happening from |
590 | // the mutator. |
591 | if (FLAG_allocation_buffer_parking && heap()->gc_state() == Heap::NOT_IN_GC && |
592 | remaining_in_page >= kAllocationBufferParkingThreshold) { |
593 | parked_allocation_buffers_.push_back( |
594 | ParkedAllocationBuffer(remaining_in_page, top)); |
595 | } |
596 | UpdateLinearAllocationArea(); |
597 | |
598 | return true; |
599 | } |
600 | |
601 | bool NewSpace::AddFreshPageSynchronized() { |
602 | base::MutexGuard guard(&mutex_); |
603 | return AddFreshPage(); |
604 | } |
605 | |
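| // Reuses the first parked buffer large enough for the requested size |
| // plus alignment fill; its page is moved to the end of the page list and |
| // becomes the allocation page again. |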
606 | bool NewSpace::AddParkedAllocationBuffer(int size_in_bytes, |
607 | AllocationAlignment alignment) { |
608 | int parked_size = 0; |
609 | Address start = 0; |
610 | for (auto it = parked_allocation_buffers_.begin(); |
611 | it != parked_allocation_buffers_.end();) { |
612 | parked_size = it->first; |
613 | start = it->second; |
614 | int filler_size = Heap::GetFillToAlign(start, alignment); |
615 | if (size_in_bytes + filler_size <= parked_size) { |
616 | parked_allocation_buffers_.erase(it); |
617 | Page* page = Page::FromAddress(start); |
618 | // We move a page with a parked allocation to the end of the page list |
619 | // to maintain the invariant that the last page is the used one. |
620 | to_space_.MovePageToTheEnd(page); |
621 | UpdateLinearAllocationArea(start); |
622 | return true; |
623 | } else { |
624 | it++; |
625 | } |
626 | } |
627 | return false; |
628 | } |
629 | |
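| // Guarantees that the LAB can hold size_in_bytes plus alignment fill, |
| // moving on to a fresh page (or falling back to a parked buffer when no |
| // page is left) if the current page is too small. |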
630 | bool NewSpace::EnsureAllocation(int size_in_bytes, |
631 | AllocationAlignment alignment, |
632 | AllocationOrigin origin, |
633 | int* out_max_aligned_size) { |
634 | DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
635 | #if DEBUG |
636 | VerifyTop(); |
637 | #endif // DEBUG |
638 | |
639 | AdvanceAllocationObservers(); |
640 | |
641 | Address old_top = allocation_info_->top(); |
642 | Address high = to_space_.page_high(); |
643 | int filler_size = Heap::GetFillToAlign(old_top, alignment); |
644 | int aligned_size_in_bytes = size_in_bytes + filler_size; |
645 | |
646 | if (old_top + aligned_size_in_bytes > high) { |
647 | // Not enough room in the page; try to allocate a new one. |
648 | if (!AddFreshPage()) { |
649 | // When we cannot grow NewSpace anymore we query for parked allocations. |
650 | if (!FLAG_allocation_buffer_parking || |
651 | !AddParkedAllocationBuffer(size_in_bytes, alignment)) |
652 | return false; |
653 | } |
654 | |
655 | old_top = allocation_info_->top(); |
656 | high = to_space_.page_high(); |
Value stored to 'high' is never read | |
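| // The refreshed `high` is only read by the DCHECK below, which compiles |
| // to a no-op in release builds; hence the analyzer's dead-store warning. |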
657 | filler_size = Heap::GetFillToAlign(old_top, alignment); |
658 | aligned_size_in_bytes = size_in_bytes + filler_size; |
659 | } |
660 | |
661 | if (out_max_aligned_size) { |
662 | *out_max_aligned_size = aligned_size_in_bytes; |
663 | } |
664 | |
665 | DCHECK(old_top + aligned_size_in_bytes <= high); |
666 | UpdateInlineAllocationLimit(aligned_size_in_bytes); |
667 | DCHECK_EQ(allocation_info_->start(), allocation_info_->top()); |
668 | DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
669 | return true; |
670 | } |
671 | |
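| // Merges a no-longer-needed LAB back into the allocation area when the |
| // two are adjacent, so its space can be handed out again. |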
672 | void NewSpace::MaybeFreeUnusedLab(LinearAllocationArea info) { |
673 | if (allocation_info_->MergeIfAdjacent(info)) { |
674 | original_top_.store(allocation_info_->top(), std::memory_order_release); |
675 | } |
676 | |
677 | #if DEBUG |
678 | VerifyTop(); |
679 | #endif |
680 | } |
681 | |
682 | std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator(Heap* heap) { |
683 | return std::unique_ptr<ObjectIterator>(new SemiSpaceObjectIterator(this)); |
684 | } |
685 | |
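| // Fills the unused remainder of the allocation page with a filler object |
| // so heap iteration never runs into uninitialized memory. |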
686 | void NewSpace::MakeLinearAllocationAreaIterable() { |
687 | Address to_top = top(); |
688 | Page* page = Page::FromAddress(to_top - kTaggedSize); |
689 | if (page->Contains(to_top)) { |
690 | int remaining_in_page = static_cast<int>(page->area_end() - to_top); |
691 | heap_->CreateFillerObjectAt(to_top, remaining_in_page, |
692 | ClearRecordedSlots::kNo); |
693 | } |
694 | } |
695 | |
696 | void NewSpace::FreeLinearAllocationArea() { |
697 | MakeLinearAllocationAreaIterable(); |
698 | UpdateInlineAllocationLimit(0); |
699 | } |
700 | |
701 | #if DEBUG |
702 | void NewSpace::VerifyTop() const { |
703 | SpaceWithLinearArea::VerifyTop(); |
704 | |
705 | // Ensure that original_top_ is always >= the LAB start. The delta between |
706 | // start_ and top_ is still to be processed by allocation observers. |
707 | DCHECK_GE(original_top_, allocation_info_->start()); |
708 | |
709 | // Ensure that limit() is <= original_limit_; original_limit_ always needs |
710 | // to be the end of the current to-space page. |
711 | DCHECK_LE(allocation_info_->limit(), original_limit_); |
712 | DCHECK_EQ(original_limit_, to_space_.page_high()); |
713 | } |
714 | #endif // DEBUG |
715 | |
716 | #ifdef VERIFY_HEAP |
717 | // We do not use the SemiSpaceObjectIterator because verification doesn't assume |
718 | // that it works (it depends on the invariants we are checking). |
719 | void NewSpace::Verify(Isolate* isolate) const { |
720 | // The allocation pointer should be in the space or at the very end. |
721 | DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
722 | |
723 | // There should be objects packed in from the low address up to the |
724 | // allocation pointer. |
725 | Address current = to_space_.first_page()->area_start(); |
726 | CHECK_EQ(current, to_space_.space_start()); |
727 | |
728 | size_t external_space_bytes[kNumTypes]; |
729 | for (int i = 0; i < kNumTypes; i++) { |
730 | external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0; |
731 | } |
732 | |
733 | CHECK(!Page::FromAllocationAreaAddress(current)->IsFlagSet( |
734 | Page::PAGE_NEW_OLD_PROMOTION)); |
735 | CHECK(!Page::FromAllocationAreaAddress(current)->IsFlagSet( |
736 | Page::PAGE_NEW_NEW_PROMOTION)); |
737 | |
738 | PtrComprCageBase cage_base(isolate); |
739 | while (current != top()) { |
740 | if (!Page::IsAlignedToPageSize(current)) { |
741 | // The allocation pointer should not be in the middle of an object. |
742 | CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) || |
743 | current < top()); |
744 | |
745 | HeapObject object = HeapObject::FromAddress(current); |
746 | |
747 | // The first word should be a map, and we expect all map pointers to |
748 | // be in map space or read-only space. |
749 | Map map = object.map(cage_base); |
750 | CHECK(map.IsMap(cage_base)); |
751 | CHECK(ReadOnlyHeap::Contains(map) || |
752 | isolate->heap()->space_for_maps()->Contains(map)); |
753 | |
754 | // The object should not be code or a map. |
755 | CHECK(!object.IsMap(cage_base)); |
756 | CHECK(!object.IsAbstractCode(cage_base)); |
757 | |
758 | // The object itself should look OK. |
759 | object.ObjectVerify(isolate); |
760 | |
761 | // All the interior pointers should be contained in the heap. |
762 | VerifyPointersVisitor visitor(heap()); |
763 | int size = object.Size(cage_base); |
764 | object.IterateBody(map, size, &visitor); |
765 | |
766 | if (object.IsExternalString(cage_base)) { |
767 | ExternalString external_string = ExternalString::cast(object); |
768 | size_t string_size = external_string.ExternalPayloadSize(); |
769 | external_space_bytes[ExternalBackingStoreType::kExternalString] += |
770 | string_size; |
771 | } |
772 | |
773 | current += size; |
774 | } else { |
775 | // At end of page, switch to next page. |
776 | Page* page = Page::FromAllocationAreaAddress(current)->next_page(); |
777 | CHECK(!page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)); |
778 | CHECK(!page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)); |
779 | current = page->area_start(); |
780 | } |
781 | } |
782 | |
783 | for (int i = 0; i < kNumTypes; i++) { |
784 | if (i == ExternalBackingStoreType::kArrayBuffer) continue; |
785 | ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i); |
786 | CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t)); |
787 | } |
788 | |
789 | if (!FLAG_concurrent_array_buffer_sweeping) { |
790 | size_t bytes = heap()->array_buffer_sweeper()->young().BytesSlow(); |
791 | CHECK_EQ(bytes, |
792 | ExternalBackingStoreBytes(ExternalBackingStoreType::kArrayBuffer)); |
793 | } |
794 | |
795 | // Check semi-spaces. |
796 | CHECK_EQ(from_space_.id(), kFromSpace); |
797 | CHECK_EQ(to_space_.id(), kToSpace); |
798 | from_space_.Verify(); |
799 | to_space_.Verify(); |
800 | } |
801 | #endif |
802 | |
803 | } // namespace internal |
804 | } // namespace v8 |