File: out/../deps/v8/src/heap/read-only-heap.cc
Warning: line 100, column 7: Value stored to 'ro_heap' is never read
1 | // Copyright 2019 the V8 project authors. All rights reserved. |
2 | // Use of this source code is governed by a BSD-style license that can be |
3 | // found in the LICENSE file. |
4 | |
5 | #include "src/heap/read-only-heap.h" |
6 | |
7 | #include <cstddef> |
8 | #include <cstring> |
9 | |
10 | #include "src/base/lazy-instance.h" |
11 | #include "src/base/platform/mutex.h" |
12 | #include "src/common/ptr-compr-inl.h" |
13 | #include "src/heap/basic-memory-chunk.h" |
14 | #include "src/heap/heap-write-barrier-inl.h" |
15 | #include "src/heap/memory-chunk.h" |
16 | #include "src/heap/read-only-spaces.h" |
17 | #include "src/heap/third-party/heap-api.h" |
18 | #include "src/objects/heap-object-inl.h" |
19 | #include "src/objects/objects-inl.h" |
20 | #include "src/objects/smi.h" |
21 | #include "src/snapshot/read-only-deserializer.h" |
22 | #include "src/utils/allocation.h" |
23 | |
24 | namespace v8 { |
25 | namespace internal { |
26 | |
27 | namespace { |
28 | // Mutex used to ensure that ReadOnlyArtifacts creation is only done once. |
29 | base::LazyMutex read_only_heap_creation_mutex_ = LAZY_MUTEX_INITIALIZER; |
30 | |
31 | // Weak pointer holding ReadOnlyArtifacts. ReadOnlyHeap::SetUp creates a |
32 | // std::shared_ptr from this when it attempts to reuse it. Since all Isolates |
33 | // hold a std::shared_ptr to this, the object is destroyed when no Isolates |
34 | // remain. |
35 | base::LazyInstance<std::weak_ptr<ReadOnlyArtifacts>>::type |
36 | read_only_artifacts_ = LAZY_INSTANCE_INITIALIZER; |
37 | |
38 | std::shared_ptr<ReadOnlyArtifacts> InitializeSharedReadOnlyArtifacts() { |
39 | std::shared_ptr<ReadOnlyArtifacts> artifacts; |
40 | if (COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL) { |
41 | artifacts = std::make_shared<PointerCompressedReadOnlyArtifacts>(); |
42 | } else { |
43 | artifacts = std::make_shared<SingleCopyReadOnlyArtifacts>(); |
44 | } |
45 | *read_only_artifacts_.Pointer() = artifacts; |
46 | return artifacts; |
47 | } |
48 | } // namespace |
49 | |
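// The platform page allocator is queried only once; the answer is cached in a
// function-local static.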
50 | bool ReadOnlyHeap::IsSharedMemoryAvailable() { |
51 | static bool shared_memory_allocation_supported = |
52 | GetPlatformPageAllocator()->CanAllocateSharedPages(); |
53 | return shared_memory_allocation_supported; |
54 | } |
55 | |
56 | // This ReadOnlyHeap instance will only be accessed by Isolates that are already |
57 | // set up. As such it doesn't need to be guarded by a mutex or shared_ptrs, |
58 | // since an already set up Isolate will hold a shared_ptr to |
59 | // read_only_artifacts_. |
60 | SoleReadOnlyHeap* SoleReadOnlyHeap::shared_ro_heap_ = nullptr; |
61 | |
62 | // static |
63 | void ReadOnlyHeap::SetUp(Isolate* isolate, |
64 | SnapshotData* read_only_snapshot_data, |
65 | bool can_rehash) { |
66 | DCHECK_NOT_NULL(isolate); |
67 | |
68 | if (IsReadOnlySpaceShared()) { |
69 | ReadOnlyHeap* ro_heap; |
70 | if (read_only_snapshot_data != nullptr) { |
71 | bool read_only_heap_created = false; |
72 | base::MutexGuard guard(read_only_heap_creation_mutex_.Pointer()); |
73 | std::shared_ptr<ReadOnlyArtifacts> artifacts = |
74 | read_only_artifacts_.Get().lock(); |
75 | if (!artifacts) { |
76 | artifacts = InitializeSharedReadOnlyArtifacts(); |
77 | artifacts->InitializeChecksum(read_only_snapshot_data); |
78 | ro_heap = CreateInitalHeapForBootstrapping(isolate, artifacts); |
79 | ro_heap->DeseralizeIntoIsolate(isolate, read_only_snapshot_data, |
80 | can_rehash); |
81 | read_only_heap_created = true; |
82 | } else { |
83 | // With pointer compression, there is one ReadOnlyHeap per Isolate. |
84 | // Without pointer compression, a single ReadOnlyHeap is shared by all Isolates. |
85 | ro_heap = artifacts->GetReadOnlyHeapForIsolate(isolate); |
86 | isolate->SetUpFromReadOnlyArtifacts(artifacts, ro_heap); |
87 | } |
88 | artifacts->VerifyChecksum(read_only_snapshot_data, |
89 | read_only_heap_created); |
90 | ro_heap->InitializeIsolateRoots(isolate); |
91 | } else { |
92 | // This path should only be taken in mksnapshot. It runs at most once, |
93 | // before tearing down the Isolate that holds this ReadOnlyArtifacts, |
94 | // and it is not thread-safe. |
95 | std::shared_ptr<ReadOnlyArtifacts> artifacts = |
96 | read_only_artifacts_.Get().lock(); |
97 | CHECK(!artifacts); |
98 | artifacts = InitializeSharedReadOnlyArtifacts(); |
99 | |
100 | ro_heap = CreateInitalHeapForBootstrapping(isolate, artifacts); |
Value stored to 'ro_heap' is never read | |
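// CreateInitalHeapForBootstrapping() already transfers ownership of the new
// heap to artifacts and registers it with the isolate (see its definition
// below), so the pointer stored in ro_heap is never read again on this path.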
101 | artifacts->VerifyChecksum(read_only_snapshot_data, true); |
102 | } |
103 | } else { |
104 | auto* ro_heap = new ReadOnlyHeap(new ReadOnlySpace(isolate->heap())); |
105 | isolate->SetUpFromReadOnlyArtifacts(nullptr, ro_heap); |
106 | if (read_only_snapshot_data != nullptr) { |
107 | ro_heap->DeseralizeIntoIsolate(isolate, read_only_snapshot_data, |
108 | can_rehash); |
109 | } |
110 | } |
111 | } |
112 | |
113 | void ReadOnlyHeap::DeseralizeIntoIsolate(Isolate* isolate, |
114 | SnapshotData* read_only_snapshot_data, |
115 | bool can_rehash) { |
116 | DCHECK_NOT_NULL(read_only_snapshot_data); |
117 | ReadOnlyDeserializer des(isolate, read_only_snapshot_data, can_rehash); |
118 | des.DeserializeIntoIsolate(); |
119 | InitFromIsolate(isolate); |
120 | } |
121 | |
122 | void ReadOnlyHeap::OnCreateHeapObjectsComplete(Isolate* isolate) { |
123 | DCHECK_NOT_NULL(isolate); |
124 | InitFromIsolate(isolate); |
125 | } |
126 | |
127 | // Only for compressed spaces |
128 | ReadOnlyHeap::ReadOnlyHeap(ReadOnlyHeap* ro_heap, ReadOnlySpace* ro_space) |
129 | : read_only_space_(ro_space), |
130 | read_only_object_cache_(ro_heap->read_only_object_cache_) { |
131 | DCHECK(ReadOnlyHeap::IsReadOnlySpaceShared()); |
132 | DCHECK(COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL); |
133 | } |
134 | |
135 | // static |
136 | ReadOnlyHeap* ReadOnlyHeap::CreateInitalHeapForBootstrapping( |
137 | Isolate* isolate, std::shared_ptr<ReadOnlyArtifacts> artifacts) { |
138 | DCHECK(IsReadOnlySpaceShared()); |
139 | |
140 | std::unique_ptr<ReadOnlyHeap> ro_heap; |
141 | auto* ro_space = new ReadOnlySpace(isolate->heap()); |
142 | if (COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL) { |
143 | ro_heap.reset(new ReadOnlyHeap(ro_space)); |
144 | } else { |
145 | std::unique_ptr<SoleReadOnlyHeap> sole_ro_heap( |
146 | new SoleReadOnlyHeap(ro_space)); |
147 | // The global shared ReadOnlyHeap is only used without pointer compression. |
148 | SoleReadOnlyHeap::shared_ro_heap_ = sole_ro_heap.get(); |
149 | ro_heap = std::move(sole_ro_heap); |
150 | } |
151 | artifacts->set_read_only_heap(std::move(ro_heap)); |
152 | isolate->SetUpFromReadOnlyArtifacts(artifacts, artifacts->read_only_heap()); |
153 | return artifacts->read_only_heap(); |
154 | } |
155 | |
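// Copies the shared heap's cached read-only roots into the isolate's roots
// table.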
156 | void SoleReadOnlyHeap::InitializeIsolateRoots(Isolate* isolate) { |
157 | void* const isolate_ro_roots = |
158 | isolate->roots_table().read_only_roots_begin().location(); |
159 | std::memcpy(isolate_ro_roots, read_only_roots_, |
160 | kEntriesCount * sizeof(Address)); |
161 | } |
162 | |
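// Caches the isolate's read-only roots in the shared heap so that isolates
// set up later can be initialized from this copy.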
163 | void SoleReadOnlyHeap::InitializeFromIsolateRoots(Isolate* isolate) { |
164 | void* const isolate_ro_roots = |
165 | isolate->roots_table().read_only_roots_begin().location(); |
166 | std::memcpy(read_only_roots_, isolate_ro_roots, |
167 | kEntriesCount * sizeof(Address)); |
168 | } |
169 | |
170 | void ReadOnlyHeap::InitFromIsolate(Isolate* isolate) { |
171 | DCHECK(!init_complete_); |
172 | read_only_space_->ShrinkPages(); |
173 | if (IsReadOnlySpaceShared()) { |
174 | InitializeFromIsolateRoots(isolate); |
175 | std::shared_ptr<ReadOnlyArtifacts> artifacts( |
176 | *read_only_artifacts_.Pointer()); |
177 | |
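// Hand this isolate's read-only pages over to the shared artifacts and point
// this heap at the resulting shared read-only space.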
178 | read_only_space()->DetachPagesAndAddToArtifacts(artifacts); |
179 | artifacts->ReinstallReadOnlySpace(isolate); |
180 | |
181 | read_only_space_ = artifacts->shared_read_only_space(); |
182 | |
183 | #ifdef DEBUG |
184 | artifacts->VerifyHeapAndSpaceRelationships(isolate); |
185 | #endif |
186 | } else { |
187 | read_only_space_->Seal(ReadOnlySpace::SealMode::kDoNotDetachFromHeap); |
188 | } |
189 | init_complete_ = true; |
190 | } |
191 | |
192 | void ReadOnlyHeap::OnHeapTearDown(Heap* heap) { |
193 | read_only_space_->TearDown(heap->memory_allocator()); |
194 | delete read_only_space_; |
195 | } |
196 | |
197 | void SoleReadOnlyHeap::OnHeapTearDown(Heap* heap) { |
198 | // Do nothing as ReadOnlyHeap is shared between all Isolates. |
199 | } |
200 | |
201 | // static |
202 | void ReadOnlyHeap::PopulateReadOnlySpaceStatistics( |
203 | SharedMemoryStatistics* statistics) { |
204 | statistics->read_only_space_size_ = 0; |
205 | statistics->read_only_space_used_size_ = 0; |
206 | statistics->read_only_space_physical_size_ = 0; |
207 | if (IsReadOnlySpaceShared()) { |
208 | std::shared_ptr<ReadOnlyArtifacts> artifacts = |
209 | read_only_artifacts_.Get().lock(); |
210 | if (artifacts) { |
211 | auto* ro_space = artifacts->shared_read_only_space(); |
212 | statistics->read_only_space_size_ = ro_space->CommittedMemory(); |
213 | statistics->read_only_space_used_size_ = ro_space->Size(); |
214 | statistics->read_only_space_physical_size_ = |
215 | ro_space->CommittedPhysicalMemory(); |
216 | } |
217 | } |
218 | } |
219 | |
220 | // static |
221 | bool ReadOnlyHeap::Contains(Address address) { |
222 | if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) { |
223 | return third_party_heap::Heap::InReadOnlySpace(address); |
224 | } else { |
225 | return BasicMemoryChunk::FromAddress(address)->InReadOnlySpace(); |
226 | } |
227 | } |
228 | |
229 | // static |
230 | bool ReadOnlyHeap::Contains(HeapObject object) { |
231 | if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) { |
232 | return third_party_heap::Heap::InReadOnlySpace(object.address()); |
233 | } else { |
234 | return BasicMemoryChunk::FromHeapObject(object)->InReadOnlySpace(); |
235 | } |
236 | } |
237 | |
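// Appends an empty slot (Smi::zero()) to the cache and returns its address so
// the caller can fill in the cached object.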
238 | Object* ReadOnlyHeap::ExtendReadOnlyObjectCache() { |
239 | read_only_object_cache_.push_back(Smi::zero()); |
240 | return &read_only_object_cache_.back(); |
241 | } |
242 | |
243 | Object ReadOnlyHeap::cached_read_only_object(size_t i) const { |
244 | DCHECK_LE(i, read_only_object_cache_.size()); |
245 | return read_only_object_cache_[i]; |
246 | } |
247 | |
248 | bool ReadOnlyHeap::read_only_object_cache_is_initialized() const { |
249 | return read_only_object_cache_.size() > 0; |
250 | } |
251 | |
252 | size_t ReadOnlyHeap::read_only_object_cache_size() const { |
253 | return read_only_object_cache_.size(); |
254 | } |
255 | |
256 | ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator( |
257 | const ReadOnlyHeap* ro_heap) |
258 | : ReadOnlyHeapObjectIterator(ro_heap->read_only_space()) {} |
259 | |
260 | ReadOnlyHeapObjectIterator::ReadOnlyHeapObjectIterator( |
261 | const ReadOnlySpace* ro_space) |
262 | : ro_space_(ro_space), |
263 | current_page_(V8_ENABLE_THIRD_PARTY_HEAP_BOOL |
264 | ? std::vector<ReadOnlyPage*>::iterator() |
265 | : ro_space->pages().begin()), |
266 | current_addr_(V8_ENABLE_THIRD_PARTY_HEAP_BOOL |
267 | ? Address() |
268 | : (*current_page_)->GetAreaStart()) {} |
269 | |
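// Returns the next live object in the read-only space, or an empty HeapObject
// once iteration is complete; free-space and filler objects are skipped.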
270 | HeapObject ReadOnlyHeapObjectIterator::Next() { |
271 | if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) { |
272 | return HeapObject(); // Unsupported |
273 | } |
274 | |
275 | if (current_page_ == ro_space_->pages().end()) { |
276 | return HeapObject(); |
277 | } |
278 | |
279 | ReadOnlyPage* current_page = *current_page_; |
280 | for (;;) { |
281 | Address end = current_page->address() + current_page->area_size() + |
282 | MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(RO_SPACE); |
283 | DCHECK_LE(current_addr_, end); |
284 | if (current_addr_ == end) { |
285 | // Progress to the next page. |
286 | ++current_page_; |
287 | if (current_page_ == ro_space_->pages().end()) { |
288 | return HeapObject(); |
289 | } |
290 | current_page = *current_page_; |
291 | current_addr_ = current_page->GetAreaStart(); |
292 | } |
293 | |
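// The range [top, limit) is the unused part of the current allocation area,
// so skip it rather than reading an object from unallocated memory.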
294 | if (current_addr_ == ro_space_->top() && |
295 | current_addr_ != ro_space_->limit()) { |
296 | current_addr_ = ro_space_->limit(); |
297 | continue; |
298 | } |
299 | HeapObject object = HeapObject::FromAddress(current_addr_); |
300 | const int object_size = object.Size(); |
301 | current_addr_ += object_size; |
302 | |
303 | if (object.IsFreeSpaceOrFiller()) { |
304 | continue; |
305 | } |
306 | |
307 | DCHECK_OBJECT_SIZE(object_size); |
308 | return object; |
309 | } |
310 | } |
311 | |
312 | } // namespace internal |
313 | } // namespace v8 |