File: | out/../deps/v8/src/objects/backing-store.cc |
Warning: | line 228, column 12 | Value stored to 'reservation_size' during its initialization is never read |
1 | // Copyright 2019 the V8 project authors. All rights reserved. |
2 | // Use of this source code is governed by a BSD-style license that can be |
3 | // found in the LICENSE file. |
4 | |
5 | #include "src/objects/backing-store.h" |
6 | |
7 | #include <cstring> |
8 | |
9 | #include "src/base/platform/wrappers.h" |
10 | #include "src/execution/isolate.h" |
11 | #include "src/handles/global-handles.h" |
12 | #include "src/logging/counters.h" |
13 | #include "src/sandbox/sandbox.h" |
14 | |
15 | #if V8_ENABLE_WEBASSEMBLY |
16 | #include "src/trap-handler/trap-handler.h" |
17 | #include "src/wasm/wasm-constants.h" |
18 | #include "src/wasm/wasm-engine.h" |
19 | #include "src/wasm/wasm-limits.h" |
20 | #include "src/wasm/wasm-objects-inl.h" |
21 | #endif // V8_ENABLE_WEBASSEMBLY |
22 | |
23 | #define TRACE_BS(...) \ |
24 | do { \ |
25 | if (FLAG_trace_backing_store) PrintF(__VA_ARGS__); \ |
26 | } while (false) |
27 | |
28 | namespace v8 { |
29 | namespace internal { |
30 | |
31 | namespace { |
32 | |
33 | #if V8_ENABLE_WEBASSEMBLY |
34 | constexpr uint64_t kNegativeGuardSize = uint64_t{2} * GB; |
35 | |
36 | #if V8_TARGET_ARCH_64_BIT |
37 | constexpr uint64_t kFullGuardSize = uint64_t{10} * GB; |
38 | #endif |
39 | |
40 | #endif // V8_ENABLE_WEBASSEMBLY |
41 | |
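| // Process-wide counter used to assign each BackingStore a unique id_ (see the constructor). |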
42 | std::atomic<uint32_t> next_backing_store_id_{1}; |
43 | |
44 | // Allocation results are reported to UMA |
45 | // |
46 | // See wasm_memory_allocation_result in counters-definitions.h |
47 | enum class AllocationStatus { |
48 | kSuccess, // Succeeded on the first try |
49 | |
50 | kSuccessAfterRetry, // Succeeded after garbage collection |
51 | |
52 | kAddressSpaceLimitReachedFailure, // Failed because Wasm is at its address |
53 | // space limit |
54 | |
55 | kOtherFailure // Failed for an unknown reason |
56 | }; |
57 | |
58 | // Attempts to allocate memory inside the sandbox currently fall back to |
59 | // allocating memory outside of the sandbox if necessary. Once this fallback is |
60 | // no longer allowed/possible, these cases will become allocation failures |
61 | // instead. To track the frequency of such events, the outcome of memory |
62 | // allocation attempts inside the sandbox is reported to UMA. |
63 | // |
64 | // See caged_memory_allocation_outcome in counters-definitions.h |
65 | // This class and the entry in counters-definitions.h use the term "cage" |
66 | // instead of "sandbox" for historical reasons. |
67 | enum class CagedMemoryAllocationOutcome { |
68 | kSuccess, // Allocation succeeded inside the cage |
69 | kOutsideCage, // Allocation failed inside the cage but succeeded outside |
70 | kFailure, // Allocation failed inside and outside of the cage |
71 | }; |
72 | |
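| // Returns the complete reserved region for a buffer, including any guard pages, so that it can be released with a single FreePages call. |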
73 | base::AddressRegion GetReservedRegion(bool has_guard_regions, |
74 | void* buffer_start, |
75 | size_t byte_capacity) { |
76 | #if V8_TARGET_ARCH_64_BIT && V8_ENABLE_WEBASSEMBLY |
77 | if (has_guard_regions) { |
78 | // Guard regions always look like this: |
79 | // |xxx(2GiB)xxx|.......(4GiB)..xxxxx|xxxxxx(4GiB)xxxxxx| |
80 | // ^ buffer_start |
81 | // ^ byte_length |
82 | // ^ negative guard region ^ positive guard region |
83 | |
84 | Address start = reinterpret_cast<Address>(buffer_start); |
85 | DCHECK_EQ(8, sizeof(size_t)); // only use on 64-bit |
86 | DCHECK_EQ(0, start % AllocatePageSize()); |
87 | return base::AddressRegion(start - kNegativeGuardSize, |
88 | static_cast<size_t>(kFullGuardSize)); |
89 | } |
90 | #endif |
91 | |
92 | DCHECK(!has_guard_regions); |
93 | return base::AddressRegion(reinterpret_cast<Address>(buffer_start), |
94 | byte_capacity); |
95 | } |
96 | |
97 | size_t GetReservationSize(bool has_guard_regions, size_t byte_capacity) { |
98 | #if V8_TARGET_ARCH_64_BIT && V8_ENABLE_WEBASSEMBLY |
99 | if (has_guard_regions) return kFullGuardSize; |
100 | #else |
101 | DCHECK(!has_guard_regions); |
102 | #endif |
103 | |
104 | return byte_capacity; |
105 | } |
106 | |
107 | void RecordStatus(Isolate* isolate, AllocationStatus status) { |
108 | isolate->counters()->wasm_memory_allocation_result()->AddSample( |
109 | static_cast<int>(status)); |
110 | } |
111 | |
112 | // When the sandbox is active, this function records the outcome of attempts to |
113 | // allocate memory inside the sandbox which fall back to allocating memory |
114 | // outside of the sandbox. Passing a value of nullptr for the result indicates |
115 | // that the memory could not be allocated at all. |
116 | void RecordSandboxMemoryAllocationResult(Isolate* isolate, void* result) { |
117 | // This metric is only meaningful when the sandbox is active. |
118 | #ifdef V8_SANDBOX |
119 | if (GetProcessWideSandbox()->is_initialized()) { |
120 | CagedMemoryAllocationOutcome outcome; |
121 | if (result) { |
122 | bool allocation_in_cage = GetProcessWideSandbox()->Contains(result); |
123 | outcome = allocation_in_cage ? CagedMemoryAllocationOutcome::kSuccess |
124 | : CagedMemoryAllocationOutcome::kOutsideCage; |
125 | } else { |
126 | outcome = CagedMemoryAllocationOutcome::kFailure; |
127 | } |
128 | isolate->counters()->caged_memory_allocation_outcome()->AddSample( |
129 | static_cast<int>(outcome)); |
130 | } |
131 | #endif |
132 | } |
133 | |
134 | inline void DebugCheckZero(void* start, size_t byte_length) { |
135 | #if DEBUG |
136 | // Double check memory is zero-initialized. Despite being DEBUG-only, |
137 | // this function is somewhat optimized for the benefit of test suite |
138 | // execution times (some tests allocate several gigabytes). |
139 | const byte* bytes = reinterpret_cast<const byte*>(start); |
140 | const size_t kBaseCase = 32; |
141 | for (size_t i = 0; i < kBaseCase && i < byte_length; i++) { |
142 | DCHECK_EQ(0, bytes[i]); |
143 | } |
144 | // Having checked the first kBaseCase bytes to be zero, we can now use |
145 | // {memcmp} to compare the range against itself shifted by that amount, |
146 | // thereby inductively checking the remaining bytes. |
147 | if (byte_length > kBaseCase) { |
148 | DCHECK_EQ(0, memcmp(bytes, bytes + kBaseCase, byte_length - kBaseCase)); |
149 | } |
150 | #endif |
151 | } |
152 | } // namespace |
153 | |
154 | // The backing store for a Wasm shared memory remembers all the isolates |
155 | // with which it has been shared. |
156 | struct SharedWasmMemoryData { |
157 | std::vector<Isolate*> isolates_; |
158 | }; |
159 | |
160 | void BackingStore::Clear() { |
161 | buffer_start_ = nullptr; |
162 | byte_length_ = 0; |
163 | has_guard_regions_ = false; |
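| // The allocator shared_ptr lives in the type_specific_data_ union, so it must be destroyed explicitly before the union member is overwritten below. |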
164 | if (holds_shared_ptr_to_allocator_) { |
165 | type_specific_data_.v8_api_array_buffer_allocator_shared |
166 | .std::shared_ptr<v8::ArrayBuffer::Allocator>::~shared_ptr(); |
167 | holds_shared_ptr_to_allocator_ = false; |
168 | } |
169 | type_specific_data_.v8_api_array_buffer_allocator = nullptr; |
170 | } |
171 | |
172 | BackingStore::BackingStore(void* buffer_start, size_t byte_length, |
173 | size_t max_byte_length, size_t byte_capacity, |
174 | SharedFlag shared, ResizableFlag resizable, |
175 | bool is_wasm_memory, bool free_on_destruct, |
176 | bool has_guard_regions, bool custom_deleter, |
177 | bool empty_deleter) |
178 | : buffer_start_(buffer_start), |
179 | byte_length_(byte_length), |
180 | max_byte_length_(max_byte_length), |
181 | byte_capacity_(byte_capacity), |
182 | id_(next_backing_store_id_.fetch_add(1)), |
183 | is_shared_(shared == SharedFlag::kShared), |
184 | is_resizable_(resizable == ResizableFlag::kResizable), |
185 | is_wasm_memory_(is_wasm_memory), |
186 | holds_shared_ptr_to_allocator_(false), |
187 | free_on_destruct_(free_on_destruct), |
188 | has_guard_regions_(has_guard_regions), |
189 | globally_registered_(false), |
190 | custom_deleter_(custom_deleter), |
191 | empty_deleter_(empty_deleter) { |
192 | // TODO(v8:11111): RAB / GSAB - Wasm integration. |
193 | DCHECK_IMPLIES(is_wasm_memory_, !is_resizable_); |
194 | DCHECK_IMPLIES(is_resizable_, !custom_deleter_); |
195 | DCHECK_IMPLIES(is_resizable_, free_on_destruct_); |
196 | DCHECK_IMPLIES(!is_wasm_memory && !is_resizable_, |
197 | byte_length_ == max_byte_length_); |
198 | DCHECK_GE(max_byte_length_, byte_length_); |
199 | DCHECK_GE(byte_capacity_, max_byte_length_); |
200 | } |
201 | |
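| // Release the underlying memory according to how it was allocated: wasm memories and resizable buffers are returned to the page allocator, custom-deleter stores invoke their callback, and ordinary stores are freed through the embedder's ArrayBuffer allocator. |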
202 | BackingStore::~BackingStore() { |
203 | GlobalBackingStoreRegistry::Unregister(this); |
204 | |
205 | if (buffer_start_ == nullptr) { |
206 | Clear(); |
207 | return; |
208 | } |
209 | |
210 | PageAllocator* page_allocator = GetPlatformPageAllocator(); |
211 | // TODO(saelo) here and elsewhere in this file, replace with |
212 | // GetArrayBufferPageAllocator once the fallback to the platform page |
213 | // allocator is no longer allowed. |
214 | #ifdef V8_SANDBOX |
215 | if (GetProcessWideSandbox()->Contains(buffer_start_)) { |
216 | page_allocator = GetSandboxPageAllocator(); |
217 | } else { |
218 | DCHECK(kAllowBackingStoresOutsideSandbox); |
219 | } |
220 | #endif |
221 | |
222 | #if V8_ENABLE_WEBASSEMBLY |
223 | if (is_wasm_memory_) { |
224 | // TODO(v8:11111): RAB / GSAB - Wasm integration. |
225 | DCHECK(!is_resizable_); |
226 | DCHECK(free_on_destruct_); |
227 | DCHECK(!custom_deleter_); |
228 | size_t reservation_size = |
Value stored to 'reservation_size' during its initialization is never read | |
229 | GetReservationSize(has_guard_regions_, byte_capacity_); |
230 | TRACE_BS( |
231 | "BSw:free bs=%p mem=%p (length=%zu, capacity=%zu, reservation=%zu)\n", |
232 | this, buffer_start_, byte_length(), byte_capacity_, reservation_size); |
233 | if (is_shared_) { |
234 | // Deallocate the list of attached memory objects. |
235 | SharedWasmMemoryData* shared_data = get_shared_wasm_memory_data(); |
236 | delete shared_data; |
237 | type_specific_data_.shared_wasm_memory_data = nullptr; |
238 | } |
239 | |
240 | // Wasm memories are always allocated through the page allocator. |
241 | auto region = |
242 | GetReservedRegion(has_guard_regions_, buffer_start_, byte_capacity_); |
243 | |
244 | if (!region.is_empty()) { |
245 | FreePages(page_allocator, reinterpret_cast<void*>(region.begin()), |
246 | region.size()); |
247 | } |
248 | Clear(); |
249 | return; |
250 | } |
251 | #endif // V8_ENABLE_WEBASSEMBLY |
252 | |
253 | if (is_resizable_) { |
254 | DCHECK(free_on_destruct_); |
255 | DCHECK(!custom_deleter_); |
256 | auto region = |
257 | GetReservedRegion(has_guard_regions_, buffer_start_, byte_capacity_); |
258 | |
259 | if (!region.is_empty()) { |
260 | FreePages(page_allocator, reinterpret_cast<void*>(region.begin()), |
261 | region.size()); |
262 | } |
263 | Clear(); |
264 | return; |
265 | } |
266 | if (custom_deleter_) { |
267 | DCHECK(free_on_destruct_); |
268 | TRACE_BS("BS:custom deleter bs=%p mem=%p (length=%zu, capacity=%zu)\n", |
269 | this, buffer_start_, byte_length(), byte_capacity_); |
270 | type_specific_data_.deleter.callback(buffer_start_, byte_length_, |
271 | type_specific_data_.deleter.data); |
272 | Clear(); |
273 | return; |
274 | } |
275 | if (free_on_destruct_) { |
276 | // JSArrayBuffer backing store. Deallocate through the embedder's allocator. |
277 | auto allocator = get_v8_api_array_buffer_allocator(); |
278 | TRACE_BS("BS:free bs=%p mem=%p (length=%zu, capacity=%zu)\n", this, |
279 | buffer_start_, byte_length(), byte_capacity_); |
280 | allocator->Free(buffer_start_, byte_length_); |
281 | } |
282 | Clear(); |
283 | } |
284 | |
285 | // Allocate a backing store using the array buffer allocator from the embedder. |
286 | std::unique_ptr<BackingStore> BackingStore::Allocate( |
287 | Isolate* isolate, size_t byte_length, SharedFlag shared, |
288 | InitializedFlag initialized) { |
289 | void* buffer_start = nullptr; |
290 | auto allocator = isolate->array_buffer_allocator(); |
291 | CHECK_NOT_NULL(allocator); |
292 | if (byte_length != 0) { |
293 | auto counters = isolate->counters(); |
294 | int mb_length = static_cast<int>(byte_length / MB); |
295 | if (mb_length > 0) { |
296 | counters->array_buffer_big_allocations()->AddSample(mb_length); |
297 | } |
298 | if (shared == SharedFlag::kShared) { |
299 | counters->shared_array_allocations()->AddSample(mb_length); |
300 | } |
301 | auto allocate_buffer = [allocator, initialized](size_t byte_length) { |
302 | if (initialized == InitializedFlag::kUninitialized) { |
303 | return allocator->AllocateUninitialized(byte_length); |
304 | } |
305 | void* buffer_start = allocator->Allocate(byte_length); |
306 | if (buffer_start) { |
307 | // TODO(wasm): node does not implement the zero-initialization API. |
308 | // Reenable this debug check when node does implement it properly. |
309 | constexpr bool |
310 | kDebugCheckZeroDisabledDueToNodeNotImplementingZeroInitAPI = true; |
311 | if ((!(kDebugCheckZeroDisabledDueToNodeNotImplementingZeroInitAPI)) && |
312 | !FLAG_mock_arraybuffer_allocator) { |
313 | DebugCheckZero(buffer_start, byte_length); |
314 | } |
315 | } |
316 | return buffer_start; |
317 | }; |
318 | |
319 | buffer_start = isolate->heap()->AllocateExternalBackingStore( |
320 | allocate_buffer, byte_length); |
321 | |
322 | if (buffer_start == nullptr) { |
323 | // Allocation failed. |
324 | counters->array_buffer_new_size_failures()->AddSample(mb_length); |
325 | return {}; |
326 | } |
327 | } |
328 | |
329 | auto result = new BackingStore(buffer_start, // start |
330 | byte_length, // length |
331 | byte_length, // max length |
332 | byte_length, // capacity |
333 | shared, // shared |
334 | ResizableFlag::kNotResizable, // resizable |
335 | false, // is_wasm_memory |
336 | true, // free_on_destruct |
337 | false, // has_guard_regions |
338 | false, // custom_deleter |
339 | false); // empty_deleter |
340 | |
341 | TRACE_BS("BS:alloc bs=%p mem=%p (length=%zu)\n", result, |
342 | result->buffer_start(), byte_length); |
343 | result->SetAllocatorFromIsolate(isolate); |
344 | return std::unique_ptr<BackingStore>(result); |
345 | } |
346 | |
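| // Remember the embedder's ArrayBuffer::Allocator: when the isolate exposes it as a shared_ptr, hold a reference so the allocator outlives this backing store; otherwise store the raw pointer. |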
347 | void BackingStore::SetAllocatorFromIsolate(Isolate* isolate) { |
348 | if (auto allocator_shared = isolate->array_buffer_allocator_shared()) { |
349 | holds_shared_ptr_to_allocator_ = true; |
350 | new (&type_specific_data_.v8_api_array_buffer_allocator_shared) |
351 | std::shared_ptr<v8::ArrayBuffer::Allocator>( |
352 | std::move(allocator_shared)); |
353 | } else { |
354 | type_specific_data_.v8_api_array_buffer_allocator = |
355 | isolate->array_buffer_allocator(); |
356 | } |
357 | } |
358 | |
359 | #if V8_ENABLE_WEBASSEMBLY |
360 | // Allocate a backing store for a Wasm memory. Always use the page allocator |
361 | // and add guard regions. |
362 | std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory( |
363 | Isolate* isolate, size_t initial_pages, size_t maximum_pages, |
364 | SharedFlag shared) { |
365 | // Compute size of reserved memory. |
366 | size_t engine_max_pages = wasm::max_mem_pages(); |
367 | maximum_pages = std::min(engine_max_pages, maximum_pages); |
368 | |
369 | auto result = TryAllocateAndPartiallyCommitMemory( |
370 | isolate, initial_pages * wasm::kWasmPageSize, |
371 | maximum_pages * wasm::kWasmPageSize, wasm::kWasmPageSize, initial_pages, |
372 | maximum_pages, true, shared); |
373 | // Shared Wasm memories need an anchor for the memory object list. |
374 | if (result && shared == SharedFlag::kShared) { |
375 | result->type_specific_data_.shared_wasm_memory_data = |
376 | new SharedWasmMemoryData(); |
377 | } |
378 | return result; |
379 | } |
380 | #endif // V8_ENABLE_WEBASSEMBLY |
381 | |
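| // Reserve address space for the maximum size (plus guard regions for wasm memories), but commit only the initial pages; the remainder stays inaccessible until the buffer is grown. |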
382 | std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory( |
383 | Isolate* isolate, size_t byte_length, size_t max_byte_length, |
384 | size_t page_size, size_t initial_pages, size_t maximum_pages, |
385 | bool is_wasm_memory, SharedFlag shared) { |
386 | // Enforce engine limitation on the maximum number of pages. |
387 | if (maximum_pages > std::numeric_limits<size_t>::max() / page_size) { |
388 | return nullptr; |
389 | } |
390 | |
391 | // Cannot reserve 0 pages on some OSes. |
392 | if (maximum_pages == 0) maximum_pages = 1; |
393 | |
394 | TRACE_BS("BSw:try %zu pages, %zu max\n", initial_pages, maximum_pages); |
395 | |
396 | #if V8_ENABLE_WEBASSEMBLY |
397 | bool guards = is_wasm_memory && trap_handler::IsTrapHandlerEnabled(); |
398 | #else |
399 | CHECK(!is_wasm_memory); |
400 | bool guards = false; |
401 | #endif // V8_ENABLE_WEBASSEMBLY |
402 | |
403 | // For accounting purposes, whether a GC was necessary. |
404 | bool did_retry = false; |
405 | |
406 | // A helper to try running a function up to 3 times, executing a GC |
407 | // if the first and second attempts failed. |
408 | auto gc_retry = [&](const std::function<bool()>& fn) { |
409 | for (int i = 0; i < 3; i++) { |
410 | if (fn()) return true; |
411 | // Collect garbage and retry. |
412 | did_retry = true; |
413 | // TODO(wasm): try Heap::EagerlyFreeExternalMemory() first? |
414 | isolate->heap()->MemoryPressureNotification( |
415 | MemoryPressureLevel::kCritical, true); |
416 | } |
417 | return false; |
418 | }; |
419 | |
420 | size_t byte_capacity = maximum_pages * page_size; |
421 | size_t reservation_size = GetReservationSize(guards, byte_capacity); |
422 | |
423 | //-------------------------------------------------------------------------- |
424 | // Allocate pages (inaccessible by default). |
425 | //-------------------------------------------------------------------------- |
426 | void* allocation_base = nullptr; |
427 | PageAllocator* page_allocator = GetPlatformPageAllocator(); |
428 | auto allocate_pages = [&] { |
429 | #ifdef V8_SANDBOX |
430 | page_allocator = GetSandboxPageAllocator(); |
431 | allocation_base = AllocatePages(page_allocator, nullptr, reservation_size, |
432 | page_size, PageAllocator::kNoAccess); |
433 | if (allocation_base) return true; |
434 | // We currently still allow falling back to the platform page allocator if |
435 | // the sandbox page allocator fails. This will eventually be removed. |
436 | // TODO(chromium:1218005) once we forbid the fallback, we should have a |
437 | // single API, e.g. GetArrayBufferPageAllocator(), that returns the correct |
438 | // page allocator to use here depending on whether the sandbox is enabled |
439 | // or not. |
440 | if (!kAllowBackingStoresOutsideSandbox) return false; |
441 | page_allocator = GetPlatformPageAllocator(); |
442 | #endif |
443 | allocation_base = AllocatePages(page_allocator, nullptr, reservation_size, |
444 | page_size, PageAllocator::kNoAccess); |
445 | return allocation_base != nullptr; |
446 | }; |
447 | if (!gc_retry(allocate_pages)) { |
448 | // Page allocator could not reserve enough pages. |
449 | RecordStatus(isolate, AllocationStatus::kOtherFailure); |
450 | RecordSandboxMemoryAllocationResult(isolate, nullptr); |
451 | TRACE_BS("BSw:try failed to allocate pages\n"); |
452 | return {}; |
453 | } |
454 | |
455 | // Get a pointer to the start of the buffer, skipping negative guard region |
456 | // if necessary. |
457 | #if V8_ENABLE_WEBASSEMBLY |
458 | byte* buffer_start = reinterpret_cast<byte*>(allocation_base) + |
459 | (guards ? kNegativeGuardSize : 0); |
460 | #else |
461 | DCHECK(!guards); |
462 | byte* buffer_start = reinterpret_cast<byte*>(allocation_base); |
463 | #endif |
464 | |
465 | //-------------------------------------------------------------------------- |
466 | // Commit the initial pages (allow read/write). |
467 | //-------------------------------------------------------------------------- |
468 | size_t committed_byte_length = initial_pages * page_size; |
469 | auto commit_memory = [&] { |
470 | return committed_byte_length == 0 || |
471 | SetPermissions(page_allocator, buffer_start, committed_byte_length, |
472 | PageAllocator::kReadWrite); |
473 | }; |
474 | if (!gc_retry(commit_memory)) { |
475 | TRACE_BS("BSw:try failed to set permissions (%p, %zu)\n", buffer_start, |
476 | committed_byte_length); |
477 | FreePages(page_allocator, allocation_base, reservation_size); |
478 | // SetPermissions put us over the process memory limit. |
479 | // We return an empty result so that the caller can throw an exception. |
480 | return {}; |
481 | } |
482 | |
483 | DebugCheckZero(buffer_start, byte_length); // touch the bytes. |
484 | |
485 | RecordStatus(isolate, did_retry ? AllocationStatus::kSuccessAfterRetry |
486 | : AllocationStatus::kSuccess); |
487 | RecordSandboxMemoryAllocationResult(isolate, allocation_base); |
488 | |
489 | ResizableFlag resizable = |
490 | is_wasm_memory ? ResizableFlag::kNotResizable : ResizableFlag::kResizable; |
491 | |
492 | auto result = new BackingStore(buffer_start, // start |
493 | byte_length, // length |
494 | max_byte_length, // max_byte_length |
495 | byte_capacity, // capacity |
496 | shared, // shared |
497 | resizable, // resizable |
498 | is_wasm_memory, // is_wasm_memory |
499 | true, // free_on_destruct |
500 | guards, // has_guard_regions |
501 | false, // custom_deleter |
502 | false); // empty_deleter |
503 | |
504 | TRACE_BS( |
505 | "BSw:alloc bs=%p mem=%p (length=%zu, capacity=%zu, reservation=%zu)\n", |
506 | result, result->buffer_start(), byte_length, byte_capacity, |
507 | reservation_size); |
508 | |
509 | return std::unique_ptr<BackingStore>(result); |
510 | } |
511 | |
512 | #if V8_ENABLE_WEBASSEMBLY |
513 | // Allocate a backing store for a Wasm memory. Always use the page allocator |
514 | // and add guard regions. |
515 | std::unique_ptr<BackingStore> BackingStore::AllocateWasmMemory( |
516 | Isolate* isolate, size_t initial_pages, size_t maximum_pages, |
517 | SharedFlag shared) { |
518 | // Wasm pages must be a multiple of the allocation page size. |
519 | DCHECK_EQ(0, wasm::kWasmPageSize % AllocatePageSize()); |
520 | |
521 | // Enforce engine limitation on the maximum number of pages. |
522 | if (initial_pages > wasm::max_mem_pages()) return nullptr; |
523 | |
524 | auto backing_store = |
525 | TryAllocateWasmMemory(isolate, initial_pages, maximum_pages, shared); |
526 | if (maximum_pages == initial_pages) { |
527 | // If the initial and maximum page counts are equal, there is nothing more to do; return early. |
528 | return backing_store; |
529 | } |
530 | |
531 | // Retry with smaller maximum pages at each retry. |
532 | const int kAllocationTries = 3; |
533 | auto delta = (maximum_pages - initial_pages) / (kAllocationTries + 1); |
534 | size_t sizes[] = {maximum_pages - delta, maximum_pages - 2 * delta, |
535 | maximum_pages - 3 * delta, initial_pages}; |
536 | |
537 | for (size_t i = 0; i < arraysize(sizes) && !backing_store; i++) { |
538 | backing_store = |
539 | TryAllocateWasmMemory(isolate, initial_pages, sizes[i], shared); |
540 | } |
541 | return backing_store; |
542 | } |
543 | |
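| // Allocate a fresh wasm memory of new_pages (with max_pages as its maximum) and copy the current contents into it. |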
544 | std::unique_ptr<BackingStore> BackingStore::CopyWasmMemory(Isolate* isolate, |
545 | size_t new_pages, |
546 | size_t max_pages) { |
547 | // Note that we could allocate uninitialized to save initialization cost here, |
548 | // but since Wasm memories are allocated by the page allocator, the zeroing |
549 | // cost is already built-in. |
550 | auto new_backing_store = BackingStore::AllocateWasmMemory( |
551 | isolate, new_pages, max_pages, |
552 | is_shared() ? SharedFlag::kShared : SharedFlag::kNotShared); |
553 | |
554 | if (!new_backing_store || |
555 | new_backing_store->has_guard_regions() != has_guard_regions_) { |
556 | return {}; |
557 | } |
558 | |
559 | if (byte_length_ > 0) { |
560 | // If the allocation was successful, then the new buffer must be at least |
561 | // as big as the old one. |
562 | DCHECK_GE(new_pages * wasm::kWasmPageSize, byte_length_); |
563 | memcpy(new_backing_store->buffer_start(), buffer_start_, byte_length_); |
564 | } |
565 | |
566 | return new_backing_store; |
567 | } |
568 | |
569 | // Try to grow the size of a wasm memory in place, without realloc + copy. |
570 | base::Optional<size_t> BackingStore::GrowWasmMemoryInPlace(Isolate* isolate, |
571 | size_t delta_pages, |
572 | size_t max_pages) { |
573 | // This function grows wasm memory by |
574 | // * changing the permissions of additional {delta_pages} pages to kReadWrite; |
575 | // * incrementing {byte_length_}; |
576 | // |
577 | // As this code is executed concurrently, the following steps are executed: |
578 | // 1) Read the current value of {byte_length_}; |
579 | // 2) Change the permission of all pages from {buffer_start_} to |
580 | // {byte_length_} + {delta_pages} * {page_size} to kReadWrite; |
581 | // * This operation may be executed racefully. The OS takes care of |
582 | // synchronization. |
583 | // 3) Try to update {byte_length_} with a compare_exchange; |
584 | // 4) Repeat 1) to 3) until the compare_exchange in 3) succeeds; |
585 | // |
586 | // The result of this function is the {byte_length_} before growing in pages. |
587 | // The result of this function appears like the result of an RMW-update on |
588 | // {byte_length_}, i.e. two concurrent calls to this function will result in |
589 | // different return values if {delta_pages} != 0. |
590 | // |
591 | // Invariants: |
592 | // * Permissions are always set incrementally, i.e. for any page {b} with |
593 | // kReadWrite permission, all pages between the first page {a} and page {b} |
594 | // also have kReadWrite permission. |
595 | // * {byte_length_} is always less than or equal to the amount of memory with |
596 | // permissions set to kReadWrite; |
597 | // * This is guaranteed by incrementing {byte_length_} with a |
598 | // compare_exchange after changing the permissions. |
599 | // * This invariant is the reason why we cannot use a fetch_add. |
600 | DCHECK(is_wasm_memory_); |
601 | max_pages = std::min(max_pages, byte_capacity_ / wasm::kWasmPageSize); |
602 | |
603 | // Do a compare-exchange loop, because we also need to adjust page |
604 | // permissions. Note that multiple racing grows both try to set page |
605 | // permissions for the entire range (to be RW), so the operating system |
606 | // should deal with that raciness. We know we succeeded when we can |
607 | // compare/swap the old length with the new length. |
608 | size_t old_length = byte_length_.load(std::memory_order_relaxed); |
609 | |
610 | if (delta_pages == 0) |
611 | return {old_length / wasm::kWasmPageSize}; // degenerate grow. |
612 | if (delta_pages > max_pages) return {}; // would never work. |
613 | |
614 | size_t new_length = 0; |
615 | while (true) { |
616 | size_t current_pages = old_length / wasm::kWasmPageSize; |
617 | |
618 | // Check if we have exceeded the supplied maximum. |
619 | if (current_pages > (max_pages - delta_pages)) return {}; |
620 | |
621 | new_length = (current_pages + delta_pages) * wasm::kWasmPageSize; |
622 | |
623 | // Try to adjust the permissions on the memory. |
624 | if (!i::SetPermissions(GetPlatformPageAllocator(), buffer_start_, |
625 | new_length, PageAllocator::kReadWrite)) { |
626 | return {}; |
627 | } |
628 | if (byte_length_.compare_exchange_weak(old_length, new_length, |
629 | std::memory_order_acq_rel)) { |
630 | // Successfully updated both the length and permissions. |
631 | break; |
632 | } |
633 | } |
634 | |
635 | if (!is_shared_ && free_on_destruct_) { |
636 | // Only do per-isolate accounting for non-shared backing stores. |
637 | reinterpret_cast<v8::Isolate*>(isolate) |
638 | ->AdjustAmountOfExternalAllocatedMemory(new_length - old_length); |
639 | } |
640 | return {old_length / wasm::kWasmPageSize}; |
641 | } |
642 | |
643 | void BackingStore::AttachSharedWasmMemoryObject( |
644 | Isolate* isolate, Handle<WasmMemoryObject> memory_object) { |
645 | DCHECK(is_wasm_memory_); |
646 | DCHECK(is_shared_); |
647 | // We need to take the global registry lock for this operation. |
648 | GlobalBackingStoreRegistry::AddSharedWasmMemoryObject(isolate, this, |
649 | memory_object); |
650 | } |
651 | |
652 | void BackingStore::BroadcastSharedWasmMemoryGrow( |
653 | Isolate* isolate, std::shared_ptr<BackingStore> backing_store) { |
654 | GlobalBackingStoreRegistry::BroadcastSharedWasmMemoryGrow(isolate, |
655 | backing_store); |
656 | } |
657 | |
658 | void BackingStore::RemoveSharedWasmMemoryObjects(Isolate* isolate) { |
659 | GlobalBackingStoreRegistry::Purge(isolate); |
660 | } |
661 | |
662 | void BackingStore::UpdateSharedWasmMemoryObjects(Isolate* isolate) { |
663 | GlobalBackingStoreRegistry::UpdateSharedWasmMemoryObjects(isolate); |
664 | } |
665 | #endif // V8_ENABLE_WEBASSEMBLY |
666 | |
667 | // Commit already reserved memory (for RAB backing stores (not shared)). |
668 | BackingStore::ResizeOrGrowResult BackingStore::ResizeInPlace( |
669 | Isolate* isolate, size_t new_byte_length, size_t new_committed_length) { |
670 | DCHECK_LE(new_byte_length, new_committed_length); |
671 | DCHECK(!is_shared()); |
672 | |
673 | if (new_byte_length < byte_length_) { |
674 | // TODO(v8:11111): Figure out a strategy for shrinking - when do we |
675 | // un-commit the memory? |
676 | |
677 | // Zero the memory so that in case the buffer is grown later, we have |
678 | // zeroed the contents already. |
679 | memset(reinterpret_cast<byte*>(buffer_start_) + new_byte_length, 0, |
680 | byte_length_ - new_byte_length); |
681 | |
682 | // Changing the byte length wouldn't strictly speaking be needed, since |
683 | // the JSArrayBuffer already stores the updated length. This is to keep |
684 | // the BackingStore and JSArrayBuffer in sync. |
685 | byte_length_ = new_byte_length; |
686 | return kSuccess; |
687 | } |
688 | if (new_byte_length == byte_length_) { |
689 | // i::SetPermissions with size 0 fails on some platforms, so special |
690 | // handling for the case byte_length_ == new_byte_length == 0 is required. |
691 | return kSuccess; |
692 | } |
693 | |
694 | // Try to adjust the permissions on the memory. |
695 | if (!i::SetPermissions(GetPlatformPageAllocator(), buffer_start_, |
696 | new_committed_length, PageAllocator::kReadWrite)) { |
697 | return kFailure; |
698 | } |
699 | |
700 | // Do per-isolate accounting for non-shared backing stores. |
701 | DCHECK(free_on_destruct_); |
702 | reinterpret_cast<v8::Isolate*>(isolate) |
703 | ->AdjustAmountOfExternalAllocatedMemory(new_byte_length - byte_length_); |
704 | byte_length_ = new_byte_length; |
705 | return kSuccess; |
706 | } |
707 | |
708 | // Commit already reserved memory (for GSAB backing stores (shared)). |
709 | BackingStore::ResizeOrGrowResult BackingStore::GrowInPlace( |
710 | Isolate* isolate, size_t new_byte_length, size_t new_committed_length) { |
711 | DCHECK_LE(new_byte_length, new_committed_length); |
712 | DCHECK(is_shared()); |
713 | // See comment in GrowWasmMemoryInPlace. |
714 | // GrowableSharedArrayBuffer.prototype.grow can be called from several |
715 | // threads. If two threads try to grow() in a racy way, the spec allows the |
716 | // larger grow to throw also if the smaller grow succeeds first. The |
717 | // implementation below doesn't throw in that case - instead, it retries and |
718 | // succeeds. If the larger grow finishes first though, the smaller grow must |
719 | // throw. |
720 | size_t old_byte_length = byte_length_.load(std::memory_order_seq_cst); |
721 | while (true) { |
722 | if (new_byte_length < old_byte_length) { |
723 | // The caller checks for the new_byte_length < old_byte_length case. This |
724 | // can only happen if another thread grew the memory after that. |
725 | return kRace; |
726 | } |
727 | if (new_byte_length == old_byte_length) { |
728 | // i::SetPermissions with size 0 fails on some platforms, so special |
729 | // handling for the case old_byte_length == new_byte_length == 0 is |
730 | // required. |
731 | return kSuccess; |
732 | } |
733 | |
734 | // Try to adjust the permissions on the memory. |
735 | if (!i::SetPermissions(GetPlatformPageAllocator(), buffer_start_, |
736 | new_committed_length, PageAllocator::kReadWrite)) { |
737 | return kFailure; |
738 | } |
739 | |
740 | // compare_exchange_weak updates old_byte_length. |
741 | if (byte_length_.compare_exchange_weak(old_byte_length, new_byte_length, |
742 | std::memory_order_seq_cst)) { |
743 | // Successfully updated both the length and permissions. |
744 | break; |
745 | } |
746 | } |
747 | return kSuccess; |
748 | } |
749 | |
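| // Wrap memory that was allocated by the embedder in a BackingStore; when free_on_destruct is set, the memory is released through the embedder's allocator on destruction. |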
750 | std::unique_ptr<BackingStore> BackingStore::WrapAllocation( |
751 | Isolate* isolate, void* allocation_base, size_t allocation_length, |
752 | SharedFlag shared, bool free_on_destruct) { |
753 | auto result = new BackingStore(allocation_base, // start |
754 | allocation_length, // length |
755 | allocation_length, // max length |
756 | allocation_length, // capacity |
757 | shared, // shared |
758 | ResizableFlag::kNotResizable, // resizable |
759 | false, // is_wasm_memory |
760 | free_on_destruct, // free_on_destruct |
761 | false, // has_guard_regions |
762 | false, // custom_deleter |
763 | false); // empty_deleter |
764 | result->SetAllocatorFromIsolate(isolate); |
765 | TRACE_BS("BS:wrap bs=%p mem=%p (length=%zu)\n", result, |
766 | result->buffer_start(), result->byte_length()); |
767 | return std::unique_ptr<BackingStore>(result); |
768 | } |
769 | |
770 | std::unique_ptr<BackingStore> BackingStore::WrapAllocation( |
771 | void* allocation_base, size_t allocation_length, |
772 | v8::BackingStore::DeleterCallback deleter, void* deleter_data, |
773 | SharedFlag shared) { |
774 | bool is_empty_deleter = (deleter == v8::BackingStore::EmptyDeleter); |
775 | auto result = new BackingStore(allocation_base, // start |
776 | allocation_length, // length |
777 | allocation_length, // max length |
778 | allocation_length, // capacity |
779 | shared, // shared |
780 | ResizableFlag::kNotResizable, // resizable |
781 | false, // is_wasm_memory |
782 | true, // free_on_destruct |
783 | false, // has_guard_regions |
784 | true, // custom_deleter |
785 | is_empty_deleter); // empty_deleter |
786 | result->type_specific_data_.deleter = {deleter, deleter_data}; |
787 | TRACE_BS("BS:wrap bs=%p mem=%p (length=%zu)\n", result, |
788 | result->buffer_start(), result->byte_length()); |
789 | return std::unique_ptr<BackingStore>(result); |
790 | } |
791 | |
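| // A BackingStore with no memory attached, used for zero-length buffers; the destructor frees nothing since buffer_start_ is nullptr. |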
792 | std::unique_ptr<BackingStore> BackingStore::EmptyBackingStore( |
793 | SharedFlag shared) { |
794 | auto result = new BackingStore(nullptr, // start |
795 | 0, // length |
796 | 0, // max length |
797 | 0, // capacity |
798 | shared, // shared |
799 | ResizableFlag::kNotResizable, // resizable |
800 | false, // is_wasm_memory |
801 | true, // free_on_destruct |
802 | false, // has_guard_regions |
803 | false, // custom_deleter |
804 | false); // empty_deleter |
805 | |
806 | return std::unique_ptr<BackingStore>(result); |
807 | } |
808 | |
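| // Resize an ordinary embedder-allocated backing store in place via the allocator's Reallocate; not applicable to wasm or resizable backing stores (see the CHECK below). |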
809 | bool BackingStore::Reallocate(Isolate* isolate, size_t new_byte_length) { |
810 | CHECK(!is_wasm_memory_ && !custom_deleter_ && !globally_registered_ && |
811 | free_on_destruct_ && !is_resizable_); |
812 | auto allocator = get_v8_api_array_buffer_allocator(); |
813 | CHECK_EQ(isolate->array_buffer_allocator(), allocator); |
814 | CHECK_EQ(byte_length_, byte_capacity_); |
815 | void* new_start = |
816 | allocator->Reallocate(buffer_start_, byte_length_, new_byte_length); |
817 | if (!new_start) return false; |
818 | buffer_start_ = new_start; |
819 | byte_capacity_ = new_byte_length; |
820 | byte_length_ = new_byte_length; |
821 | max_byte_length_ = new_byte_length; |
822 | return true; |
823 | } |
824 | |
825 | v8::ArrayBuffer::Allocator* BackingStore::get_v8_api_array_buffer_allocator() { |
826 | CHECK(!is_wasm_memory_); |
827 | auto array_buffer_allocator = |
828 | holds_shared_ptr_to_allocator_ |
829 | ? type_specific_data_.v8_api_array_buffer_allocator_shared.get() |
830 | : type_specific_data_.v8_api_array_buffer_allocator; |
831 | CHECK_NOT_NULL(array_buffer_allocator); |
832 | return array_buffer_allocator; |
833 | } |
834 | |
835 | SharedWasmMemoryData* BackingStore::get_shared_wasm_memory_data() { |
836 | CHECK(is_wasm_memory_ && is_shared_); |
837 | auto shared_wasm_memory_data = type_specific_data_.shared_wasm_memory_data; |
838 | CHECK(shared_wasm_memory_data); |
839 | return shared_wasm_memory_data; |
840 | } |
841 | |
842 | namespace { |
843 | // Implementation details of GlobalBackingStoreRegistry. |
844 | struct GlobalBackingStoreRegistryImpl { |
845 | GlobalBackingStoreRegistryImpl() = default; |
846 | base::Mutex mutex_; |
847 | std::unordered_map<const void*, std::weak_ptr<BackingStore>> map_; |
848 | }; |
849 | base::LazyInstance<GlobalBackingStoreRegistryImpl>::type global_registry_impl_ = |
850 | LAZY_INSTANCE_INITIALIZER; |
851 | inline GlobalBackingStoreRegistryImpl* impl() { |
852 | return global_registry_impl_.Pointer(); |
853 | } |
854 | } // namespace |
855 | |
856 | void GlobalBackingStoreRegistry::Register( |
857 | std::shared_ptr<BackingStore> backing_store) { |
858 | if (!backing_store || !backing_store->buffer_start()) return; |
859 | // Only wasm memory backing stores need to be registered globally. |
860 | CHECK(backing_store->is_wasm_memory()); |
861 | |
862 | base::MutexGuard scope_lock(&impl()->mutex_); |
863 | if (backing_store->globally_registered_) return; |
864 | TRACE_BS("BS:reg bs=%p mem=%p (length=%zu, capacity=%zu)\n", |
865 | backing_store.get(), backing_store->buffer_start(), |
866 | backing_store->byte_length(), backing_store->byte_capacity()); |
867 | std::weak_ptr<BackingStore> weak = backing_store; |
868 | auto result = impl()->map_.insert({backing_store->buffer_start(), weak}); |
869 | CHECK(result.second); |
870 | backing_store->globally_registered_ = true; |
871 | } |
872 | |
873 | void GlobalBackingStoreRegistry::Unregister(BackingStore* backing_store) { |
874 | if (!backing_store->globally_registered_) return; |
875 | |
876 | CHECK(backing_store->is_wasm_memory()); |
877 | |
878 | DCHECK_NOT_NULL(backing_store->buffer_start()); |
879 | |
880 | base::MutexGuard scope_lock(&impl()->mutex_); |
881 | const auto& result = impl()->map_.find(backing_store->buffer_start()); |
882 | if (result != impl()->map_.end()) { |
883 | DCHECK(!result->second.lock()); |
884 | impl()->map_.erase(result); |
885 | } |
886 | backing_store->globally_registered_ = false; |
887 | } |
888 | |
889 | void GlobalBackingStoreRegistry::Purge(Isolate* isolate) { |
890 | // We need to keep a reference to all backing stores that are inspected |
891 | // in the purging loop below. Otherwise, we might get a deadlock |
892 | // if the temporary backing store reference created in the loop is |
893 | // the last reference. In that case the destructor of the backing store |
894 | // may try to take the &impl()->mutex_ in order to unregister itself. |
895 | std::vector<std::shared_ptr<BackingStore>> prevent_destruction_under_lock; |
896 | base::MutexGuard scope_lock(&impl()->mutex_); |
897 | // Purge all entries in the map that refer to the given isolate. |
898 | for (auto& entry : impl()->map_) { |
899 | auto backing_store = entry.second.lock(); |
900 | prevent_destruction_under_lock.emplace_back(backing_store); |
901 | if (!backing_store) continue; // skip entries where weak ptr is null |
902 | CHECK(backing_store->is_wasm_memory()); |
903 | if (!backing_store->is_shared()) continue; // skip non-shared memory |
904 | SharedWasmMemoryData* shared_data = |
905 | backing_store->get_shared_wasm_memory_data(); |
906 | // Remove this isolate from the isolates list. |
907 | auto& isolates = shared_data->isolates_; |
908 | for (size_t i = 0; i < isolates.size(); i++) { |
909 | if (isolates[i] == isolate) isolates[i] = nullptr; |
910 | } |
911 | } |
912 | } |
913 | |
914 | #if V8_ENABLE_WEBASSEMBLY |
915 | void GlobalBackingStoreRegistry::AddSharedWasmMemoryObject( |
916 | Isolate* isolate, BackingStore* backing_store, |
917 | Handle<WasmMemoryObject> memory_object) { |
918 | // Add to the weak array list of shared memory objects in the isolate. |
919 | isolate->AddSharedWasmMemory(memory_object); |
920 | |
921 | // Add the isolate to the list of isolates sharing this backing store. |
922 | base::MutexGuard scope_lock(&impl()->mutex_); |
923 | SharedWasmMemoryData* shared_data = |
924 | backing_store->get_shared_wasm_memory_data(); |
925 | auto& isolates = shared_data->isolates_; |
926 | int free_entry = -1; |
927 | for (size_t i = 0; i < isolates.size(); i++) { |
928 | if (isolates[i] == isolate) return; |
929 | if (isolates[i] == nullptr) free_entry = static_cast<int>(i); |
930 | } |
931 | if (free_entry >= 0) |
932 | isolates[free_entry] = isolate; |
933 | else |
934 | isolates.push_back(isolate); |
935 | } |
936 | |
937 | void GlobalBackingStoreRegistry::BroadcastSharedWasmMemoryGrow( |
938 | Isolate* isolate, std::shared_ptr<BackingStore> backing_store) { |
939 | { |
940 | // The global lock protects the list of isolates per backing store. |
941 | base::MutexGuard scope_lock(&impl()->mutex_); |
942 | SharedWasmMemoryData* shared_data = |
943 | backing_store->get_shared_wasm_memory_data(); |
944 | for (Isolate* other : shared_data->isolates_) { |
945 | if (other && other != isolate) { |
946 | other->stack_guard()->RequestGrowSharedMemory(); |
947 | } |
948 | } |
949 | } |
950 | // Update memory objects in this isolate. |
951 | UpdateSharedWasmMemoryObjects(isolate); |
952 | } |
953 | |
954 | void GlobalBackingStoreRegistry::UpdateSharedWasmMemoryObjects( |
955 | Isolate* isolate) { |
956 | HandleScope scope(isolate); |
957 | Handle<WeakArrayList> shared_wasm_memories = |
958 | isolate->factory()->shared_wasm_memories(); |
959 | |
960 | for (int i = 0; i < shared_wasm_memories->length(); i++) { |
961 | HeapObject obj; |
962 | if (!shared_wasm_memories->Get(i).GetHeapObject(&obj)) continue; |
963 | |
964 | Handle<WasmMemoryObject> memory_object(WasmMemoryObject::cast(obj), |
965 | isolate); |
966 | Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer(), isolate); |
967 | std::shared_ptr<BackingStore> backing_store = old_buffer->GetBackingStore(); |
968 | |
969 | Handle<JSArrayBuffer> new_buffer = |
970 | isolate->factory()->NewJSSharedArrayBuffer(std::move(backing_store)); |
971 | memory_object->update_instances(isolate, new_buffer); |
972 | } |
973 | } |
974 | #endif // V8_ENABLE_WEBASSEMBLY |
975 | |
976 | } // namespace internal |
977 | } // namespace v8 |
978 | |
979 | #undef TRACE_BS |