From 8cbe268e4ab12719894a4b2ab14a7bd4b0af06f9 Mon Sep 17 00:00:00 2001
From: wangchen
Date: Wed, 27 Sep 2023 20:08:42 +0800
Subject: [PATCH 1/2] =?UTF-8?q?feat:=20=E5=86=85=E5=AD=98=E5=88=86?=
 =?UTF-8?q?=E9=85=8D=E5=99=A8=E9=9A=8F=E6=9C=BA=E5=8C=96=20Close=20#I84SCL?=
 =?UTF-8?q?=20Signed-off-by:=20wangchen=20?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../partition_allocator/partition_bucket.cc   | 33 +++++++++++++++++++
 .../partition_allocator/partition_page.cc     | 26 +++++++++++++--
 2 files changed, 57 insertions(+), 2 deletions(-)

diff --git a/base/allocator/partition_allocator/partition_bucket.cc b/base/allocator/partition_allocator/partition_bucket.cc
index 776b7d04d4..051e5450b7 100644
--- a/base/allocator/partition_allocator/partition_bucket.cc
+++ b/base/allocator/partition_allocator/partition_bucket.cc
@@ -790,6 +790,36 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
 
   // Add all slots that fit within so far committed pages to the free list.
   PartitionFreelistEntry* prev_entry = nullptr;
+
+#if defined(OHOS_ENABLE_RANDOM)
+  uintptr_t random_slot;
+  int num = (commit_end - next_slot) / size;
+  int i;
+
+  size_t random[num];
+  size_t tmp;
+  uint32_t rand;
+  size_t free_list_entries_added = 0;
+
+  for (i = 0; i < num; i++) {
+    random[i] = i;
+  }
+
+  for (i = num - 1; i > 0; i--) {
+    rand = RandomValue() % i;
+    tmp = random[i];
+    random[i] = random[rand];
+    random[rand] = tmp;
+  }
+
+  for (i = 0; i < num; i++) {
+    random_slot = next_slot + size * random[i];
+    if (LIKELY(size <= kMaxMemoryTaggingSize)) {
+      random_slot = memory::TagMemoryRangeRandomly(random_slot, size);
+    }
+    auto* entry =
+        new (reinterpret_cast<void*>(random_slot)) PartitionFreelistEntry();
+#else
   uintptr_t next_slot_end = next_slot + size;
   size_t free_list_entries_added = 0;
   while (next_slot_end <= commit_end) {
@@ -798,6 +828,7 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
     }
     auto* entry =
         new (reinterpret_cast<void*>(next_slot)) PartitionFreelistEntry();
+#endif
     if (!slot_span->get_freelist_head()) {
       PA_DCHECK(!prev_entry);
       PA_DCHECK(!free_list_entries_added);
@@ -806,8 +837,10 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
       PA_DCHECK(free_list_entries_added);
       prev_entry->SetNext(entry);
     }
+#if !defined(OHOS_ENABLE_RANDOM)
     next_slot = next_slot_end;
     next_slot_end = next_slot + size;
+#endif
     prev_entry = entry;
 #if DCHECK_IS_ON()
     free_list_entries_added++;
diff --git a/base/allocator/partition_allocator/partition_page.cc b/base/allocator/partition_allocator/partition_page.cc
index 3090c77dd1..eb2d3d8898 100644
--- a/base/allocator/partition_allocator/partition_page.cc
+++ b/base/allocator/partition_allocator/partition_page.cc
@@ -279,12 +279,34 @@ void SlotSpanMetadata<thread_safe>::SortFreelist() {
   if (num_free_slots > 1) {
     PartitionFreelistEntry* back = nullptr;
     PartitionFreelistEntry* head = nullptr;
+    size_t i;
+#if defined(OHOS_ENABLE_RANDOM)
+    size_t tmp;
+    size_t random[num_provisioned_slots];
+    uint32_t rand;
+
+    for (i = 0; i < num_provisioned_slots; i++) {
+      random[i] = i;
+    }
+
+    for (i = num_provisioned_slots - 1; i > 0; i--) {
+      rand = RandomValue() % i;
+      tmp = random[i];
+      random[i] = random[rand];
+      random[rand] = tmp;
+    }
+#endif
 
     for (size_t slot_number = 0; slot_number < num_provisioned_slots;
          slot_number++) {
+#if defined(OHOS_ENABLE_RANDOM)
+      i = random[slot_number];
+#else
+      i = slot_number;
+#endif
-      if (free_slots[slot_number]) {
+      if (free_slots[i]) {
         uintptr_t slot_address =
-            memory::RemaskPtr(slot_span_start + (slot_size * slot_number));
+            memory::RemaskPtr(slot_span_start + (slot_size * i));
         auto* entry = new (reinterpret_cast<void*>(slot_address))
             PartitionFreelistEntry();
 
-- 
Gitee

From 055fdab8081a46ee84ddf0a40bbc802887c0addb Mon Sep 17 00:00:00 2001
From: wangchen
Date: Wed, 18 Oct 2023 10:08:18 +0000
Subject: [PATCH 2/2] =?UTF-8?q?=E5=88=A0=E9=99=A4=E6=96=87=E4=BB=B6=20base?=
 =?UTF-8?q?/allocator/partition=5Fallocator/partition=5Fpage.cc?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../partition_allocator/partition_page.cc | 388 ------------------
 1 file changed, 388 deletions(-)
 delete mode 100644 base/allocator/partition_allocator/partition_page.cc

diff --git a/base/allocator/partition_allocator/partition_page.cc b/base/allocator/partition_allocator/partition_page.cc
deleted file mode 100644
index eb2d3d8898..0000000000
--- a/base/allocator/partition_allocator/partition_page.cc
+++ /dev/null
@@ -1,388 +0,0 @@
-// Copyright (c) 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_page.h"
-
-#include <string.h>
-#include <cstdint>
-
-#include "base/allocator/buildflags.h"
-#include "base/allocator/partition_allocator/address_pool_manager.h"
-#include "base/allocator/partition_allocator/page_allocator.h"
-#include "base/allocator/partition_allocator/page_allocator_constants.h"
-#include "base/allocator/partition_allocator/partition_address_space.h"
-#include "base/allocator/partition_allocator/partition_alloc_check.h"
-#include "base/allocator/partition_allocator/partition_alloc_config.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
-#include "base/allocator/partition_allocator/partition_root.h"
-#include "base/allocator/partition_allocator/reservation_offset_table.h"
-#include "base/bits.h"
-#include "base/dcheck_is_on.h"
-#include "base/memory/tagging.h"
-
-namespace base {
-namespace internal {
-
-namespace {
-
-void UnmapNow(uintptr_t reservation_start,
-              size_t reservation_size,
-              pool_handle pool);
-
-template <bool thread_safe>
-ALWAYS_INLINE void PartitionDirectUnmap(
-    SlotSpanMetadata<thread_safe>* slot_span) {
-  using ::partition_alloc::internal::ScopedUnlockGuard;
-
-  auto* root = PartitionRoot<thread_safe>::FromSlotSpan(slot_span);
-  root->lock_.AssertAcquired();
-  auto* extent = PartitionDirectMapExtent<thread_safe>::FromSlotSpan(slot_span);
-
-  // Maintain the doubly-linked list of all direct mappings.
-  if (extent->prev_extent) {
-    PA_DCHECK(extent->prev_extent->next_extent == extent);
-    extent->prev_extent->next_extent = extent->next_extent;
-  } else {
-    root->direct_map_list = extent->next_extent;
-  }
-  if (extent->next_extent) {
-    PA_DCHECK(extent->next_extent->prev_extent == extent);
-    extent->next_extent->prev_extent = extent->prev_extent;
-  }
-
-  // The actual decommit is deferred below after releasing the lock.
-  root->DecreaseCommittedPages(slot_span->bucket->slot_size);
-
-  size_t reservation_size = extent->reservation_size;
-  PA_DCHECK(!(reservation_size & DirectMapAllocationGranularityOffsetMask()));
-  PA_DCHECK(root->total_size_of_direct_mapped_pages >= reservation_size);
-  root->total_size_of_direct_mapped_pages -= reservation_size;
-
-  uintptr_t reservation_start =
-      SlotSpanMetadata<thread_safe>::ToSlotSpanStart(slot_span);
-  // The mapping may start at an unspecified location within a super page, but
-  // we always reserve memory aligned to super page size.
-  reservation_start = bits::AlignDown(reservation_start, kSuperPageSize);
-
-  // All the metadata have been updated above, in particular the mapping has
-  // been unlinked. We can safely release the memory outside the lock, which is
-  // important as decommitting memory can be expensive.
-  //
-  // This can create a fake "address space exhaustion" OOM, in the case where
-  // e.g. a large allocation is freed on a thread, and another large one is made
-  // from another *before* UnmapNow() has finished running. In this case the
-  // second one may not find enough space in the GigaCage, and fail. This is
-  // expected to be very rare though, and likely preferable to holding the lock
-  // while releasing the address space.
-  ScopedUnlockGuard unlock{root->lock_};
-  ScopedSyscallTimer timer{root};
-#if BUILDFLAG(IS_OHOS) || BUILDFLAG(IS_LINUX)
-  ALLOW_UNUSED_LOCAL(timer);  // Supress compiler warning: unused-variable
-#endif
-  UnmapNow(reservation_start, reservation_size, root->ChoosePool());
-}
-
-}  // namespace
-
-template <bool thread_safe>
-ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::RegisterEmpty() {
-  PA_DCHECK(is_empty());
-  auto* root = PartitionRoot<thread_safe>::FromSlotSpan(this);
-  root->lock_.AssertAcquired();
-
-  root->empty_slot_spans_dirty_bytes +=
-      base::bits::AlignUp(GetProvisionedSize(), SystemPageSize());
-
-  ToSuperPageExtent()->DecrementNumberOfNonemptySlotSpans();
-
-  // If the slot span is already registered as empty, give it another life.
-  if (in_empty_cache_) {
-    PA_DCHECK(empty_cache_index_ < kMaxFreeableSpans);
-    PA_DCHECK(root->global_empty_slot_span_ring[empty_cache_index_] == this);
-    root->global_empty_slot_span_ring[empty_cache_index_] = nullptr;
-  }
-
-  int16_t current_index = root->global_empty_slot_span_ring_index;
-  SlotSpanMetadata<thread_safe>* slot_span_to_decommit =
-      root->global_empty_slot_span_ring[current_index];
-  // The slot span might well have been re-activated, filled up, etc. before we
-  // get around to looking at it here.
-  if (slot_span_to_decommit)
-    slot_span_to_decommit->DecommitIfPossible(root);
-
-  // We put the empty slot span on our global list of "slot spans that were once
-  // empty", thus providing it a bit of breathing room to get re-used before we
-  // really free it. This reduces the number of system calls. Otherwise any
-  // free() from a single-slot slot span would lead to a syscall, for instance.
-  root->global_empty_slot_span_ring[current_index] = this;
-  empty_cache_index_ = current_index;
-  in_empty_cache_ = 1;
-  ++current_index;
-  if (current_index == root->global_empty_slot_span_ring_size)
-    current_index = 0;
-  root->global_empty_slot_span_ring_index = current_index;
-
-  // Avoid wasting too much memory on empty slot spans. Note that we only divide
-  // by powers of two, since division can be very slow, and this path is taken
-  // for every single-slot slot span deallocation.
-  //
-  // Empty slot spans are also all decommitted with MemoryReclaimer, but it may
-  // never run, be delayed arbitrarily, and/or miss large memory spikes.
-  size_t max_empty_dirty_bytes =
-      root->total_size_of_committed_pages.load(std::memory_order_relaxed) >>
-      root->max_empty_slot_spans_dirty_bytes_shift;
-  if (root->empty_slot_spans_dirty_bytes > max_empty_dirty_bytes) {
-    root->ShrinkEmptySlotSpansRing(std::min(
-        root->empty_slot_spans_dirty_bytes / 2, max_empty_dirty_bytes));
-  }
-}
-
-// static
-template <bool thread_safe>
-SlotSpanMetadata<thread_safe>
-    SlotSpanMetadata<thread_safe>::sentinel_slot_span_;
-
-// static
-template <bool thread_safe>
-SlotSpanMetadata<thread_safe>*
-SlotSpanMetadata<thread_safe>::get_sentinel_slot_span() {
-  return &sentinel_slot_span_;
-}
-
-template <bool thread_safe>
-SlotSpanMetadata<thread_safe>::SlotSpanMetadata(
-    PartitionBucket<thread_safe>* bucket)
-    : bucket(bucket), can_store_raw_size_(bucket->CanStoreRawSize()) {}
-
-template <bool thread_safe>
-void SlotSpanMetadata<thread_safe>::FreeSlowPath(size_t number_of_freed) {
-#if DCHECK_IS_ON()
-  auto* root = PartitionRoot<thread_safe>::FromSlotSpan(this);
-  root->lock_.AssertAcquired();
-#endif
-  PA_DCHECK(this != get_sentinel_slot_span());
-
-  // The caller has already modified |num_allocated_slots|. It is a
-  // responsibility of this function to react to it, and update the state. We
-  // can get here only if the slot span is marked full and/or is now empty. Both
-  // are possible at the same time, which can happen when the caller lowered
-  // |num_allocated_slots| from "all" to 0 (common for single-slot spans). First
-  // execute the "is marked full" path, as it sets up |active_slot_spans_head|
-  // in a way later needed for the "is empty" path.
-  if (marked_full) {
-    // Direct map slot spans aren't added to any lists, hence never marked full.
-    PA_DCHECK(!bucket->is_direct_mapped());
-    // Double check that the slot span was full.
-    PA_DCHECK(num_allocated_slots ==
-              bucket->get_slots_per_span() - number_of_freed);
-    marked_full = 0;
-    // Fully used slot span became partially used. It must be put back on the
-    // non-full list. Also make it the current slot span to increase the
-    // chances of it being filled up again. The old current slot span will be
-    // the next slot span.
-    PA_DCHECK(!next_slot_span);
-    if (LIKELY(bucket->active_slot_spans_head != get_sentinel_slot_span()))
-      next_slot_span = bucket->active_slot_spans_head;
-    bucket->active_slot_spans_head = this;
-    --bucket->num_full_slot_spans;
-  }
-
-  if (LIKELY(num_allocated_slots == 0)) {
-    // Slot span became fully unused.
-    if (UNLIKELY(bucket->is_direct_mapped())) {
-      PartitionDirectUnmap(this);
-      return;
-    }
-#if DCHECK_IS_ON()
-    freelist_head->CheckFreeList(bucket->slot_size);
-#endif
-    // If it's the current active slot span, change it. We bounce the slot span
-    // to the empty list as a force towards defragmentation.
-    if (LIKELY(this == bucket->active_slot_spans_head))
-      bucket->SetNewActiveSlotSpan();
-    PA_DCHECK(bucket->active_slot_spans_head != this);
-
-    if (CanStoreRawSize())
-      SetRawSize(0);
-
-    RegisterEmpty();
-  }
-}
-
-template <bool thread_safe>
-void SlotSpanMetadata<thread_safe>::Decommit(PartitionRoot<thread_safe>* root) {
-  root->lock_.AssertAcquired();
-  PA_DCHECK(is_empty());
-  PA_DCHECK(!bucket->is_direct_mapped());
-  uintptr_t slot_span_start = SlotSpanMetadata::ToSlotSpanStart(this);
-  // If lazy commit is enabled, only provisioned slots are committed.
-  size_t dirty_size = bits::AlignUp(GetProvisionedSize(), SystemPageSize());
-  size_t size_to_decommit =
-      kUseLazyCommit ? dirty_size : bucket->get_bytes_per_span();
-
-  PA_DCHECK(root->empty_slot_spans_dirty_bytes >= dirty_size);
-  root->empty_slot_spans_dirty_bytes -= dirty_size;
-
-  // Not decommitted slot span must've had at least 1 allocation.
-  PA_DCHECK(size_to_decommit > 0);
-  root->DecommitSystemPagesForData(slot_span_start, size_to_decommit,
-                                   PageKeepPermissionsIfPossible);
-
-  // We actually leave the decommitted slot span in the active list. We'll sweep
-  // it on to the decommitted list when we next walk the active list.
-  // Pulling this trick enables us to use a singly-linked list for all
-  // cases, which is critical in keeping the slot span metadata structure down
-  // to 32 bytes in size.
-  SetFreelistHead(nullptr);
-  num_unprovisioned_slots = 0;
-  PA_DCHECK(is_decommitted());
-  PA_DCHECK(bucket);
-}
-
-template <bool thread_safe>
-void SlotSpanMetadata<thread_safe>::DecommitIfPossible(
-    PartitionRoot<thread_safe>* root) {
-  root->lock_.AssertAcquired();
-  PA_DCHECK(in_empty_cache_);
-  PA_DCHECK(empty_cache_index_ < kMaxFreeableSpans);
-  PA_DCHECK(this == root->global_empty_slot_span_ring[empty_cache_index_]);
-  in_empty_cache_ = 0;
-  if (is_empty())
-    Decommit(root);
-}
-
-template <bool thread_safe>
-void SlotSpanMetadata<thread_safe>::SortFreelist() {
-  std::bitset<kMaxSlotsPerSlotSpan> free_slots;
-  uintptr_t slot_span_start = ToSlotSpanStart(this);
-
-  size_t num_provisioned_slots =
-      bucket->get_slots_per_span() - num_unprovisioned_slots;
-  PA_CHECK(num_provisioned_slots <= kMaxSlotsPerSlotSpan);
-
-  size_t num_free_slots = 0;
-  size_t slot_size = bucket->slot_size;
-  for (PartitionFreelistEntry* head = freelist_head; head;
-       head = head->GetNext(slot_size)) {
-    ++num_free_slots;
-    size_t offset_in_slot_span =
-        memory::UnmaskPtr(reinterpret_cast<uintptr_t>(head)) - slot_span_start;
-    size_t slot_number = bucket->GetSlotNumber(offset_in_slot_span);
-    PA_DCHECK(slot_number < num_provisioned_slots);
-    free_slots[slot_number] = true;
-  }
-
-  // Empty or single-element list is always sorted.
-  if (num_free_slots > 1) {
-    PartitionFreelistEntry* back = nullptr;
-    PartitionFreelistEntry* head = nullptr;
-    size_t i;
-#if defined(OHOS_ENABLE_RANDOM)
-    size_t tmp;
-    size_t random[num_provisioned_slots];
-    uint32_t rand;
-
-    for (i = 0; i < num_provisioned_slots; i++) {
-      random[i] = i;
-    }
-
-    for (i = num_provisioned_slots - 1; i > 0; i--) {
-      rand = RandomValue() % i;
-      tmp = random[i];
-      random[i] = random[rand];
-      random[rand] = tmp;
-    }
-#endif
-
-    for (size_t slot_number = 0; slot_number < num_provisioned_slots;
-         slot_number++) {
-#if defined(OHOS_ENABLE_RANDOM)
-      i = random[slot_number];
-#else
-      i = slot_number;
-#endif
-      if (free_slots[i]) {
-        uintptr_t slot_address =
-            memory::RemaskPtr(slot_span_start + (slot_size * i));
-        auto* entry = new (reinterpret_cast<void*>(slot_address))
-            PartitionFreelistEntry();
-
-        if (!head)
-          head = entry;
-        else
-          back->SetNext(entry);
-
-        back = entry;
-      }
-    }
-    SetFreelistHead(head);
-  }
-
-  freelist_is_sorted_ = true;
-}
-
-namespace {
-void UnmapNow(uintptr_t reservation_start,
-              size_t reservation_size,
-              pool_handle pool) {
-  PA_DCHECK(reservation_start && reservation_size > 0);
-#if DCHECK_IS_ON()
-  // When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
-#if BUILDFLAG(USE_BACKUP_REF_PTR)
-  if (pool == GetBRPPool()) {
-    // In 32-bit mode, the beginning of a reservation may be excluded from the
-    // BRP pool, so shift the pointer. Other pools don't have this logic.
-    PA_DCHECK(IsManagedByPartitionAllocBRPPool(
-#if defined(PA_HAS_64_BITS_POINTERS)
-        reservation_start
-#else
-        reservation_start +
-            AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap *
-                AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap
-#endif
-        ));
-  } else
-#endif  // BUILDFLAG(USE_BACKUP_REF_PTR)
-  {
-    PA_DCHECK(pool == GetRegularPool() ||
-              (IsConfigurablePoolAvailable() && pool == GetConfigurablePool()));
-    // Non-BRP pools don't need adjustment that BRP needs in 32-bit mode.
-    PA_DCHECK(IsManagedByPartitionAllocRegularPool(reservation_start) ||
-              IsManagedByPartitionAllocConfigurablePool(reservation_start));
-  }
-#endif  // DCHECK_IS_ON()
-
-  PA_DCHECK((reservation_start & kSuperPageOffsetMask) == 0);
-  uintptr_t reservation_end = reservation_start + reservation_size;
-  auto* offset_ptr = ReservationOffsetPointer(reservation_start);
-  // Reset the offset table entries for the given memory before unreserving
-  // it. Since the memory is not unreserved and not available for other
-  // threads, the table entries for the memory are not modified by other
-  // threads either. So we can update the table entries without race
-  // condition.
-  uint16_t i = 0;
-  for (uintptr_t address = reservation_start; address < reservation_end;
-       address += kSuperPageSize) {
-    PA_DCHECK(offset_ptr < GetReservationOffsetTableEnd(address));
-    PA_DCHECK(*offset_ptr == i++);
-    *offset_ptr++ = kOffsetTagNotAllocated;
-  }
-
-#if !defined(PA_HAS_64_BITS_POINTERS)
-  AddressPoolManager::GetInstance()->MarkUnused(pool, reservation_start,
-                                                reservation_size);
-#endif
-
-  // After resetting the table entries, unreserve and decommit the memory.
-  AddressPoolManager::GetInstance()->UnreserveAndDecommit(
-      pool, reservation_start, reservation_size);
-}
-}  // namespace
-
-template struct SlotSpanMetadata<ThreadSafe>;
-
-}  // namespace internal
-}  // namespace base
-- 
Gitee
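
Note (not part of the patch series): both hunks in PATCH 1/2 randomize freelist
construction by shuffling an array of slot indices in place and then threading
the freelist through the slots in that shuffled order. The sketch below is a
minimal, self-contained illustration of the same idea; FreelistEntry and
BuildShuffledFreelist are stand-ins invented for this note, not PartitionAlloc
types, and std::mt19937 replaces RandomValue(). It uses the inclusive
Fisher-Yates bound "% (i + 1)"; the patch's "% i" variant is Sattolo's
algorithm, which only ever produces cyclic permutations of the slot order.

// Standalone sketch of shuffled freelist provisioning (illustrative only).
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <new>
#include <numeric>
#include <random>
#include <utility>
#include <vector>

// Each free slot stores a pointer to the next free slot in its first bytes,
// mimicking how PartitionAlloc threads its freelist through the slots.
struct FreelistEntry {
  FreelistEntry* next = nullptr;
};

// Builds a freelist over num_slots slots of slot_size bytes starting at
// span_start, visiting the slots in a uniformly shuffled order.
FreelistEntry* BuildShuffledFreelist(uintptr_t span_start,
                                     size_t slot_size,
                                     size_t num_slots,
                                     std::mt19937& rng) {
  std::vector<size_t> order(num_slots);
  std::iota(order.begin(), order.end(), 0);  // 0, 1, ..., num_slots - 1

  // Fisher-Yates shuffle with the inclusive bound (i + 1).
  for (size_t i = num_slots - 1; i > 0; --i) {
    size_t j = rng() % (i + 1);
    std::swap(order[i], order[j]);
  }

  FreelistEntry* head = nullptr;
  FreelistEntry* prev = nullptr;
  for (size_t idx : order) {
    void* slot = reinterpret_cast<void*>(span_start + idx * slot_size);
    auto* entry = new (slot) FreelistEntry();  // placement-new into the slot
    if (!head)
      head = entry;
    else
      prev->next = entry;
    prev = entry;
  }
  return head;
}

int main() {
  constexpr size_t kSlotSize = 32;
  constexpr size_t kNumSlots = 8;
  alignas(FreelistEntry) unsigned char span[kSlotSize * kNumSlots] = {};
  std::mt19937 rng(20230927);  // fixed seed so the demo is reproducible
  FreelistEntry* head = BuildShuffledFreelist(
      reinterpret_cast<uintptr_t>(span), kSlotSize, kNumSlots, rng);
  // Walking the freelist prints the slot indices in shuffled allocation order.
  for (FreelistEntry* e = head; e; e = e->next) {
    std::cout << (reinterpret_cast<uintptr_t>(e) -
                  reinterpret_cast<uintptr_t>(span)) / kSlotSize
              << ' ';
  }
  std::cout << '\n';
}

The effect, as in the patch, is that consecutive allocations from a freshly
provisioned slot span return addresses in an unpredictable order, at the cost
of touching the newly committed slots in non-sequential order.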