diff --git a/base/allocator/partition_allocator/partition_alloc_unittest.cc b/base/allocator/partition_allocator/partition_alloc_unittest.cc
index 258d78eb105d44a3676cbfc686f38dee4fc604df..edf638ccf66e7dc72dca2bfa0ce093d12fea2bdb 100644
--- a/base/allocator/partition_allocator/partition_alloc_unittest.cc
+++ b/base/allocator/partition_allocator/partition_alloc_unittest.cc
@@ -2508,6 +2508,49 @@ TEST_F(PartitionAllocTest, PreferActiveOverEmpty) {
   allocator.root()->Free(ptr7);
 }
 
+#define TEST_STEP_SIZE 20
+#define TEST_TIME 5
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+TEST_F(PartitionAllocTest, FreelistHardenedTest) {
+  for (int i = 0; i < TEST_TIME; i++) {
+    size_t size = TEST_STEP_SIZE * i;
+    void* ptr = allocator.root()->Alloc(size, type_name);
+
+    SlotSpanMetadata<base::internal::ThreadSafe>* slot_span =
+        SlotSpanMetadata<base::internal::ThreadSafe>::FromSlotStart(
+            allocator.root()->AdjustPointerForExtrasSubtract(ptr));
+
+    PartitionFreelistEntry* entry = slot_span->get_freelist_head();
+    PartitionFreelistEntry* next = entry->GetNext(size, slot_span->bucket->random_cookie);
+
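+    // A sanely decoded |next| should land in the same 4 KiB page as the
+    // head entry itself; a corrupted or mis-decoded pointer almost never
+    // would.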
+    EXPECT_EQ((uintptr_t)entry & 0xfffff000, (uintptr_t)next & 0xfffff000)
+        << "size: " << size << " e: " << (uintptr_t)entry << " n: " << next;
+
+    allocator.root()->Free(ptr);
+  }
+}
+#endif
+
+#if defined(OHOS_ENABLE_RANDOM)
+TEST_F(PartitionAllocTest, RandomTest) {
+  for (int i = 0; i < TEST_TIME; i++) {
+    size_t size = TEST_STEP_SIZE * i;
+    void* ptr1 = allocator.root()->Alloc(size, type_name);
+    void* ptr2 = allocator.root()->Alloc(size, type_name);
+    void* ptr3 = allocator.root()->Alloc(size, type_name);
+    void* ptr4 = allocator.root()->Alloc(size, type_name);
+
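+    // With slot-order randomization, the distance between consecutive
+    // allocations should not repeat deterministically.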
+    EXPECT_NE((uintptr_t)ptr2 - (uintptr_t)ptr1, (uintptr_t)ptr4 - (uintptr_t)ptr3)
+        << "size: " << size << " ptr1: " << ptr1 << " ptr2: " << ptr2 << " ptr3: " << ptr3 << " ptr4: " << ptr4;
+
+    allocator.root()->Free(ptr1);
+    allocator.root()->Free(ptr2);
+    allocator.root()->Free(ptr3);
+    allocator.root()->Free(ptr4);
+  }
+}
+#endif
+
 // Tests the API to purge discardable memory.
 TEST_F(PartitionAllocTest, PurgeDiscardableSecondPage) {
   // Free the second of two 4096 byte allocations and then purge.
diff --git a/base/allocator/partition_allocator/partition_bucket.cc b/base/allocator/partition_allocator/partition_bucket.cc
index 776b7d04d437e77256e6469ff204110301ab906e..e2cc7a3a6f5ae5b6ccc2e4ca39dd85e0c4c28f09 100644
--- a/base/allocator/partition_allocator/partition_bucket.cc
+++ b/base/allocator/partition_allocator/partition_bucket.cc
@@ -485,6 +485,13 @@ uint8_t PartitionBucket<thread_safe>::ComputeSystemPagesPerSlotSpan(
   return static_cast<uint8_t>(best_pages);
 }
 
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
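+// Per-bucket secret used to XOR-encode freelist next pointers. Generated
+// once per bucket in Init(); RandomValue() supplies 32 bits of entropy.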
+uint64_t GenerateRandomCookie()
+{
+  return RandomValue();
+}
+#endif
+
 template <bool thread_safe>
 void PartitionBucket<thread_safe>::Init(uint32_t new_slot_size) {
   slot_size = new_slot_size;
@@ -495,6 +502,9 @@ void PartitionBucket<thread_safe>::Init(uint32_t new_slot_size) {
   decommitted_slot_spans_head = nullptr;
   num_full_slot_spans = 0;
   num_system_pages_per_slot_span = ComputeSystemPagesPerSlotSpan(slot_size);
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+  random_cookie = GenerateRandomCookie();
+#endif
 }
 
 template <bool thread_safe>
@@ -790,6 +800,36 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
 
   // Add all slots that fit within so far committed pages to the free list.
   PartitionFreelistEntry* prev_entry = nullptr;
+
+#if defined(OHOS_ENABLE_RANDOM)
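+  // Thread the newly committed slots onto the freelist in a shuffled order
+  // rather than in address order.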
+  uintptr_t random_slot;
+  int num = (commit_end - next_slot) / size;
+  int i;
+
+  size_t random[num];
+  size_t tmp;
+  uint32_t rand;
+  size_t free_list_entries_added = 0;
+
+  for (i = 0; i < num; i++) {
+    random[i] = i;
+  }
+
+  for (i = num - 1; i > 0; i--) {
+    rand = RandomValue() % i;
+    tmp = random[i];
+    random[i] = random[rand];
+    random[rand] = tmp;
+  }
+
+  for (i = 0; i < num; i++) {
+    random_slot = next_slot + size * random[i];
+    if (LIKELY(size <= kMaxMemoryTaggingSize)) {
+      random_slot = memory::TagMemoryRangeRandomly(random_slot, size);
+    }
+    auto* entry =
+        new (reinterpret_cast<void*>(random_slot)) PartitionFreelistEntry();
+#else
   uintptr_t next_slot_end = next_slot + size;
   size_t free_list_entries_added = 0;
   while (next_slot_end <= commit_end) {
@@ -798,16 +838,23 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
     }
     auto* entry =
         new (reinterpret_cast<void*>(next_slot)) PartitionFreelistEntry();
+#endif
     if (!slot_span->get_freelist_head()) {
       PA_DCHECK(!prev_entry);
       PA_DCHECK(!free_list_entries_added);
       slot_span->SetFreelistHead(entry);
     } else {
       PA_DCHECK(free_list_entries_added);
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+      prev_entry->SetNext(entry, random_cookie);
+#else
       prev_entry->SetNext(entry);
+#endif
     }
+#if !defined(OHOS_ENABLE_RANDOM)
     next_slot = next_slot_end;
     next_slot_end = next_slot + size;
+#endif
     prev_entry = entry;
 #if DCHECK_IS_ON()
     free_list_entries_added++;
diff --git a/base/allocator/partition_allocator/partition_bucket.h b/base/allocator/partition_allocator/partition_bucket.h
index 408f299b94156cb1b7c149b04987f11974d9ea29..e8734cee25818ff0861efd1d14ad443ee6611599 100644
--- a/base/allocator/partition_allocator/partition_bucket.h
+++ b/base/allocator/partition_allocator/partition_bucket.h
@@ -35,6 +35,9 @@ struct PartitionBucket {
   uint32_t num_system_pages_per_slot_span
       : kPartitionNumSystemPagesPerSlotSpanBits;
   uint32_t num_full_slot_spans : 24;
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+  uintptr_t random_cookie;
+#endif
 
   // `slot_size_reciprocal` is used to improve the performance of
   // `GetSlotOffset`. It is computed as `(1 / size) * (2 ** M)` where M is
diff --git a/base/allocator/partition_allocator/partition_freelist_entry.h b/base/allocator/partition_allocator/partition_freelist_entry.h
index d86b12d1d8d68809a758b673db5d198efeb65f76..beb37f92dab13f88fb1e8f198c9b45cdda7e1e85 100644
--- a/base/allocator/partition_allocator/partition_freelist_entry.h
+++ b/base/allocator/partition_allocator/partition_freelist_entry.h
@@ -62,17 +62,29 @@
 // the rationale and mechanism, respectively.
 class PartitionFreelistEntry {
  public:
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+  PartitionFreelistEntry() { SetNext(nullptr, 0); }
+#else
   PartitionFreelistEntry() { SetNext(nullptr); }
+#endif
   ~PartitionFreelistEntry() = delete;
 
   // Creates a new entry, with |next| following it.
   static ALWAYS_INLINE PartitionFreelistEntry* InitForThreadCache(
       uintptr_t slot_start,
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+      PartitionFreelistEntry* next, uintptr_t random_cookie) {
+#else
       PartitionFreelistEntry* next) {
+#endif
     auto* entry = reinterpret_cast<PartitionFreelistEntry*>(slot_start);
     // ThreadCache freelists can point to entries across superpage boundaries,
     // no check contrary to |SetNext()|.
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+    entry->SetNextInternal(next, random_cookie);
+#else
     entry->SetNextInternal(next);
+#endif
     return entry;
   }
 
@@ -82,34 +94,62 @@ class PartitionFreelistEntry {
   void* operator new(size_t, void* buffer) { return buffer; }
 
   ALWAYS_INLINE static EncodedPartitionFreelistEntry* Encode(
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+      PartitionFreelistEntry* ptr, uintptr_t random_cookie) {
+    return reinterpret_cast<EncodedPartitionFreelistEntry*>(Transform(ptr, random_cookie));
+#else
       PartitionFreelistEntry* ptr) {
     return reinterpret_cast<EncodedPartitionFreelistEntry*>(Transform(ptr));
+#endif
   }
 
   // Puts |extra| on the stack before crashing in case of memory
   // corruption. Meant to be used to report the failed allocation size.
   ALWAYS_INLINE PartitionFreelistEntry* GetNextForThreadCache(
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+      size_t extra, uintptr_t random_cookie) const;
+  ALWAYS_INLINE PartitionFreelistEntry* GetNext(size_t extra, uintptr_t random_cookie) const;
+
+  NOINLINE void CheckFreeList(size_t extra, uintptr_t random_cookie) const {
+#else
      size_t extra) const;
   ALWAYS_INLINE PartitionFreelistEntry* GetNext(size_t extra) const;
 
   NOINLINE void CheckFreeList(size_t extra) const {
+#endif
 #if defined(PA_HAS_FREELIST_HARDENING)
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+    for (auto* entry = this; entry; entry = entry->GetNext(extra, random_cookie)) {
+#else
     for (auto* entry = this; entry; entry = entry->GetNext(extra)) {
+#endif
       // |GetNext()| checks freelist integrity.
     }
 #endif
   }
 
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+  NOINLINE void CheckFreeListForThreadCache(size_t extra, uintptr_t random_cookie) const {
+#else
   NOINLINE void CheckFreeListForThreadCache(size_t extra) const {
+#endif
 #if defined(PA_HAS_FREELIST_HARDENING)
     for (auto* entry = this; entry;
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+         entry = entry->GetNextForThreadCache(extra, random_cookie)) {
+#else
         entry = entry->GetNextForThreadCache(extra)) {
+#endif
       // |GetNextForThreadCache()| checks freelist integrity.
     }
 #endif
   }
 
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+  ALWAYS_INLINE void SetNext(PartitionFreelistEntry* ptr, uintptr_t random_cookie) {
+#else
   ALWAYS_INLINE void SetNext(PartitionFreelistEntry* ptr) {
+#endif
     // SetNext() is either called on the freelist head, when provisioning new
     // slots, or when GetNext() has been called before, no need to pass the
     // size.
@@ -123,7 +163,11 @@ class PartitionFreelistEntry {
       FreelistCorruptionDetected(0);
     }
 #endif  // DCHECK_IS_ON()
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+    SetNextInternal(ptr, random_cookie);
+#else
     SetNextInternal(ptr);
+#endif
   }
 
   // Zeroes out |this| before returning it.
@@ -137,7 +181,11 @@ class PartitionFreelistEntry {
  private:
   friend struct EncodedPartitionFreelistEntry;
 
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+  ALWAYS_INLINE static void* Transform(void* ptr, uintptr_t random_cookie) {
+#else
   ALWAYS_INLINE static void* Transform(void* ptr) {
+#endif
     // We use bswap on little endian as a fast mask for two reasons:
     // 1) If an object is freed and its vtable used where the attacker doesn't
     //    get the chance to run allocations between the free and use, the vtable
@@ -146,16 +194,30 @@ class PartitionFreelistEntry {
     //    corrupt a freelist pointer, partial pointer overwrite attacks are
     //    thwarted.
     // For big endian, similar guarantees are arrived at with a negation.
+
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
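+    // XOR with the per-bucket cookie replaces the byteswap/negation mask;
+    // nullptr deliberately encodes to nullptr so null checks on encoded
+    // entries keep working.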
+    if (ptr == nullptr) {
+      return nullptr;
+    }
+
+    return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) ^ random_cookie);
+#else
 #if defined(ARCH_CPU_BIG_ENDIAN)
     uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
 #else
     uintptr_t masked = ByteSwapUintPtrT(reinterpret_cast<uintptr_t>(ptr));
 #endif
     return reinterpret_cast<void*>(masked);
+#endif
   }
 
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+  ALWAYS_INLINE void SetNextInternal(PartitionFreelistEntry* ptr, uintptr_t random_cookie) {
+    next_ = Encode(ptr, random_cookie);
+#else
   ALWAYS_INLINE void SetNextInternal(PartitionFreelistEntry* ptr) {
     next_ = Encode(ptr);
+#endif
 #if defined(PA_HAS_FREELIST_HARDENING)
     inverted_next_ = ~reinterpret_cast<uintptr_t>(next_);
 #endif
@@ -163,7 +225,11 @@
   ALWAYS_INLINE PartitionFreelistEntry* GetNextInternal(
       size_t extra,
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+      bool for_thread_cache, uintptr_t random_cookie) const;
+#else
      bool for_thread_cache) const;
+#endif
 
 #if defined(PA_HAS_FREELIST_HARDENING)
   static ALWAYS_INLINE bool IsSane(const PartitionFreelistEntry* here,
                                    const PartitionFreelistEntry* next,
@@ -216,9 +282,15 @@ struct EncodedPartitionFreelistEntry {
   ~EncodedPartitionFreelistEntry() = delete;
 
   ALWAYS_INLINE static PartitionFreelistEntry* Decode(
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+      EncodedPartitionFreelistEntry* ptr, uintptr_t random_cookie) {
+    return reinterpret_cast<PartitionFreelistEntry*>(
+        PartitionFreelistEntry::Transform(ptr, random_cookie));
+#else
       EncodedPartitionFreelistEntry* ptr) {
     return reinterpret_cast<PartitionFreelistEntry*>(
         PartitionFreelistEntry::Transform(ptr));
+#endif
   }
 };
 
@@ -228,8 +300,13 @@ static_assert(sizeof(PartitionFreelistEntry) ==
 
 ALWAYS_INLINE PartitionFreelistEntry* PartitionFreelistEntry::GetNextInternal(
     size_t extra,
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+    bool for_thread_cache, uintptr_t random_cookie) const {
+  auto* ret = EncodedPartitionFreelistEntry::Decode(next_, random_cookie);
+#else
     bool for_thread_cache) const {
   auto* ret = EncodedPartitionFreelistEntry::Decode(next_);
+#endif
   // GetNext() can be called on decommitted memory, in which case |next| is
   // nullptr, and none of the checks apply. Don't prefetch nullptr either.
   if (!ret)
@@ -253,6 +330,17 @@ ALWAYS_INLINE PartitionFreelistEntry* PartitionFreelistEntry::GetNextInternal(
   return ret;
 }
 
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+ALWAYS_INLINE PartitionFreelistEntry*
+PartitionFreelistEntry::GetNextForThreadCache(size_t extra, uintptr_t random_cookie) const {
+  return GetNextInternal(extra, true, random_cookie);
+}
+
+ALWAYS_INLINE PartitionFreelistEntry* PartitionFreelistEntry::GetNext(
+    size_t extra, uintptr_t random_cookie) const {
+  return GetNextInternal(extra, false, random_cookie);
+}
+#else
 ALWAYS_INLINE PartitionFreelistEntry*
 PartitionFreelistEntry::GetNextForThreadCache(size_t extra) const {
   return GetNextInternal(extra, true);
@@ -262,7 +350,7 @@ ALWAYS_INLINE PartitionFreelistEntry* PartitionFreelistEntry::GetNext(
     size_t extra) const {
   return GetNextInternal(extra, false);
 }
-
+#endif
 }  // namespace internal
 }  // namespace base
diff --git a/base/allocator/partition_allocator/partition_page.cc b/base/allocator/partition_allocator/partition_page.cc
index 3090c77dd18cd904bc959c0442cc3d2890a109d7..3e7d4dc50ebeb613b65e32eb327be83cc61dcdeb 100644
--- a/base/allocator/partition_allocator/partition_page.cc
+++ b/base/allocator/partition_allocator/partition_page.cc
@@ -266,7 +266,11 @@ void SlotSpanMetadata<thread_safe>::SortFreelist() {
   size_t num_free_slots = 0;
   size_t slot_size = bucket->slot_size;
   for (PartitionFreelistEntry* head = freelist_head; head;
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+       head = head->GetNext(slot_size, this->bucket->random_cookie)) {
+#else
        head = head->GetNext(slot_size)) {
+#endif
     ++num_free_slots;
     size_t offset_in_slot_span =
         memory::UnmaskPtr(reinterpret_cast<uintptr_t>(head)) - slot_span_start;
@@ -279,20 +283,45 @@ void SlotSpanMetadata<thread_safe>::SortFreelist() {
   if (num_free_slots > 1) {
     PartitionFreelistEntry* back = nullptr;
     PartitionFreelistEntry* head = nullptr;
+    size_t i;
+#if defined(OHOS_ENABLE_RANDOM)
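+    // Build an identity permutation of the provisioned slot indices and
+    // shuffle it; the freelist is rebuilt in this randomized order below.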
+    size_t tmp;
+    size_t random[num_provisioned_slots];
+    uint32_t rand;
+
+    for (i = 0; i < num_provisioned_slots; i++) {
+      random[i] = i;
+    }
+
+    for (i = num_provisioned_slots - 1; i > 0; i--) {
+      rand = RandomValue() % i;
+      tmp = random[i];
+      random[i] = random[rand];
+      random[rand] = tmp;
+    }
+#endif
 
     for (size_t slot_number = 0; slot_number < num_provisioned_slots;
          slot_number++) {
-      if (free_slots[slot_number]) {
+#if defined(OHOS_ENABLE_RANDOM)
+      i = random[slot_number];
+#else
+      i = slot_number;
+#endif
+      if (free_slots[i]) {
         uintptr_t slot_address =
-            memory::RemaskPtr(slot_span_start + (slot_size * slot_number));
+            memory::RemaskPtr(slot_span_start + (slot_size * i));
         auto* entry = new (reinterpret_cast<void*>(slot_address))
             PartitionFreelistEntry();
         if (!head)
           head = entry;
         else
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+          back->SetNext(entry, this->bucket->random_cookie);
+#else
           back->SetNext(entry);
-
+#endif
         back = entry;
       }
     }
 
diff --git a/base/allocator/partition_allocator/partition_page.h b/base/allocator/partition_allocator/partition_page.h
index 1940f0624f8511ab8ad72f59611fb37066591429..fa0f9d03565b2c1553708f2b1a5625e225c7dbcc 100644
--- a/base/allocator/partition_allocator/partition_page.h
+++ b/base/allocator/partition_allocator/partition_page.h
@@ -620,7 +620,11 @@ SlotSpanMetadata<thread_safe>::PopForAlloc(size_t size) {
   PartitionFreelistEntry* result = freelist_head;
   // Not setting freelist_is_sorted_ to false since this doesn't destroy
   // ordering.
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+  freelist_head = freelist_head->GetNext(size, bucket->random_cookie);
+#else
   freelist_head = freelist_head->GetNext(size);
+#endif
   num_allocated_slots++;
   return result;
 }
@@ -638,9 +642,15 @@ ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::Free(uintptr_t slot_start)
   // Catches an immediate double free.
   PA_CHECK(entry != freelist_head);
   // Look for double free one level deeper in debug.
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+  PA_DCHECK(!freelist_head ||
+            entry != freelist_head->GetNext(bucket->slot_size, bucket->random_cookie));
+  entry->SetNext(freelist_head, this->bucket->random_cookie);
+#else
   PA_DCHECK(!freelist_head ||
             entry != freelist_head->GetNext(bucket->slot_size));
   entry->SetNext(freelist_head);
+#endif
   SetFreelistHead(entry);
   // A best effort double-free check. Works only on empty slot spans.
   PA_CHECK(num_allocated_slots);
@@ -666,7 +676,11 @@ ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::AppendFreeList(
 #if DCHECK_IS_ON()
   auto* root = PartitionRoot<thread_safe>::FromSlotSpan(this);
   root->lock_.AssertAcquired();
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+  PA_DCHECK(!tail->GetNext(bucket->slot_size, bucket->random_cookie));
+#else
   PA_DCHECK(!tail->GetNext(bucket->slot_size));
+#endif
   PA_DCHECK(number_of_freed);
   PA_DCHECK(num_allocated_slots);
   if (CanStoreRawSize()) {
@@ -675,7 +689,11 @@ ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::AppendFreeList(
   {
     size_t number_of_entries = 0;
     for (auto* entry = head; entry;
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+         entry = entry->GetNext(bucket->slot_size, bucket->random_cookie), ++number_of_entries) {
+#else
          entry = entry->GetNext(bucket->slot_size), ++number_of_entries) {
+#endif
       uintptr_t unmasked_entry =
           memory::UnmaskPtr(reinterpret_cast<uintptr_t>(entry));
       // Check that all entries belong to this slot span.
@@ -686,8 +704,11 @@ ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::AppendFreeList(
     PA_DCHECK(number_of_entries == number_of_freed);
   }
 #endif
-
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+  tail->SetNext(freelist_head, bucket->random_cookie);
+#else
   tail->SetNext(freelist_head);
+#endif
   SetFreelistHead(head);
   PA_DCHECK(num_allocated_slots >= number_of_freed);
   num_allocated_slots -= number_of_freed;
diff --git a/base/allocator/partition_allocator/partition_root.cc b/base/allocator/partition_allocator/partition_root.cc
index 393f46ad51f362bdc06bd1127dce14604fa94353..8758adbaa28d398f6acba2810cdb53c5e176b5d3 100644
--- a/base/allocator/partition_allocator/partition_root.cc
+++ b/base/allocator/partition_allocator/partition_root.cc
@@ -247,14 +247,22 @@ static size_t PartitionPurgeSlotSpan(
                        slot_size;
     PA_DCHECK(slot_index < num_slots);
     slot_usage[slot_index] = 0;
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+    entry = entry->GetNext(slot_size, slot_span->bucket->random_cookie);
+#else
     entry = entry->GetNext(slot_size);
+#endif
 #if !BUILDFLAG(IS_WIN)
     // If we have a slot where the masked freelist entry is 0, we can actually
     // discard that freelist entry because touching a discarded page is
     // guaranteed to return original content or 0. (Note that this optimization
     // won't fire on big-endian machines because the masking function is
     // negation.)
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
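+    // A null entry still encodes to null under the XOR transform, so this
+    // check keeps identifying the last freelist slot.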
+    if (!internal::PartitionFreelistEntry::Encode(entry, slot_span->bucket->random_cookie))
+#else
     if (!internal::PartitionFreelistEntry::Encode(entry))
+#endif
       last_slot = slot_index;
 #endif
   }
@@ -304,7 +312,11 @@ static size_t PartitionPurgeSlotSpan(
         head = entry;
         back = entry;
       } else {
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+        back->SetNext(entry, slot_span->bucket->random_cookie);
+#else
         back->SetNext(entry);
+#endif
         back = entry;
       }
       num_new_entries++;
diff --git a/base/allocator/partition_allocator/starscan/pcscan_internal.cc b/base/allocator/partition_allocator/starscan/pcscan_internal.cc
index ea3bf97bdf0f1d7c0afa1de8ca9316e694d2748d..e0ae0d06f843be5199137a6539dd3d3303be2e8a 100644
--- a/base/allocator/partition_allocator/starscan/pcscan_internal.cc
+++ b/base/allocator/partition_allocator/starscan/pcscan_internal.cc
@@ -1031,7 +1031,11 @@ void UnmarkInCardTable(uintptr_t object,
   }
 
   if (freelist_tail) {
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+    freelist_tail->SetNext(entry, current_slot_span->bucket->random_cookie);
+#else
     freelist_tail->SetNext(entry);
+#endif
   }
   freelist_tail = entry;
   ++freelist_entries;
diff --git a/base/allocator/partition_allocator/starscan/pcscan_unittest.cc b/base/allocator/partition_allocator/starscan/pcscan_unittest.cc
index eefcacd750d9b1e3da92ced78637350d57c96fdf..8ad66f51c02a94303d2d89ee39bf45cb415ff265 100644
--- a/base/allocator/partition_allocator/starscan/pcscan_unittest.cc
+++ b/base/allocator/partition_allocator/starscan/pcscan_unittest.cc
@@ -168,7 +168,11 @@ bool IsInFreeList(uintptr_t slot_start) {
   slot_start = memory::RemaskPtr(slot_start);
   auto* slot_span = SlotSpan::FromSlotStart(slot_start);
   for (auto* entry = slot_span->get_freelist_head(); entry;
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+       entry = entry->GetNext(slot_span->bucket->slot_size, slot_span->bucket->random_cookie)) {
+#else
        entry = entry->GetNext(slot_span->bucket->slot_size)) {
+#endif
     if (reinterpret_cast<uintptr_t>(entry) == slot_start)
       return true;
   }
diff --git a/base/allocator/partition_allocator/thread_cache.cc b/base/allocator/partition_allocator/thread_cache.cc
index 273c329795cf728b31e5c31035e323eff369a454..eeeaa4dbd8d81e1f7cfb72d6333010b4742875aa 100644
--- a/base/allocator/partition_allocator/thread_cache.cc
+++ b/base/allocator/partition_allocator/thread_cache.cc
@@ -466,6 +466,9 @@ ThreadCache::ThreadCache(PartitionRoot<>* root)
                                std::memory_order_relaxed);
     tcache_bucket->slot_size = root_bucket.slot_size;
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+    tcache_bucket->random_cookie = root_bucket.random_cookie;
+#endif
 
     // Invalid bucket.
     if (!root_bucket.is_valid()) {
       // Explicitly set this, as size computations iterate over all buckets.
@@ -603,11 +606,18 @@ void ThreadCache::ClearBucket(ThreadCache::Bucket& bucket, size_t limit) {
   // triggers a major page fault, and we are running on a low-priority
   // thread, we don't want the thread to be blocked while holding the lock,
   // causing a priority inversion.
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+  bucket.freelist_head->CheckFreeListForThreadCache(bucket.slot_size, bucket.random_cookie);
+#else
   bucket.freelist_head->CheckFreeListForThreadCache(bucket.slot_size);
-
+#endif
   uint8_t count_before = bucket.count;
   if (limit == 0) {
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+    FreeAfter(bucket.freelist_head, bucket.slot_size, bucket.random_cookie);
+#else
     FreeAfter(bucket.freelist_head, bucket.slot_size);
+#endif
     bucket.freelist_head = nullptr;
   } else {
     // Free the *end* of the list, not the head, since the head contains the
@@ -615,11 +625,21 @@ void ThreadCache::ClearBucket(ThreadCache::Bucket& bucket, size_t limit) {
     auto* head = bucket.freelist_head;
     size_t items = 1;  // Cannot free the freelist head.
     while (items < limit) {
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+      head = head->GetNextForThreadCache(bucket.slot_size, bucket.random_cookie);
+#else
       head = head->GetNextForThreadCache(bucket.slot_size);
+#endif
       items++;
     }
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+    FreeAfter(head->GetNextForThreadCache(bucket.slot_size, bucket.random_cookie),
+              bucket.slot_size, bucket.random_cookie);
+    head->SetNext(nullptr, 0);
+#else
     FreeAfter(head->GetNextForThreadCache(bucket.slot_size), bucket.slot_size);
     head->SetNext(nullptr);
+#endif
   }
   bucket.count = limit;
   uint8_t count_after = bucket.count;
@@ -630,14 +650,22 @@ void ThreadCache::ClearBucket(ThreadCache::Bucket& bucket, size_t limit) {
   PA_DCHECK(cached_memory_ == CachedMemory());
 }
 
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+void ThreadCache::FreeAfter(PartitionFreelistEntry* head, size_t slot_size, uintptr_t random_cookie) {
+#else
 void ThreadCache::FreeAfter(PartitionFreelistEntry* head, size_t slot_size) {
+#endif
   // Acquire the lock once. Deallocation from the same bucket are likely to be
   // hitting the same cache lines in the central allocator, and lock
   // acquisitions can be expensive.
   partition_alloc::ScopedGuard guard(root_->lock_);
   while (head) {
     uintptr_t slot_start = reinterpret_cast<uintptr_t>(head);
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+    head = head->GetNextForThreadCache(slot_size, random_cookie);
+#else
     head = head->GetNextForThreadCache(slot_size);
+#endif
     root_->RawFreeLocked(slot_start);
   }
 }
diff --git a/base/allocator/partition_allocator/thread_cache.h b/base/allocator/partition_allocator/thread_cache.h
index dbf1bdcd01953184093f6895e097f92354ccf8e1..e207ab067551d8e8180ef110e7fc25af6de45be0 100644
--- a/base/allocator/partition_allocator/thread_cache.h
+++ b/base/allocator/partition_allocator/thread_cache.h
@@ -322,11 +322,17 @@ class BASE_EXPORT ThreadCache {
     uint8_t count = 0;
     std::atomic<uint8_t> limit{};  // Can be changed from another thread.
     uint16_t slot_size = 0;
-
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
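+    // Mirror of the owning root bucket's cookie. This grows Bucket by one
+    // word, hence the relaxed static_assert below.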
+    uintptr_t random_cookie;
+#endif
     Bucket();
   };
-  static_assert(sizeof(Bucket) <= 2 * sizeof(void*), "Keep Bucket small.");
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+  static_assert(sizeof(Bucket) <= 3 * sizeof(void*), "Keep Bucket small.");
+#else
+  static_assert(sizeof(Bucket) <= 2 * sizeof(void*), "Keep Bucket small.");
+#endif
 
   explicit ThreadCache(PartitionRoot<>* root);
   static void Delete(void* thread_cache_ptr);
   void PurgeInternal();
@@ -337,7 +343,11 @@ class BASE_EXPORT ThreadCache {
   ALWAYS_INLINE void PutInBucket(Bucket& bucket, uintptr_t slot_start);
   void ResetForTesting();
   // Releases the entire freelist starting at |head| to the root.
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+  void FreeAfter(PartitionFreelistEntry* head, size_t slot_size, uintptr_t random_cookie);
+#else
   void FreeAfter(PartitionFreelistEntry* head, size_t slot_size);
+#endif
   static void SetGlobalLimits(PartitionRoot<>* root, float multiplier);
 
 #if BUILDFLAG(IS_NACL)
@@ -490,7 +500,11 @@ ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
   // corruption, we know the bucket size that lead to the crash, helping to
   // narrow down the search for culprit. |bucket| was touched just now, so this
   // does not introduce another cache miss.
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+  auto* next = result->GetNextForThreadCache(bucket.slot_size, bucket.random_cookie);
+#else
   auto* next = result->GetNextForThreadCache(bucket.slot_size);
+#endif
   PA_DCHECK(result != next);
   bucket.count--;
   PA_DCHECK(bucket.count != 0 || !next);
@@ -551,7 +565,11 @@ ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
         // defined(PA_HAS_64_BITS_POINTERS)
 
   auto* entry = PartitionFreelistEntry::InitForThreadCache(
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+      slot_start, bucket.freelist_head, bucket.random_cookie);
+#else
       slot_start, bucket.freelist_head);
+#endif
   bucket.freelist_head = entry;
   bucket.count++;
 }
diff --git a/base/allocator/partition_allocator/thread_cache_unittest.cc b/base/allocator/partition_allocator/thread_cache_unittest.cc
index 6b91d19637b405ac3683bfd16a043cdb8fc18254..bb23d6e0ec43bccc4d62f7a0df98d4db62388493 100644
--- a/base/allocator/partition_allocator/thread_cache_unittest.cc
+++ b/base/allocator/partition_allocator/thread_cache_unittest.cc
@@ -848,7 +848,11 @@ TEST_F(PartitionAllocThreadCacheTest, ClearFromTail) {
     uint8_t count = 0;
     auto* head = tcache->buckets_[index].freelist_head;
     while (head) {
+#if defined(OHOS_ENABLE_FREELIST_HARDENED)
+      head = head->GetNext(tcache->buckets_[index].slot_size, tcache->buckets_[index].random_cookie);
+#else
       head = head->GetNext(tcache->buckets_[index].slot_size);
+#endif
       count++;
     }
     return count;
   }
diff --git a/ohos_build/build/config/ohos.json b/ohos_build/build/config/ohos.json
index 6afe31758b8e2c0672b13cd4d2f7a218f92b5765..4404c32ddeae2e15ae8e567b6a082ee140cf9145 100755
--- a/ohos_build/build/config/ohos.json
+++ b/ohos_build/build/config/ohos.json
@@ -5,4 +5,20 @@
   "genCommandline": "default",
   "dependence": "",
   "default": "true"
-}
+},
+{ "name": "OHOS_ENABLE_FREELIST_HARDENED",
+  "owner": "",
+  "desc": "partition allocator freelist hardened",
+  "effect":"main gn blink_core other",
+  "genCommandline": "default",
+  "dependence": "",
+  "default": "true"
+},
+{ "name": "OHOS_ENABLE_RANDOM",
+  "owner": "",
+  "desc": "partition allocator freelist random",
+  "effect":"main gn blink_core other",
+  "genCommandline": "default",
+  "dependence": "",
+  "default": "true"
+}
\ No newline at end of file