Revert "Bug 1928254 - pt 8. Improve the idle purge markers r=jstutte" for causing multiple failures.

This reverts commit 1de23dc2a6.

This reverts commit dc7de1e170.

This reverts commit 0891965b10.

This reverts commit 52c855715a.

This reverts commit fcdc85d6b4.

This reverts commit 1fa61503c2.

This reverts commit df279eb40e.

This reverts commit dd4278a7d5.
Author:    Serban Stanca
Committed: sstanca@mozilla.com, 2025-05-16 11:38:38 +03:00
Parent:    0ae173924f
Commit:    72ddd15355

17 changed files with 117 additions and 484 deletions

View File

@@ -10,7 +10,6 @@
 #include "mozmemory.h"
 #include "mozilla/mozalloc_oom.h"  // for mozalloc_handle_oom
-#include "nsString.h"
 
 #define NS_DECL_DOMARENA_DESTROY void Destroy(void);
@@ -35,13 +34,10 @@ namespace mozilla::dom {
 class DOMArena {
  public:
   friend class DocGroup;
-  explicit DOMArena(const nsACString& aLabel) {
-    nsCString label = PromiseFlatCString("DOMArena "_ns + aLabel);
+  DOMArena() {
     arena_params_t params;
     params.mMaxDirtyIncreaseOverride = 7;
     params.mFlags = ARENA_FLAG_THREAD_MAIN_THREAD_ONLY;
-    params.mLabel = label.get();
     mArenaId = moz_create_arena_with_params(&params);
   }
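
For readability, here is the DOMArena constructor reassembled from the hunk above, first as it was before this revert (with the profiler label) and then as restored; a sketch for orientation only, not independently checked against the tree:

  // Before the revert: the arena carries a label derived from the DocGroup key.
  explicit DOMArena(const nsACString& aLabel) {
    nsCString label = PromiseFlatCString("DOMArena "_ns + aLabel);
    arena_params_t params;
    params.mMaxDirtyIncreaseOverride = 7;
    params.mFlags = ARENA_FLAG_THREAD_MAIN_THREAD_ONLY;
    params.mLabel = label.get();
    mArenaId = moz_create_arena_with_params(&params);
  }

  // After the revert: the arena is created without a label again.
  DOMArena() {
    arena_params_t params;
    params.mMaxDirtyIncreaseOverride = 7;
    params.mFlags = ARENA_FLAG_THREAD_MAIN_THREAD_ONLY;
    mArenaId = moz_create_arena_with_params(&params);
  }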

View File

@@ -128,7 +128,7 @@ DocGroup::DocGroup(BrowsingContextGroup* aBrowsingContextGroup,
   // mBrowsingContextGroup->mDocGroups as the caller does it for us.
   MOZ_ASSERT(NS_IsMainThread());
   if (StaticPrefs::dom_arena_allocator_enabled_AtStartup()) {
-    mArena = new mozilla::dom::DOMArena(aKey.mKey);
+    mArena = new mozilla::dom::DOMArena();
   }
 }

View File

@@ -101,16 +101,13 @@ JS_PUBLIC_DATA arena_id_t js::StringBufferArena;
 void js::InitMallocAllocator() {
   arena_params_t mallocArenaParams;
   mallocArenaParams.mMaxDirtyIncreaseOverride = 5;
-  mallocArenaParams.mLabel = "JS malloc";
   MallocArena = moz_create_arena_with_params(&mallocArenaParams);
   BackgroundMallocArena = moz_create_arena_with_params(&mallocArenaParams);
 
   arena_params_t params;
   params.mMaxDirtyIncreaseOverride = 5;
   params.mFlags |= ARENA_FLAG_RANDOMIZE_SMALL_ENABLED;
-  params.mLabel = "Array buffer contents";
   ArrayBufferContentsArena = moz_create_arena_with_params(&params);
-  params.mLabel = "String buffer contents";
   StringBufferArena = moz_create_arena_with_params(&params);
 }

View File

@@ -141,7 +141,7 @@ MALLOC_DECL(moz_enable_deferred_purge, bool, bool)
 // Perform some purging.
 //
-// Returns a may_purge_now_result_t with the following meaning:
+// Returns a purge_result_t with the following meaning:
 //   Done: Purge has completed for all arenas.
 //   NeedsMore: There may be an arena that needs to be purged now. The caller
 //     may call moz_may_purge_one_now again.
@@ -163,7 +163,7 @@ MALLOC_DECL(moz_enable_deferred_purge, bool, bool)
 // lock/unlock and iterating the list of purges. The mutex is never held during
 // expensive operations.
 #  ifdef __cplusplus
-MALLOC_DECL(moz_may_purge_now, may_purge_now_result_t, bool, uint32_t,
+MALLOC_DECL(moz_may_purge_now, purge_result_t, bool, uint32_t,
             const mozilla::Maybe<std::function<bool()>>&)
 #  endif
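
To illustrate the contract documented above, a minimal consumer sketch; it assumes moz_may_purge_now is reachable through mozmemory.h, and the PurgeUntil helper is hypothetical (the real caller is RunIdleMemoryCleanup near the end of this commit):

  #include <functional>
  #include "mozilla/Maybe.h"
  #include "mozilla/TimeStamp.h"
  #include "mozmemory.h"

  // Purge in small steps until everything is done or the deadline passes.
  bool PurgeUntil(mozilla::TimeStamp aDeadline) {
    purge_result_t result;
    do {
      // aPeekOnly=false performs work; the lambda is the per-step time budget.
      result = moz_may_purge_now(
          /* aPeekOnly */ false, /* aReuseGraceMS */ 0,
          mozilla::Some(std::function<bool()>(
              [aDeadline] { return mozilla::TimeStamp::Now() <= aDeadline; })));
    } while (result == purge_result_t::NeedsMore &&
             mozilla::TimeStamp::Now() <= aDeadline);
    // WantsLater means some arena may want purging once its reuse grace
    // period has elapsed; Done means no requests remain.
    return result == purge_result_t::Done;
  }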

View File

@@ -6,7 +6,6 @@
 EXPORTS += [
     "malloc_decls.h",
-    "mozjemalloc_profiling.h",
     "mozjemalloc_types.h",
     "mozmemory.h",
     "mozmemory_utils.h",
@@ -73,4 +72,3 @@ TEST_DIRS += ["test"]
 if CONFIG["NIGHTLY_BUILD"]:
     DEFINES["NON_RANDOM_ARENA_IDS"] = True
-    DEFINES["MOZJEMALLOC_PROFILING_CALLBACKS"] = True

View File

@@ -124,7 +124,6 @@
 #include "mozmemory_wrap.h"
 #include "mozjemalloc.h"
 #include "mozjemalloc_types.h"
-#include "mozjemalloc_profiling.h"
 
 #include <cstring>
 #include <cerrno>
@@ -156,7 +155,6 @@
 #include "mozilla/Literals.h"
 #include "mozilla/MathAlgorithms.h"
 #include "mozilla/RandomNum.h"
-#include "mozilla/RefPtr.h"
 // Note: MozTaggedAnonymousMmap() could call an LD_PRELOADed mmap
 // instead of the one defined here; use only MozTagAnonymousMemory().
 #include "mozilla/TaggedAnonymousMemory.h"
@@ -644,14 +642,6 @@ static Atomic<size_t> gRecycledSize;
 static size_t opt_dirty_max = DIRTY_MAX_DEFAULT;
 
-#ifdef MOZJEMALLOC_PROFILING_CALLBACKS
-// MallocProfilerCallbacks is refcounted so that one thread cannot destroy it
-// while another thread accesses it. This means that clearing this value or
-// otherwise dropping a reference to it must not be done while holding an
-// arena's lock.
-MOZ_CONSTINIT static RefPtr<MallocProfilerCallbacks> sCallbacks;
-#endif
 
 // Return the smallest chunk multiple that is >= s.
 #define CHUNK_CEILING(s) (((s) + kChunkSizeMask) & ~kChunkSizeMask)
@@ -1308,12 +1298,6 @@ struct arena_t {
   // released after a concurrent purge completes.
   bool mMustDeleteAfterPurge MOZ_GUARDED_BY(mLock) = false;
 
-  // mLabel describes the label for the firefox profiler. It's stored in a
-  // fixed size area including a null terminating byte. The actual maximum
-  // length of the string is one less than LABEL_MAX_CAPACITY.
-  static constexpr size_t LABEL_MAX_CAPACITY = 128;
-  char mLabel[LABEL_MAX_CAPACITY];
 
  private:
   // Size/address-ordered tree of this arena's available runs. This tree
   // is used for first-best-fit run allocation.
@@ -1451,6 +1435,20 @@ struct arena_t {
       MOZ_REQUIRES(mLock);
 #endif
 
+  enum PurgeResult {
+    // The stop threshold of dirty pages was reached.
+    Done,
+    // There's more chunks in this arena that could be purged.
+    Continue,
+    // The only chunks with dirty pages are busy being purged by other threads.
+    Busy,
+    // The arena needs to be destroyed by the caller.
+    Dying,
+  };
+
   // Purge some dirty pages.
   //
   // When this is called the caller has already tested ShouldStartPurge()
@@ -1467,23 +1465,7 @@ struct arena_t {
   //
   // This must be called without the mLock held (it'll take the lock).
   //
-  ArenaPurgeResult Purge(PurgeCondition aCond, PurgeStats& aStats)
-      MOZ_EXCLUDES(mLock);
-
-  // Run Purge() in a loop. If sCallback is non-null then collect statistics and
-  // publish them through the callback, aCaller should be used to identify the
-  // caller in the profiling data.
-  //
-  // aCond         - when to stop purging
-  // aCaller       - a string representing the caller, this is used for
-  //                 profiling
-  // aReuseGraceMS - Stop purging the arena if it was used within this many
-  //                 milliseconds. Or 0 to ignore recent reuse.
-  // aKeepGoing    - Optional function to implement a time budget.
-  //
-  ArenaPurgeResult PurgeLoop(
-      PurgeCondition aCond, const char* aCaller, uint32_t aReuseGraceMS = 0,
-      Maybe<std::function<bool()>> aKeepGoing = Nothing()) MOZ_EXCLUDES(mLock);
+  PurgeResult Purge(PurgeCondition aCond) MOZ_EXCLUDES(mLock);
 
   class PurgeInfo {
    private:
@@ -1497,10 +1479,6 @@ struct arena_t {
   arena_chunk_t* mChunk = nullptr;
 
- private:
-  PurgeStats& mPurgeStats;
-
- public:
   size_t FreeRunLenBytes() const { return mFreeRunLen << gPageSize2Pow; }
 
   // The last index of the free run.
@@ -1534,8 +1512,8 @@ struct arena_t {
   // is dying, or we hit the arena-level threshold.
   void FinishPurgingInChunk(bool aAddToMAdvised) MOZ_REQUIRES(mArena.mLock);
 
-  explicit PurgeInfo(arena_t& arena, arena_chunk_t* chunk, PurgeStats& stats)
-      : mArena(arena), mChunk(chunk), mPurgeStats(stats) {}
+  explicit PurgeInfo(arena_t& arena, arena_chunk_t* chunk)
+      : mArena(arena), mChunk(chunk) {}
 };
 
   void HardPurge();
@@ -1553,8 +1531,7 @@ struct arena_t {
   inline purge_action_t ShouldStartPurge() MOZ_REQUIRES(mLock);
 
   // Take action according to ShouldStartPurge.
-  inline void MayDoOrQueuePurge(purge_action_t aAction, const char* aCaller)
-      MOZ_EXCLUDES(mLock);
+  inline void MayDoOrQueuePurge(purge_action_t aAction) MOZ_EXCLUDES(mLock);
 
   // Check the EffectiveHalfMaxDirty threshold to decide if we continue purge.
   // This threshold is lower than ShouldStartPurge to have some hysteresis.
@@ -1615,7 +1592,6 @@ class ArenaCollection {
     arena_params_t params;
     // The main arena allows more dirty pages than the default for other arenas.
     params.mMaxDirty = opt_dirty_max;
-    params.mLabel = "Default";
     mDefaultArena =
         mLock.Init() ? CreateArena(/* aIsPrivate = */ false, &params) : nullptr;
     mPurgeListLock.Init();
@@ -1787,7 +1763,7 @@ class ArenaCollection {
       }
     }
     if (ret != aEnable) {
-      MayPurgeAll(PurgeIfThreshold, __func__);
+      MayPurgeAll(PurgeIfThreshold);
     }
     return ret;
   }
@@ -1803,12 +1779,12 @@
       MOZ_EXCLUDES(mPurgeListLock);
 
   // Execute all outstanding purge requests, if any.
-  void MayPurgeAll(PurgeCondition aCond, const char* aCaller);
+  void MayPurgeAll(PurgeCondition aCond);
 
   // Purge some dirty memory, based on purge requests, returns true if there are
   // more to process.
   //
-  // Returns a may_purge_now_result_t with the following meaning:
+  // Returns a purge_result_t with the following meaning:
   //   Done: Purge has completed for all arenas.
   //   NeedsMore: There may be some arenas that needs to be purged now.
   //   WantsLater: There is at least one arena that might want a purge later,
@@ -1825,8 +1801,7 @@
   //   - There are more requests but aKeepGoing() returned false. (returns true)
   //   - One arena is completely purged, (returns true).
   //
-  may_purge_now_result_t MayPurgeSteps(
-      bool aPeekOnly, uint32_t aReuseGraceMS,
-      const Maybe<std::function<bool()>>& aKeepGoing);
+  purge_result_t MayPurgeSteps(bool aPeekOnly, uint32_t aReuseGraceMS,
+                               const Maybe<std::function<bool()>>& aKeepGoing);
 
  private:
@@ -2195,17 +2170,6 @@ void* MozVirtualAlloc(void* lpAddress, size_t dwSize, uint32_t flAllocationType,
 #endif  // XP_WIN
 
-#ifdef MOZJEMALLOC_PROFILING_CALLBACKS
-namespace mozilla {
-
-void jemalloc_set_profiler_callbacks(
-    RefPtr<MallocProfilerCallbacks>&& aCallbacks) {
-  sCallbacks = aCallbacks;
-}
-
-}  // namespace mozilla
-#endif
 
 // ***************************************************************************
 
 static inline void pages_decommit(void* aAddr, size_t aSize) {
@@ -2976,9 +2940,8 @@ static inline arena_t* thread_local_arena(bool enabled) {
     // called with `false`, but it doesn't matter at the moment.
     // because in practice nothing actually calls this function
     // with `false`, except maybe at shutdown.
-    arena_params_t params;
-    params.mLabel = "Thread local";
-    arena = gArenas.CreateArena(/* aIsPrivate = */ false, &params);
+    arena =
+        gArenas.CreateArena(/* aIsPrivate = */ false, /* aParams = */ nullptr);
   } else {
     arena = gArenas.GetDefault();
   }
@@ -3538,7 +3501,7 @@ size_t arena_t::ExtraCommitPages(size_t aReqPages, size_t aRemainingPages) {
 }
 #endif
 
-ArenaPurgeResult arena_t::Purge(PurgeCondition aCond, PurgeStats& aStats) {
+arena_t::PurgeResult arena_t::Purge(PurgeCondition aCond) {
   arena_chunk_t* chunk;
 
   // The first critical section will find a chunk and mark dirty pages in it as
@@ -3562,7 +3525,7 @@ ArenaPurgeResult arena_t::Purge(PurgeCondition aCond, PurgeStats& aStats) {
     if (!ShouldContinuePurge(aCond)) {
       mIsPurgePending = false;
-      return ReachedThreshold;
+      return Done;
     }
 
     // Take a single chunk and attempt to purge some of its dirty pages. The
@@ -3594,7 +3557,6 @@ ArenaPurgeResult arena_t::Purge(PurgeCondition aCond, PurgeStats& aStats) {
     MOZ_ASSERT(!chunk->mIsPurging);
     mChunksDirty.Remove(chunk);
     chunk->mIsPurging = true;
-    aStats.chunks++;
   }  // MaybeMutexAutoLock
 
   // True if we should continue purging memory from this arena.
@@ -3610,7 +3572,7 @@ ArenaPurgeResult arena_t::Purge(PurgeCondition aCond, PurgeStats& aStats) {
   while (continue_purge_chunk && continue_purge_arena) {
     // This structure is used to communicate between the two PurgePhase
     // functions.
-    PurgeInfo purge_info(*this, chunk, aStats);
+    PurgeInfo purge_info(*this, chunk);
 
     {
       // Phase 1: Find pages that need purging.
@@ -3640,7 +3602,7 @@ ArenaPurgeResult arena_t::Purge(PurgeCondition aCond, PurgeStats& aStats) {
   }
 
   // There's nothing else to do here, our caller may execute Purge() again
   // if continue_purge_arena is true.
-  return continue_purge_arena ? NotDone : ReachedThreshold;
+  return continue_purge_arena ? Continue : Done;
 }
 
 #ifdef MALLOC_DECOMMIT
@@ -3689,44 +3651,7 @@ ArenaPurgeResult arena_t::Purge(PurgeCondition aCond, PurgeStats& aStats) {
     purged_once = true;
   }
 
-  return continue_purge_arena ? NotDone : ReachedThreshold;
-}
-
-ArenaPurgeResult arena_t::PurgeLoop(PurgeCondition aCond, const char* aCaller,
-                                    uint32_t aReuseGraceMS,
-                                    Maybe<std::function<bool()>> aKeepGoing) {
-  PurgeStats purge_stats(mId, mLabel, aCaller);
-#ifdef MOZJEMALLOC_PROFILING_CALLBACKS
-  // We hold our own reference to callbacks for the duration of PurgeLoop to
-  // make sure it's not released during purging.
-  RefPtr<MallocProfilerCallbacks> callbacks = sCallbacks;
-  TimeStamp start;
-  if (callbacks) {
-    start = TimeStamp::Now();
-  }
-#endif
-
-  uint64_t reuseGraceNS = (uint64_t)aReuseGraceMS * 1000 * 1000;
-  uint64_t now = aReuseGraceMS ? 0 : GetTimestampNS();
-  ArenaPurgeResult pr;
-  do {
-    pr = Purge(aCond, purge_stats);
-    now = aReuseGraceMS ? 0 : GetTimestampNS();
-  } while (
-      pr == NotDone &&
-      (!aReuseGraceMS || (now - mLastSignificantReuseNS >= reuseGraceNS)) &&
-      (!aKeepGoing || (*aKeepGoing)()));
-
-#ifdef MOZJEMALLOC_PROFILING_CALLBACKS
-  if (callbacks) {
-    TimeStamp end = TimeStamp::Now();
-    // We can't hold an arena lock while committing profiler markers.
-    callbacks->OnPurge(start, end, purge_stats, pr);
-  }
-#endif
-  return pr;
+  return continue_purge_arena ? Continue : Done;
 }
 
 bool arena_t::PurgeInfo::FindDirtyPages(bool aPurgedOnce) {
@@ -3837,8 +3762,6 @@ std::pair<bool, arena_chunk_t*> arena_t::PurgeInfo::UpdatePagesAndCounts() {
 #endif
 
   mArena.mStats.committed -= mDirtyNPages;
-  mPurgeStats.pages += mDirtyNPages;
-  mPurgeStats.system_calls++;
 
   if (mChunk->mDying) {
     // A dying chunk doesn't need to be coaleased, it will already have one
@@ -4835,7 +4758,7 @@ static inline void arena_dalloc(void* aPtr, size_t aOffset, arena_t* aArena) {
     chunk_dealloc((void*)chunk_dealloc_delay, kChunkSize, ARENA_CHUNK);
   }
 
-  arena->MayDoOrQueuePurge(purge_action, "arena_dalloc");
+  arena->MayDoOrQueuePurge(purge_action);
 }
 
 static inline void idalloc(void* ptr, arena_t* aArena) {
@@ -4865,8 +4788,7 @@ inline purge_action_t arena_t::ShouldStartPurge() {
   return purge_action_t::None;
 }
 
-inline void arena_t::MayDoOrQueuePurge(purge_action_t aAction,
-                                       const char* aCaller) {
+inline void arena_t::MayDoOrQueuePurge(purge_action_t aAction) {
   switch (aAction) {
     case purge_action_t::Queue:
       // Note that this thread committed earlier by setting
@@ -4876,14 +4798,16 @@ inline void arena_t::MayDoOrQueuePurge(purge_action_t aAction,
       // ShouldStartPurge() or Purge() next time.
       gArenas.AddToOutstandingPurges(this);
       break;
-    case purge_action_t::PurgeNow: {
-      ArenaPurgeResult pr = PurgeLoop(PurgeIfThreshold, aCaller);
+    case purge_action_t::PurgeNow:
+      PurgeResult pr;
+      do {
+        pr = Purge(PurgeIfThreshold);
+      } while (pr == arena_t::PurgeResult::Continue);
       // Arenas cannot die here because the caller is still using the arena, if
       // they did it'd be a use-after-free: the arena is destroyed but then used
       // afterwards.
-      MOZ_RELEASE_ASSERT(pr != ArenaPurgeResult::Dying);
+      MOZ_RELEASE_ASSERT(pr != arena_t::PurgeResult::Dying);
       break;
-    }
     case purge_action_t::None:
       // do nothing.
       break;
@@ -4914,7 +4838,7 @@ void arena_t::RallocShrinkLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize,
     purge_action = ShouldStartPurge();
   }
 
-  MayDoOrQueuePurge(purge_action, "RallocShrinkLarge");
+  MayDoOrQueuePurge(purge_action);
 }
 
 // Returns whether reallocation was successful.
@@ -5080,27 +5004,9 @@ arena_t::arena_t(arena_params_t* aParams, bool aIsPrivate) {
     mMaxDirtyIncreaseOverride = aParams->mMaxDirtyIncreaseOverride;
     mMaxDirtyDecreaseOverride = aParams->mMaxDirtyDecreaseOverride;
-
-    if (aParams->mLabel) {
-      // The string may be truncated so always place a null-byte in the last
-      // position.
-      strncpy(mLabel, aParams->mLabel, LABEL_MAX_CAPACITY - 1);
-      mLabel[LABEL_MAX_CAPACITY - 1] = 0;
-
-      // If the string was trucated, then replace it's end with "..."
-      if (strlen(aParams->mLabel) >= LABEL_MAX_CAPACITY) {
-        for (int i = 0; i < 3; i++) {
-          mLabel[LABEL_MAX_CAPACITY - 2 - i] = '.';
-        }
-      }
-    } else {
-      mLabel[0] = 0;
-    }
   } else {
     mMaxDirtyIncreaseOverride = 0;
     mMaxDirtyDecreaseOverride = 0;
-    mLabel[0] = 0;
   }
 
   mLastSignificantReuseNS = GetTimestampNS();
@@ -6074,13 +5980,13 @@ inline void MozJemalloc::jemalloc_purge_freed_pages() {
 
 inline void MozJemalloc::jemalloc_free_dirty_pages(void) {
   if (malloc_initialized) {
-    gArenas.MayPurgeAll(PurgeUnconditional, __func__);
+    gArenas.MayPurgeAll(PurgeUnconditional);
   }
 }
 
 inline void MozJemalloc::jemalloc_free_excess_dirty_pages(void) {
   if (malloc_initialized) {
-    gArenas.MayPurgeAll(PurgeIfThreshold, __func__);
+    gArenas.MayPurgeAll(PurgeIfThreshold);
   }
 }
@@ -6206,7 +6112,7 @@ inline bool MozJemalloc::moz_enable_deferred_purge(bool aEnabled) {
   return gArenas.SetDeferredPurge(aEnabled);
 }
 
-inline may_purge_now_result_t MozJemalloc::moz_may_purge_now(
+inline purge_result_t MozJemalloc::moz_may_purge_now(
     bool aPeekOnly, uint32_t aReuseGraceMS,
     const Maybe<std::function<bool()>>& aKeepGoing) {
   return gArenas.MayPurgeSteps(aPeekOnly, aReuseGraceMS, aKeepGoing);
@@ -6236,7 +6142,7 @@ inline bool ArenaCollection::RemoveFromOutstandingPurges(arena_t* aArena) {
   return false;
 }
 
-may_purge_now_result_t ArenaCollection::MayPurgeSteps(
+purge_result_t ArenaCollection::MayPurgeSteps(
     bool aPeekOnly, uint32_t aReuseGraceMS,
    const Maybe<std::function<bool()>>& aKeepGoing) {
   // This only works on the main thread because it may process main-thread-only
@@ -6249,7 +6155,7 @@ may_purge_now_result_t ArenaCollection::MayPurgeSteps(
   {
     MutexAutoLock lock(mPurgeListLock);
     if (mOutstandingPurges.isEmpty()) {
-      return may_purge_now_result_t::Done;
+      return purge_result_t::Done;
     }
 
     for (arena_t& arena : mOutstandingPurges) {
      if (now - arena.mLastSignificantReuseNS >= reuseGraceNS) {
@@ -6259,10 +6165,10 @@ may_purge_now_result_t ArenaCollection::MayPurgeSteps(
    }
 
    if (!found) {
-      return may_purge_now_result_t::WantsLater;
+      return purge_result_t::WantsLater;
    }
    if (aPeekOnly) {
-      return may_purge_now_result_t::NeedsMore;
+      return purge_result_t::NeedsMore;
    }
 
    // We need to avoid the invalid state where mIsDeferredPurgePending is set
@@ -6271,10 +6177,15 @@ may_purge_now_result_t ArenaCollection::MayPurgeSteps(
    mOutstandingPurges.remove(found);
   }
 
-  ArenaPurgeResult pr =
-      found->PurgeLoop(PurgeIfThreshold, __func__, aReuseGraceMS, aKeepGoing);
+  arena_t::PurgeResult pr;
+  do {
+    pr = found->Purge(PurgeIfThreshold);
+    now = GetTimestampNS();
+  } while (pr == arena_t::PurgeResult::Continue &&
+           (now - found->mLastSignificantReuseNS >= reuseGraceNS) &&
+           aKeepGoing && (*aKeepGoing)());
 
-  if (pr == ArenaPurgeResult::NotDone) {
+  if (pr == arena_t::PurgeResult::Continue) {
     // If there's more work to do we re-insert the arena into the purge queue.
     // If the arena was busy we don't since the other thread that's purging it
     // will finish that work.
@@ -6291,7 +6202,7 @@ may_purge_now_result_t ArenaCollection::MayPurgeSteps(
       // to increase the probability to find it fast.
      mOutstandingPurges.pushFront(found);
    }
-  } else if (pr == ArenaPurgeResult::Dying) {
+  } else if (pr == arena_t::PurgeResult::Dying) {
    delete found;
  }
@@ -6299,22 +6210,25 @@ may_purge_now_result_t ArenaCollection::MayPurgeSteps(
   // us again and we will do the above checks then and return their result.
   // Note that in the current surrounding setting this may (rarely) cause a
   // new slice of our idle task runner if we are exceeding idle budget.
-  return may_purge_now_result_t::NeedsMore;
+  return purge_result_t::NeedsMore;
 }
 
-void ArenaCollection::MayPurgeAll(PurgeCondition aCond, const char* aCaller) {
+void ArenaCollection::MayPurgeAll(PurgeCondition aCond) {
   MutexAutoLock lock(mLock);
   for (auto* arena : iter()) {
     // Arenas that are not IsMainThreadOnly can be purged from any thread.
     // So we do what we can even if called from another thread.
    if (!arena->IsMainThreadOnly() || IsOnMainThreadWeak()) {
      RemoveFromOutstandingPurges(arena);
-      ArenaPurgeResult pr = arena->PurgeLoop(aCond, aCaller);
+      arena_t::PurgeResult pr;
+      do {
+        pr = arena->Purge(aCond);
+      } while (pr == arena_t::PurgeResult::Continue);
      // No arena can die here because we're holding the arena collection lock.
      // Arenas are removed from the collection before setting their mDying
      // flag.
-      MOZ_RELEASE_ASSERT(pr != ArenaPurgeResult::Dying);
+      MOZ_RELEASE_ASSERT(pr != arena_t::PurgeResult::Dying);
    }
  }
 }
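
For orientation, the profiling hand-off that disappears together with PurgeLoop(), condensed from the removed lines above; the reuse-grace and keep-going checks are elided, so this is a summary sketch and not a drop-in restoration:

  ArenaPurgeResult arena_t::PurgeLoop(PurgeCondition aCond, const char* aCaller) {
    PurgeStats stats(mId, mLabel, aCaller);
    // Snapshot the global callback so another thread clearing sCallbacks
    // cannot destroy the object while this purge is running.
    RefPtr<MallocProfilerCallbacks> callbacks = sCallbacks;
    TimeStamp start = callbacks ? TimeStamp::Now() : TimeStamp();

    ArenaPurgeResult pr;
    do {
      pr = Purge(aCond, stats);
    } while (pr == NotDone);

    if (callbacks) {
      // Markers must never be emitted while an arena lock is held.
      callbacks->OnPurge(start, TimeStamp::Now(), stats, pr);
    }
    return pr;
  }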

View File

@@ -172,10 +172,10 @@ struct DummyArenaAllocator {
 
   static bool moz_enable_deferred_purge(bool aEnable) { return false; }
 
-  static may_purge_now_result_t moz_may_purge_now(
+  static purge_result_t moz_may_purge_now(
       bool aPeekOnly, uint32_t aReuseGraceMS,
       const mozilla::Maybe<std::function<bool()>>& aKeepGoing) {
-    return may_purge_now_result_t::Done;
+    return purge_result_t::Done;
   }
 
 #define MALLOC_DECL(name, return_type, ...) \

View File

@@ -1,51 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef _MOZJEMALLOC_PROFILING_H
#define _MOZJEMALLOC_PROFILING_H
#include "mozilla/Atomics.h"
#include "mozilla/RefCounted.h"
#include "mozilla/RefPtr.h"
#include "mozilla/TimeStamp.h"
#include "mozjemalloc_types.h"
#include "mozmemory_wrap.h"
namespace mozilla {
struct PurgeStats {
arena_id_t arena_id;
const char* arena_label;
const char* caller;
size_t pages = 0;
size_t system_calls = 0;
size_t chunks = 0;
PurgeStats(arena_id_t aId, const char* aLabel, const char* aCaller)
: arena_id(aId), arena_label(aLabel), caller(aCaller) {}
};
#ifdef MOZJEMALLOC_PROFILING_CALLBACKS
class MallocProfilerCallbacks
: public external::AtomicRefCounted<MallocProfilerCallbacks> {
public:
virtual ~MallocProfilerCallbacks() {}
using TS = mozilla::TimeStamp;
virtual void OnPurge(TS aStart, TS aEnd, const PurgeStats& aStats,
ArenaPurgeResult aResult) = 0;
MOZ_DECLARE_REFCOUNTED_TYPENAME(ClassName);
};
MOZ_JEMALLOC_API void jemalloc_set_profiler_callbacks(
RefPtr<MallocProfilerCallbacks>&& aCallbacks);
#endif
} // namespace mozilla
#endif // ! _MOZJEMALLOC_PROFILING_H
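
As an illustration of what the interface above asked of an implementer, a hypothetical LoggingCallbacks class (not from the tree; the real implementation is the deleted memory_markers.cpp further down):

  #include <cstdio>

  class LoggingCallbacks final : public mozilla::MallocProfilerCallbacks {
   public:
    void OnPurge(mozilla::TimeStamp aStart, mozilla::TimeStamp aEnd,
                 const mozilla::PurgeStats& aStats,
                 ArenaPurgeResult aResult) override {
      // Report what a single purge pass released and how long it took.
      printf("purge '%s' from %s: %zu pages, %zu syscalls, result=%d, %.3f ms\n",
             aStats.arena_label ? aStats.arena_label : "?",
             aStats.caller ? aStats.caller : "?", aStats.pages,
             aStats.system_calls, (int)aResult,
             (aEnd - aStart).ToMilliseconds());
    }
  };

  // Registration handed a reference to the allocator; passing nullptr
  // (as the profiler did on stop) cleared it again:
  //   jemalloc_set_profiler_callbacks(MakeRefPtr<LoggingCallbacks>());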

View File

@@ -82,17 +82,12 @@ typedef struct arena_params_s {
   uint32_t mFlags;
 
-  // The label will be copied into fixed-size storage (currently 128 bytes)
-  // within the arena. It may be null for unamed arenas
-  const char* mLabel;
-
 #ifdef __cplusplus
   arena_params_s()
       : mMaxDirty(0),
         mMaxDirtyIncreaseOverride(0),
         mMaxDirtyDecreaseOverride(0),
-        mFlags(0),
-        mLabel(nullptr) {}
+        mFlags(0) {}
 #endif
 } arena_params_t;
@@ -220,23 +215,8 @@ static inline bool jemalloc_ptr_is_freed_page(jemalloc_ptr_info_t* info) {
   return info->tag == TagFreedPage;
 }
 
-// The result of purging memory from a sigle arena
-enum ArenaPurgeResult {
-  // The stop threshold of dirty pages was reached.
-  ReachedThreshold,
-  // There's more chunks in this arena that could be purged.
-  NotDone,
-  // The only chunks with dirty pages are busy being purged by other threads.
-  Busy,
-  // The arena needs to be destroyed by the caller.
-  Dying,
-};
-
-// The result of calling moz_may_purge_now().
-enum may_purge_now_result_t {
+// The result of a purge step.
+enum purge_result_t {
   // Done: No more purge requests are pending.
   Done,
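
For orientation, how the removed names map onto the ones restored elsewhere in this revert (reconstructed from the hunks in this commit only):

  // Arena-level results (nested in arena_t again after the revert):
  //   ArenaPurgeResult::ReachedThreshold  ->  arena_t::PurgeResult::Done
  //   ArenaPurgeResult::NotDone           ->  arena_t::PurgeResult::Continue
  //   ArenaPurgeResult::Busy              ->  arena_t::PurgeResult::Busy
  //   ArenaPurgeResult::Dying             ->  arena_t::PurgeResult::Dying
  // Public result of moz_may_purge_now():
  //   may_purge_now_result_t{Done, NeedsMore, WantsLater}
  //       ->  purge_result_t{Done, NeedsMore, WantsLater}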

View File

@@ -91,4 +91,3 @@ OS_LIBS += CONFIG["DL_LIBS"]
 DisableStlWrapping()
 
 include("/mozglue/build/replace_malloc.mozbuild")
-include("/mozglue/misc/timestamp.mozbuild")

View File

@@ -48,9 +48,12 @@ SOURCES += [
     "Debug.cpp",
     "LoggingCore.cpp",
     "MmapFaultHandler.cpp",
+    "Now.cpp",
     "Printf.cpp",
     "SIMD.cpp",
     "StackWalk.cpp",
+    "TimeStamp.cpp",
+    "Uptime.cpp",
 ]
 
 if CONFIG["TARGET_CPU"].startswith("x86"):
@@ -91,6 +94,7 @@ if CONFIG["OS_ARCH"] == "WINNT":
     ]
     SOURCES += [
         "GetKnownFolderPath.cpp",
+        "TimeStamp_windows.cpp",
         "WindowsDiagnostics.cpp",
         "WindowsDllMain.cpp",
         "WindowsDpiInitialization.cpp",
@@ -108,6 +112,17 @@ if CONFIG["OS_ARCH"] == "WINNT":
         "PreXULSkeletonUI.cpp",
     ]
 
+elif CONFIG["OS_ARCH"] == "Darwin":
+    SOURCES += [
+        "TimeStamp_darwin.cpp",
+    ]
+elif CONFIG["HAVE_CLOCK_MONOTONIC"]:
+    SOURCES += [
+        "TimeStamp_posix.cpp",
+    ]
+elif CONFIG["COMPILE_ENVIRONMENT"]:
+    error("No TimeStamp implementation on this platform. Build will not succeed")
+
 if CONFIG["OS_ARCH"] == "WINNT":
     SOURCES += [
         "ConditionVariable_windows.cpp",
@@ -142,5 +157,3 @@ if CONFIG["CC_TYPE"] in ("clang", "clang-cl"):
 
 for var in ("MOZ_APP_BASENAME", "MOZ_APP_VENDOR"):
     DEFINES[var] = '"%s"' % CONFIG[var]
-
-include("/mozglue/misc/timestamp.mozbuild")

View File

@@ -1,25 +0,0 @@
# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
# vim: set filetype=python:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
SOURCES += [
"/mozglue/misc/Now.cpp",
"/mozglue/misc/TimeStamp.cpp",
"/mozglue/misc/Uptime.cpp",
]
if CONFIG["OS_TARGET"] == "WINNT":
SOURCES += [
"/mozglue/misc/TimeStamp_windows.cpp",
]
elif CONFIG["OS_TARGET"] == "Darwin":
SOURCES += [
"/mozglue/misc/TimeStamp_darwin.cpp",
]
elif CONFIG["HAVE_CLOCK_MONOTONIC"]:
SOURCES += [
"/mozglue/misc/TimeStamp_posix.cpp",
]
elif CONFIG["COMPILE_ENVIRONMENT"]:
error("No TimeStamp implementation on this platform. Build will not succeed")

View File

@@ -1,97 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "memory_markers.h"
#include "mozmemory.h"
#include "mozjemalloc_profiling.h"
#include "mozilla/RefPtr.h"
#include "mozilla/ProfilerMarkers.h"
namespace geckoprofiler::markers {
struct PurgeArenaMarker : mozilla::BaseMarkerType<PurgeArenaMarker> {
static constexpr const char* Name = "PurgeArena";
static constexpr const char* Description =
"Purge dirtied pages from the resident memory set";
using MS = mozilla::MarkerSchema;
using String8View = mozilla::ProfilerString8View;
static constexpr MS::PayloadField PayloadFields[] = {
{"id", MS::InputType::Uint32, "Arena Id", MS::Format::Integer},
{"label", MS::InputType::CString, "Arena", MS::Format::String},
{"caller", MS::InputType::CString, "Caller", MS::Format::String},
{"pages", MS::InputType::Uint32, "Number of pages", MS::Format::Integer},
{"syscalls", MS::InputType::Uint32, "Number of system calls",
MS::Format::Integer},
{"chunks", MS::InputType::Uint32, "Number of chunks processed",
MS::Format::Integer},
{"result", MS::InputType::CString, "Result", MS::Format::String}};
static void StreamJSONMarkerData(
mozilla::baseprofiler::SpliceableJSONWriter& aWriter, uint32_t aId,
const String8View& aLabel, const String8View& aCaller, uint32_t aPages,
uint32_t aSyscalls, uint32_t aChunks, const String8View& aResult) {
aWriter.IntProperty("id", aId);
aWriter.StringProperty("label", aLabel);
aWriter.StringProperty("caller", aCaller);
aWriter.IntProperty("pages", aPages);
aWriter.IntProperty("syscalls", aSyscalls);
aWriter.IntProperty("chunks", aChunks);
aWriter.StringProperty("result", aResult);
}
static constexpr MS::Location Locations[] = {MS::Location::MarkerChart,
MS::Location::MarkerTable};
};
} // namespace geckoprofiler::markers
namespace mozilla {
namespace profiler {
class GeckoProfilerMallocCallbacks : public MallocProfilerCallbacks {
public:
virtual void OnPurge(TimeStamp aStart, TimeStamp aEnd,
const PurgeStats& aStats,
ArenaPurgeResult aResult) override {
const char* result = nullptr;
switch (aResult) {
case ReachedThreshold:
result = "Reached dirty page threshold";
break;
case NotDone:
result = "Purge exited early (eg caller set a time budget)";
break;
case Busy:
result = "Last chunk is busy being purged on another thread";
break;
case Dying:
result = "Arena is being destroyed";
break;
}
PROFILER_MARKER(
"PurgeArena", GCCC, MarkerTiming::Interval(aStart, aEnd),
PurgeArenaMarker, aStats.arena_id,
ProfilerString8View::WrapNullTerminatedString(aStats.arena_label),
ProfilerString8View::WrapNullTerminatedString(aStats.caller),
aStats.pages, aStats.system_calls, aStats.chunks,
ProfilerString8View::WrapNullTerminatedString(result));
}
};
} // namespace profiler
void register_profiler_memory_callbacks() {
auto val = MakeRefPtr<profiler::GeckoProfilerMallocCallbacks>();
jemalloc_set_profiler_callbacks(val);
}
void unregister_profiler_memory_callbacks() {
jemalloc_set_profiler_callbacks(nullptr);
}
} // namespace mozilla

View File

@@ -1,19 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef memory_markers_h
#define memory_markers_h
#if defined(MOZ_PROFILER_MEMORY) && defined(MOZJEMALLOC_PROFILING_CALLBACKS)
namespace mozilla {
void register_profiler_memory_callbacks();
void unregister_profiler_memory_callbacks();
} // namespace mozilla
#endif
#endif

View File

@@ -60,7 +60,6 @@
 #include "js/ProfilingFrameIterator.h"
 #include "memory_counter.h"
 #include "memory_hooks.h"
-#include "memory_markers.h"
 #include "mozilla/ArrayUtils.h"
 #include "mozilla/AutoProfilerLabel.h"
 #include "mozilla/BaseAndGeckoProfilerDetail.h"
@@ -1225,11 +1224,6 @@ class ActivePS {
       }
     }
 #endif
 
-#if defined(MOZ_PROFILER_MEMORY) && defined(MOZJEMALLOC_PROFILING_CALLBACKS)
-    unregister_profiler_memory_callbacks();
-#endif
-
     if (mProfileBufferChunkManager) {
       // We still control the chunk manager, remove it from the core buffer.
       profiler_get_core_buffer().ResetChunkManager();
@@ -6686,9 +6680,6 @@ static void locked_profiler_start(PSLockRef aLock, PowerOfTwo32 aCapacity,
     auto counter = mozilla::profiler::create_memory_counter();
     locked_profiler_add_sampled_counter(aLock, counter.get());
     ActivePS::SetMemoryCounter(std::move(counter), aLock);
-#  ifdef MOZJEMALLOC_PROFILING_CALLBACKS
-    register_profiler_memory_callbacks();
-#  endif
   }
 #endif

View File

@@ -33,11 +33,6 @@ if CONFIG["MOZ_GECKO_PROFILER"]:
     UNIFIED_SOURCES += [
         "core/memory_counter.cpp",
     ]
-    if CONFIG["NIGHTLY_BUILD"]:
-        DEFINES["MOZJEMALLOC_PROFILING_CALLBACKS"] = True
-        UNIFIED_SOURCES += [
-            "core/memory_markers.cpp",
-        ]
     if CONFIG["MOZ_REPLACE_MALLOC"] and CONFIG["MOZ_PROFILER_MEMORY"]:
         SOURCES += [
             "core/memory_hooks.cpp",  # Non-unified because of order of #includes

View File

@@ -922,31 +922,7 @@ void ScheduleIdleMemoryCleanup(uint32_t aWantsLaterDelay) {
       "TaskController::IdlePurgeRunner", TimeDuration(), maxPurgeDelay,
       minPurgeBudget, true, nullptr, nullptr);
 }
 
-}  // namespace mozilla
-
-namespace geckoprofiler::markers {
-
-struct IdlePurgePeekMarker : mozilla::BaseMarkerType<IdlePurgePeekMarker> {
-  static constexpr const char* Name = "IdlePurgePeek";
-  static constexpr const char* Description = "Check if we should purge memory";
-
-  using MS = mozilla::MarkerSchema;
-  using String8View = mozilla::ProfilerString8View;
-  static constexpr MS::PayloadField PayloadFields[] = {
-      {"status", MS::InputType::CString, "Status", MS::Format::String}};
-
-  static void StreamJSONMarkerData(
-      mozilla::baseprofiler::SpliceableJSONWriter& aWriter,
-      const String8View& aStatus) {
-    aWriter.StringProperty("status", aStatus);
-  }
-
-  static constexpr MS::Location Locations[] = {MS::Location::MarkerChart,
-                                               MS::Location::MarkerTable};
-};
-
-}  // namespace geckoprofiler::markers
-
-namespace mozilla {
-
 // Check if a purge needs to be scheduled now or later.
 // Both used as timer callback and directly from MayScheduleIdleMemoryCleanup.
 //
@@ -958,8 +934,12 @@
 // (very cheap) check actually runs.
 //
 // aTimer: Not used
-// aClosure: Not used
+// aClosure: Supposed to point to a name literal to be used for profile
+// markers.
 void CheckIdleMemoryCleanupNeeded(nsITimer* aTimer, void* aClosure) {
+  MOZ_ASSERT(aClosure);
+  const char* name = (const char*)aClosure;
+
   uint32_t reuseGracePeriod =
       StaticPrefs::memory_lazypurge_reuse_grace_period();
@@ -975,7 +955,7 @@ void CheckIdleMemoryCleanupNeeded(nsITimer* aTimer, void* aClosure) {
   auto result =
       moz_may_purge_now(/* aPeekOnly */ true, reuseGracePeriod, Nothing());
   switch (result) {
-    case may_purge_now_result_t::Done:
+    case purge_result_t::Done:
       // Currently we unqueue purge requests only:
       // if we run moz_may_purge_one_now with aPeekOnly==false and that happens
       // only in the IdleTaskRunner which cancels itself when done
@@ -984,33 +964,29 @@ void CheckIdleMemoryCleanupNeeded(nsITimer* aTimer, void* aClosure) {
       // jemalloc_free_(excess)_dirty_pages or moz_set_max_dirty_page_modifier)
      // which can happen anytime.
      if (sIdleMemoryCleanupRunner || sIdleMemoryCleanupWantsLaterScheduled) {
-        PROFILER_MARKER("IdlePurgePeek", GCCC, MarkerTiming::InstantNow(),
-                        IdlePurgePeekMarker,
-                        ProfilerString8View::WrapNullTerminatedString(
-                            "Done (Cancel timer or runner)"));
+        PROFILER_MARKER_TEXT(
+            ProfilerString8View::WrapNullTerminatedString(name), OTHER, {},
+            "Done (Cancel timer or runner)"_ns);
        CancelIdleMemoryCleanupTimerAndRunner();
      }
      break;
-    case may_purge_now_result_t::WantsLater:
+    case purge_result_t::WantsLater:
      if (!sIdleMemoryCleanupWantsLaterScheduled) {
-        PROFILER_MARKER(
-            "IdlePurgePeek", GCCC, MarkerTiming::InstantNow(),
-            IdlePurgePeekMarker,
-            ProfilerString8View::WrapNullTerminatedString(
-                "WantsLater (First schedule of low priority timer)"));
+        PROFILER_MARKER_TEXT(
+            ProfilerString8View::WrapNullTerminatedString(name), OTHER, {},
+            "WantsLater (First schedule of low priority timer)"_ns);
      }
      // We always want to (re-)schedule the timer to prevent it from firing
      // as much as possible.
      ScheduleWantsLaterTimer(wantsLaterDelay);
      break;
-    case may_purge_now_result_t::NeedsMore:
+    case purge_result_t::NeedsMore:
      // We can get here from the main thread going repeatedly idle after we
      // already scheduled a runner. Just keep it.
      if (!sIdleMemoryCleanupRunner) {
-        PROFILER_MARKER("IdlePurgePeek", GCCC, MarkerTiming::InstantNow(),
-                        IdlePurgePeekMarker,
-                        ProfilerString8View::WrapNullTerminatedString(
-                            "NeedsMore (Schedule as-soon-as-idle cleanup)"));
+        PROFILER_MARKER_TEXT(
+            ProfilerString8View::WrapNullTerminatedString(name), OTHER, {},
+            "NeedsMore (Schedule as-soon-as-idle cleanup)"_ns);
        ScheduleIdleMemoryCleanup(wantsLaterDelay);
      } else {
        MOZ_ASSERT(!sIdleMemoryCleanupWantsLaterScheduled);
@@ -1018,35 +994,6 @@ void CheckIdleMemoryCleanupNeeded(nsITimer* aTimer, void* aClosure) {
       break;
   }
 }
 
-}  // namespace mozilla
-
-namespace geckoprofiler::markers {
-
-struct IdlePurgeMarker : mozilla::BaseMarkerType<IdlePurgeMarker> {
-  static constexpr const char* Name = "IdlePurge";
-  static constexpr const char* Description =
-      "Purge memory from mozjemalloc in idle time";
-
-  using MS = mozilla::MarkerSchema;
-  using String8View = mozilla::ProfilerString8View;
-  static constexpr MS::PayloadField PayloadFields[] = {
-      {"num_calls", MS::InputType::Uint32, "Number of PurgeNow() calls",
-       MS::Format::Integer},
-      {"next", MS::InputType::CString, "Last result", MS::Format::String}};
-
-  static void StreamJSONMarkerData(
-      mozilla::baseprofiler::SpliceableJSONWriter& aWriter, uint32_t aNumCalls,
-      const String8View& aLastResult) {
-    aWriter.IntProperty("num_calls", aNumCalls);
-    aWriter.StringProperty("last_result", aLastResult);
-  }
-
-  static constexpr MS::Location Locations[] = {MS::Location::MarkerChart,
-                                               MS::Location::MarkerTable};
-};
-
-}  // namespace geckoprofiler::markers
-
-namespace mozilla {
-
 // Do some purging until our idle budget is used.
 //
@@ -1061,44 +1008,39 @@
 // allowed to consume time.
 // aWantsLaterDelay: (Minimum) delay to be used for the WantsLater timer.
 bool RunIdleMemoryCleanup(TimeStamp aDeadline, uint32_t aWantsLaterDelay) {
-  MOZ_ASSERT(!sIdleMemoryCleanupWantsLaterScheduled);
-  TimeStamp start_time = TimeStamp::Now();
-  uint32_t num_calls = 0;
+  AUTO_PROFILER_MARKER_TEXT("RunIdleMemoryCleanup", OTHER, {}, ""_ns);
+  MOZ_ASSERT(!sIdleMemoryCleanupWantsLaterScheduled);
 
   uint32_t reuseGracePeriod =
       StaticPrefs::memory_lazypurge_reuse_grace_period();
 
-  may_purge_now_result_t result;
+  purge_result_t result;
   do {
-    num_calls++;
     result = moz_may_purge_now(
        /* aPeekOnly */ false, reuseGracePeriod, Some([aDeadline] {
          return aDeadline.IsNull() || TimeStamp::Now() <= aDeadline;
        }));
-  } while ((result == may_purge_now_result_t::NeedsMore) &&
+  } while ((result == purge_result_t::NeedsMore) &&
           (aDeadline.IsNull() || TimeStamp::Now() <= aDeadline));
 
-  const char* last_result;
   switch (result) {
-    case may_purge_now_result_t::Done:
-      last_result = "Done (Cancel timer and runner)";
+    case purge_result_t::Done:
+      PROFILER_MARKER_TEXT("RunIdleMemoryCleanup", OTHER, {},
+                           "Done (Cancel timer and runner)"_ns);
      CancelIdleMemoryCleanupTimerAndRunner();
      break;
-    case may_purge_now_result_t::WantsLater:
-      last_result = "WantsLater (First schedule of low priority timer)";
+    case purge_result_t::WantsLater:
+      PROFILER_MARKER_TEXT(
+          "RunIdleMemoryCleanup", OTHER, {},
+          "WantsLater (First schedule of low priority timer)"_ns);
      ScheduleWantsLaterTimer(aWantsLaterDelay);
      break;
-    case may_purge_now_result_t::NeedsMore:
-      last_result = "NeedsMore (wait for next idle slice)";
+    case purge_result_t::NeedsMore:
+      PROFILER_MARKER_TEXT("RunIdleMemoryCleanup", OTHER, {},
+                           "NeedsMore (wait for next idle slice)."_ns);
      break;
  }
 
-  PROFILER_MARKER("IdlePurge", GCCC,
-                  MarkerTiming::IntervalUntilNowFrom(start_time),
-                  IdlePurgeMarker, num_calls,
-                  ProfilerString8View::WrapNullTerminatedString(last_result));
-
   return true;
 };
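
Condensed from the CheckIdleMemoryCleanupNeeded hunk above, the same event reported with the schema-based marker being removed versus the plain text marker being restored (sketch only; the full switch is in the diff):

  // Removed: typed marker with a structured "status" payload field.
  PROFILER_MARKER("IdlePurgePeek", GCCC, MarkerTiming::InstantNow(),
                  IdlePurgePeekMarker,
                  ProfilerString8View::WrapNullTerminatedString(
                      "WantsLater (First schedule of low priority timer)"));

  // Restored: free-form text marker named after the string passed in aClosure.
  PROFILER_MARKER_TEXT(ProfilerString8View::WrapNullTerminatedString(name),
                       OTHER, {},
                       "WantsLater (First schedule of low priority timer)"_ns);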