Bug 958317 - HTTP cache v2: allow yield to more priority levels in IOThread, r=michal

Honza Bambas
2014-02-27 00:11:42 +01:00
parent 3070b230c5
commit c41d5bcdc7
5 changed files with 106 additions and 104 deletions

View File

@@ -42,9 +42,7 @@ namespace net {
#define kOpenHandlesLimit 64
#define kMetadataWriteDelay 5000
#define kEvictionLoopLimit 40 // in milliseconds
#define kRemoveTrashStartDelay 60000 // in milliseconds
#define kRemoveTrashLoopLimit 40 // in milliseconds
bool
CacheFileHandle::DispatchRelease()
@@ -2353,7 +2351,7 @@ CacheFileIOManager::OverLimitEvictionInternal()
MOZ_ASSERT(mIOThread->IsCurrentThread());
// mOverLimitEvicting is accessed only on IO thread, so we can set it to false
// here and set ti to true again once we dispatch another event that will
// here and set it to true again once we dispatch another event that will
// continue with the eviction. The reason why we do so is that we can fail
// early anywhere in this method and the variable will contain a correct
// value. Otherwise we would need to set it to false on every failing place.
@@ -2363,8 +2361,6 @@ CacheFileIOManager::OverLimitEvictionInternal()
return NS_ERROR_NOT_INITIALIZED;
}
TimeStamp start;
while (true) {
uint32_t cacheUsage;
rv = CacheIndex::GetCacheSize(&cacheUsage);
@@ -2380,15 +2376,11 @@ CacheFileIOManager::OverLimitEvictionInternal()
LOG(("CacheFileIOManager::OverLimitEvictionInternal() - Cache size over "
"limit. [cacheSize=%u, limit=%u]", cacheUsage, cacheLimit));
if (start.IsNull()) {
start = TimeStamp::NowLoRes();
} else {
TimeDuration elapsed = TimeStamp::NowLoRes() - start;
if (elapsed.ToMilliseconds() >= kEvictionLoopLimit) {
LOG(("CacheFileIOManager::OverLimitEvictionInternal() - Breaking loop "
"after %u ms.", static_cast<uint32_t>(elapsed.ToMilliseconds())));
break;
}
if (CacheIOThread::YieldAndRerun()) {
LOG(("CacheFileIOManager::OverLimitEvictionInternal() - Breaking loop "
"for higher level events."));
mOverLimitEvicting = true;
return NS_OK;
}
SHA1Sum::Hash hash;
@@ -2443,14 +2435,7 @@ CacheFileIOManager::OverLimitEvictionInternal()
}
}
nsCOMPtr<nsIRunnable> ev;
ev = NS_NewRunnableMethod(this,
&CacheFileIOManager::OverLimitEvictionInternal);
rv = mIOThread->Dispatch(ev, CacheIOThread::EVICT);
NS_ENSURE_SUCCESS(rv, rv);
mOverLimitEvicting = true;
NS_NOTREACHED("We should never get here");
return NS_OK;
}
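
The new shape of OverLimitEvictionInternal() (and of RemoveTrashInternal() below) is: ask the I/O thread whether a higher priority event is waiting and, if so, record that the job is unfinished and return; the thread reruns the handler later, so the explicit self re-dispatch at the end of the method can go away. A minimal standalone sketch of that handler-side pattern, using only the standard library (ShouldYield, gStillEvicting and the other names are illustrative stand-ins, not code from the patch):

#include <cstdio>

// Illustrative stand-ins only; none of these names come from the patch.
static bool gStillEvicting = false;  // plays the role of mOverLimitEvicting
static int  gEntriesLeft   = 10;     // pretend the cache is over its limit by 10 entries
static int  gYieldCalls    = 0;

// Plays the role of CacheIOThread::YieldAndRerun(): pretend a higher priority
// event shows up after every third check.
static bool ShouldYield()
{
  return (++gYieldCalls % 3) == 0;
}

// Handler-side shape after the patch: no time budget and no explicit self
// re-dispatch; the handler flags that it is unfinished and simply returns.
static void OverLimitEvictionSketch()
{
  gStillEvicting = false;            // reset up front, as the real method does
  while (gEntriesLeft > 0) {
    if (ShouldYield()) {
      std::printf("yielding with %d entries left\n", gEntriesLeft);
      gStillEvicting = true;         // the I/O thread will rerun us, nothing to dispatch
      return;
    }
    --gEntriesLeft;                  // "evict" one entry
  }
  std::printf("eviction finished\n");
}

int main()
{
  // Crude stand-in for CacheIOThread rerunning the yielded handler.
  do {
    OverLimitEvictionSketch();
  } while (gStillEvicting);
  return 0;
}

Compared to the deleted time-budget version, the handler reacts as soon as something more important is queued and otherwise keeps running.
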
@@ -2628,20 +2613,12 @@ CacheFileIOManager::RemoveTrashInternal()
// we don't have to drop the flag on any possible early return.
mRemovingTrashDirs = false;
TimeStamp start;
while (true) {
if (start.IsNull()) {
start = TimeStamp::NowLoRes();
} else {
static TimeDuration const kLimit = TimeDuration::FromMilliseconds(
kRemoveTrashLoopLimit);
TimeDuration elapsed = TimeStamp::NowLoRes() - start;
if (elapsed >= kLimit) {
LOG(("CacheFileIOManager::RemoveTrashInternal() - Breaking loop after "
"%u ms.", static_cast<uint32_t>(elapsed.ToMilliseconds())));
break;
}
if (CacheIOThread::YieldAndRerun()) {
LOG(("CacheFileIOManager::RemoveTrashInternal() - Breaking loop for "
"higher level events."));
mRemovingTrashDirs = true;
return NS_OK;
}
// Find some trash directory
@@ -2709,14 +2686,7 @@ CacheFileIOManager::RemoveTrashInternal()
}
}
nsCOMPtr<nsIRunnable> ev;
ev = NS_NewRunnableMethod(this,
&CacheFileIOManager::RemoveTrashInternal);
rv = mIOThread->Dispatch(ev, CacheIOThread::EVICT);
NS_ENSURE_SUCCESS(rv, rv);
mRemovingTrashDirs = true;
NS_NOTREACHED("We should never get here");
return NS_OK;
}

View File

@@ -14,19 +14,25 @@
namespace mozilla {
namespace net {
CacheIOThread* CacheIOThread::sSelf = nullptr;
NS_IMPL_ISUPPORTS1(CacheIOThread, nsIThreadObserver)
CacheIOThread::CacheIOThread()
: mMonitor("CacheIOThread")
, mThread(nullptr)
, mLowestLevelWaiting(LAST_LEVEL)
, mCurrentlyExecutingLevel(0)
, mHasXPCOMEvents(false)
, mRerunCurrentEvent(false)
, mShutdown(false)
{
sSelf = this;
}
CacheIOThread::~CacheIOThread()
{
sSelf = nullptr;
#ifdef DEBUG
for (uint32_t level = 0; level < LAST_LEVEL; ++level) {
MOZ_ASSERT(!mEventQueue[level].Length());
@@ -68,6 +74,27 @@ bool CacheIOThread::IsCurrentThread()
return mThread == PR_GetCurrentThread();
}
bool CacheIOThread::YieldInternal()
{
if (!IsCurrentThread()) {
NS_WARNING("Trying to yield to priority events on non-cache2 I/O thread? "
"You probably do something wrong.");
return false;
}
if (mCurrentlyExecutingLevel == XPCOM_LEVEL) {
// Doesn't make any sense, since this handler is the one
// that would be executed as the next one.
return false;
}
if (!EventsPending(mCurrentlyExecutingLevel))
return false;
mRerunCurrentEvent = true;
return true;
}
nsresult CacheIOThread::Shutdown()
{
{
@@ -137,6 +164,8 @@ loopStart:
"net::cache::io::level(xpcom)");
mHasXPCOMEvents = false;
mCurrentlyExecutingLevel = XPCOM_LEVEL;
MonitorAutoUnlock unlock(mMonitor);
bool processedEvent;
@@ -204,6 +233,8 @@ void CacheIOThread::LoopOneLevel(uint32_t aLevel)
events.SwapElements(mEventQueue[aLevel]);
uint32_t length = events.Length();
mCurrentlyExecutingLevel = aLevel;
bool returnEvents = false;
uint32_t index;
{
@@ -217,7 +248,19 @@ void CacheIOThread::LoopOneLevel(uint32_t aLevel)
break;
}
// Drop any previous flagging, only an event on the current level may set
// this flag.
mRerunCurrentEvent = false;
events[index]->Run();
if (mRerunCurrentEvent) {
// The event handler yields to higher priority events and wants to rerun.
returnEvents = true;
break;
}
// Release outside the lock.
events[index] = nullptr;
}
}
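
The thread-side half of the change lives in LoopOneLevel(): the currently executing level is recorded, mRerunCurrentEvent is cleared before each event, and when an event sets it via YieldInternal() the loop stops and hands the not yet executed events, including the one that yielded, back to that level's queue. A rough standalone model of just that hand-back logic, with std::deque and std::function standing in for the Gecko types (all names here are illustrative):

#include <cstdio>
#include <deque>
#include <functional>
#include <vector>

// One priority level's queue; std::deque and std::function stand in for the
// Gecko containers and runnables. Every name here is illustrative.
using Event = std::function<bool()>;  // returns true when the event wants a rerun
static std::deque<Event> gLevelQueue;

// Rough model of the logic the patch adds to LoopOneLevel(): run the events of
// one level, and on a yield stop and hand the rest, including the yielding
// event itself, back to the queue.
static void LoopOneLevelSketch()
{
  std::vector<Event> events(gLevelQueue.begin(), gLevelQueue.end());
  gLevelQueue.clear();                // "swap" the level's queue out, like SwapElements()

  size_t index = 0;
  bool returnEvents = false;
  for (; index < events.size(); ++index) {
    if (events[index]()) {            // the event asked to be rerun
      returnEvents = true;
      break;
    }
  }

  if (returnEvents) {
    // Put the unfinished tail back so it runs again after higher levels drain.
    gLevelQueue.insert(gLevelQueue.begin(), events.begin() + index, events.end());
  }
}

int main()
{
  int workLeft = 5;
  gLevelQueue.push_back([&workLeft]() -> bool {
    --workLeft;
    std::printf("did one unit of work, %d left\n", workLeft);
    return workLeft > 0;              // keep yielding until the work is done
  });

  while (!gLevelQueue.empty()) {      // stand-in for the thread's outer loop
    LoopOneLevelSketch();
  }
  return 0;
}

Because the yielding event goes back ahead of the later events at its level, the ordering promise documented in CacheIOThread.h holds.
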

View File

@@ -36,12 +36,32 @@ public:
CLOSE,
BUILD_OR_UPDATE_INDEX,
EVICT,
LAST_LEVEL
LAST_LEVEL,
// This level is actually executed first, but we want the enum value only
// as an indicator, while the other values are used as indexes into the
// queue array. Hence it is placed at the end rather than first.
XPCOM_LEVEL
};
nsresult Init();
nsresult Dispatch(nsIRunnable* aRunnable, uint32_t aLevel);
bool IsCurrentThread();
/**
* Callable only on this thread; checks whether an event with a higher
* execution priority is waiting in the event queue. If so, the result
* is true and the current event handler should stop its work and return
* from its Run() method immediately. The handler will be rerun once all
* higher priority events have been processed. Events queued after this
* handler (i.e. the one that called YieldAndRerun()) will not execute
* before this handler has run again and completed without calling
* YieldAndRerun().
*/
static bool YieldAndRerun()
{
return sSelf ? sSelf->YieldInternal() : false;
}
nsresult Shutdown();
already_AddRefed<nsIEventTarget> Target();
@@ -54,14 +74,19 @@ private:
void ThreadFunc();
void LoopOneLevel(uint32_t aLevel);
bool EventsPending(uint32_t aLastLevel = LAST_LEVEL);
bool YieldInternal();
static CacheIOThread* sSelf;
mozilla::Monitor mMonitor;
PRThread* mThread;
nsCOMPtr<nsIThread> mXPCOMThread;
uint32_t mLowestLevelWaiting;
uint32_t mCurrentlyExecutingLevel;
nsTArray<nsRefPtr<nsIRunnable> > mEventQueue[LAST_LEVEL];
bool mHasXPCOMEvents;
bool mRerunCurrentEvent;
bool mShutdown;
};
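
The comment above YieldAndRerun() defines the contract: a long handler polls it, returns from Run() as soon as it reports true, is rerun after the higher priority levels drain, and events queued behind it on the same level wait until it finishes a run without yielding. A small self-contained demonstration of that ordering with a toy two-level queue (HIGH, LOW, HigherLevelPending() and the rest are assumptions for the sketch, not part of CacheIOThread):

#include <cstdio>
#include <deque>
#include <functional>

// A toy two-level queue; lower index means higher priority, as in the enum above.
enum Level { HIGH = 0, LOW = 1, LEVEL_COUNT = 2 };
using Event = std::function<bool()>;          // true = "yield, rerun me later"
static std::deque<Event> gQueue[LEVEL_COUNT];

// Analogue of what YieldAndRerun() reports: is anything more important waiting?
static bool HigherLevelPending(int aLevel)
{
  for (int l = 0; l < aLevel; ++l) {
    if (!gQueue[l].empty()) {
      return true;
    }
  }
  return false;
}

int main()
{
  int chunksLeft = 3;

  // A long LOW level job that yields whenever something HIGH is waiting.
  gQueue[LOW].push_back([&]() -> bool {
    while (chunksLeft > 0) {
      std::printf("low level job: chunk %d\n", chunksLeft);
      --chunksLeft;
      if (chunksLeft == 2) {                  // pretend a high priority event arrives now
        gQueue[HIGH].push_back([]() -> bool {
          std::printf("high priority event\n");
          return false;
        });
      }
      if (chunksLeft > 0 && HigherLevelPending(LOW)) {
        std::printf("low level job: yielding\n");
        return true;                          // the YieldAndRerun() analogue fired
      }
    }
    return false;                             // finished, no rerun needed
  });

  // An event queued behind the job on the same level.
  gQueue[LOW].push_back([]() -> bool {
    std::printf("later low level event\n");
    return false;
  });

  // Minimal loop: always serve the highest non-empty level; a yielding event
  // goes back to the front of its own level, ahead of the later events there.
  while (true) {
    int level = 0;
    while (level < LEVEL_COUNT && gQueue[level].empty()) {
      ++level;
    }
    if (level == LEVEL_COUNT) {
      break;
    }
    Event ev = gQueue[level].front();
    gQueue[level].pop_front();
    if (ev()) {
      gQueue[level].push_front(ev);
    }
  }
  return 0;
}

Running it prints the first chunk of the low level job, then the high priority event, then the remaining chunks, and only then the later low level event.
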

View File

@@ -25,8 +25,6 @@
#define kIndexVersion 0x00000001
#define kBuildIndexStartDelay 10000 // in milliseconds
#define kUpdateIndexStartDelay 10000 // in milliseconds
#define kBuildIndexLoopLimit 40 // in milliseconds
#define kUpdateIndexLoopLimit 40 // in milliseconds
const char kIndexName[] = "index";
const char kTempIndexName[] = "index.tmp";
@@ -2369,20 +2367,10 @@ CacheIndex::BuildIndex()
}
}
TimeStamp start;
while (true) {
if (start.IsNull()) {
start = TimeStamp::NowLoRes();
} else {
static TimeDuration const kLimit = TimeDuration::FromMilliseconds(
kBuildIndexLoopLimit);
TimeDuration elapsed = TimeStamp::NowLoRes() - start;
if (elapsed >= kLimit) {
LOG(("CacheIndex::BuildIndex() - Breaking loop after %u ms.",
static_cast<uint32_t>(elapsed.ToMilliseconds())));
break;
}
if (CacheIOThread::YieldAndRerun()) {
LOG(("CacheIndex::BuildIndex() - Breaking loop for higher level events."));
return;
}
nsCOMPtr<nsIFile> file;
@@ -2484,16 +2472,7 @@ CacheIndex::BuildIndex()
}
}
nsRefPtr<CacheIOThread> ioThread = CacheFileIOManager::IOThread();
MOZ_ASSERT(ioThread);
rv = ioThread->Dispatch(this, CacheIOThread::BUILD_OR_UPDATE_INDEX);
if (NS_FAILED(rv)) {
NS_WARNING("CacheIndex::BuildIndex() - Can't dispatch event");
LOG(("CacheIndex::BuildIndex() - Can't dispatch event" ));
FinishBuild(false);
return;
}
NS_NOTREACHED("We should never get here");
}
void
@@ -2620,20 +2599,11 @@ CacheIndex::UpdateIndex()
}
}
TimeStamp start;
while (true) {
if (start.IsNull()) {
start = TimeStamp::NowLoRes();
} else {
static TimeDuration const kLimit = TimeDuration::FromMilliseconds(
kUpdateIndexLoopLimit);
TimeDuration elapsed = TimeStamp::NowLoRes() - start;
if (elapsed >= kLimit) {
LOG(("CacheIndex::UpdateIndex() - Breaking loop after %u ms.",
static_cast<uint32_t>(elapsed.ToMilliseconds())));
break;
}
if (CacheIOThread::YieldAndRerun()) {
LOG(("CacheIndex::UpdateIndex() - Breaking loop for higher level "
"events."));
return;
}
nsCOMPtr<nsIFile> file;
@@ -2769,16 +2739,7 @@ CacheIndex::UpdateIndex()
}
}
nsRefPtr<CacheIOThread> ioThread = CacheFileIOManager::IOThread();
MOZ_ASSERT(ioThread);
rv = ioThread->Dispatch(this, CacheIOThread::BUILD_OR_UPDATE_INDEX);
if (NS_FAILED(rv)) {
NS_WARNING("CacheIndex::UpdateIndex() - Can't dispatch event");
LOG(("CacheIndex::UpdateIndex() - Can't dispatch event" ));
FinishUpdate(false);
return;
}
NS_NOTREACHED("We should never get here");
}
void
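
For contrast, the pattern deleted from BuildIndex() and UpdateIndex() (and from the eviction code above) time-sliced the work: remember a start timestamp, break out after a fixed 40 ms budget (kBuildIndexLoopLimit / kUpdateIndexLoopLimit), and re-dispatch the runnable at BUILD_OR_UPDATE_INDEX level. A rough standalone equivalent of that old shape with std::chrono (the names and the fake 5 ms per item are illustrative; only the 40 ms budget comes from the deleted constants):

#include <chrono>
#include <cstdio>
#include <thread>

// The old, now deleted, shape: run until a fixed time budget is spent, then
// stop and ask to be dispatched again, regardless of what else is queued.
static bool BuildIndexTimeSliced(int& aItemsLeft)
{
  using namespace std::chrono;
  constexpr auto kLoopLimit = milliseconds(40);   // the deleted 40 ms loop limit

  auto start = steady_clock::now();
  while (aItemsLeft > 0) {
    if (steady_clock::now() - start >= kLoopLimit) {
      std::printf("budget spent, %d items left, dispatching again\n", aItemsLeft);
      return true;                                // caller must dispatch this runnable again
    }
    std::this_thread::sleep_for(milliseconds(5)); // pretend to index one file
    --aItemsLeft;
  }
  std::printf("index build finished\n");
  return false;
}

int main()
{
  int itemsLeft = 20;
  // Stand-in for repeatedly re-dispatching the runnable at BUILD_OR_UPDATE_INDEX level.
  while (BuildIndexTimeSliced(itemsLeft)) {
  }
  return 0;
}

The drawback this patch addresses: the slice spends its full budget even when higher priority I/O is already waiting, and it gives up the thread even when nothing else is queued.
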

View File

@@ -638,13 +638,11 @@ nsresult CacheFilesDeletor::Execute()
}
nsresult rv;
TimeStamp start;
switch (mMode) {
case ALL:
case DOOMED:
// Simply delete all files, don't doom them through the backend
start = TimeStamp::NowLoRes();
while (mEnumerator->HasMore()) {
nsCOMPtr<nsIFile> file;
@@ -665,15 +663,9 @@ nsresult CacheFilesDeletor::Execute()
++mRunning;
if (!(mRunning % (1 << 5)) && mEnumerator->HasMore()) {
TimeStamp now(TimeStamp::NowLoRes());
#define DELETOR_LOOP_LIMIT_MS 200
static TimeDuration const kLimitms = TimeDuration::FromMilliseconds(DELETOR_LOOP_LIMIT_MS);
if ((now - start) > kLimitms) {
LOG((" deleted %u files, breaking %dms loop", mRunning, DELETOR_LOOP_LIMIT_MS));
rv = mIOThread->Dispatch(this, CacheIOThread::EVICT);
return rv;
}
if (CacheIOThread::YieldAndRerun()) {
LOG((" deleted %u files, breaking loop for higher level events."));
return NS_OK;
}
}
@@ -1066,7 +1058,9 @@ CacheStorageService::PurgeOverMemoryLimit()
LOG((" purging took %1.2fms", (TimeStamp::Now() - start).ToMilliseconds()));
mPurging = false;
// When we exit because of yield, leave the flag so this event is not reposted
// from OnMemoryConsumptionChange unnecessarily until we are dequeued again.
mPurging = CacheIOThread::YieldAndRerun();
}
void
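
PurgeOverMemoryLimit() now ends with mPurging = CacheIOThread::YieldAndRerun(): when the purge yields, the flag stays set so OnMemoryConsumptionChange() does not post a second purge event while the yielded one is still waiting to be rerun; when the purge completes, the flag drops and future consumption changes may post again. A small standalone sketch of that in-flight flag idiom (the queue, gPurging and both function names are illustrative):

#include <cstdio>
#include <deque>
#include <functional>

// Every name below is illustrative; the queue stands in for the cache I/O thread.
static std::deque<std::function<void()>> gQueue;
static bool gPurging = false;           // plays the role of mPurging
static int  gOverLimitBy = 5;           // pretend we are 5 entries over the memory limit
static int  gYieldCalls = 0;

// Stand-in for CacheIOThread::YieldAndRerun(): pretend something more important
// shows up on every second check.
static bool YieldRequested()
{
  return (++gYieldCalls % 2) == 0;
}

static void PurgeOverMemoryLimitSketch();

// Called whenever memory consumption changes; it must not flood the queue while
// a purge is already queued, or yielded and waiting to be rerun.
static void OnMemoryConsumptionChangeSketch()
{
  if (gPurging) {
    return;                             // a purge is already pending, nothing to post
  }
  gPurging = true;
  gQueue.push_back(PurgeOverMemoryLimitSketch);
}

static void PurgeOverMemoryLimitSketch()
{
  while (gOverLimitBy > 0) {
    if (YieldRequested()) {
      // Leave the flag set: a rerun is already guaranteed, so do not repost.
      gPurging = true;
      gQueue.push_back(PurgeOverMemoryLimitSketch);  // simulates the thread's own rerun
      return;
    }
    --gOverLimitBy;                     // purge one entry
  }
  gPurging = false;                     // done; the next consumption change may post again
  std::printf("purge finished\n");
}

int main()
{
  OnMemoryConsumptionChangeSketch();
  OnMemoryConsumptionChangeSketch();    // ignored, a purge is already pending
  while (!gQueue.empty()) {
    std::function<void()> ev = gQueue.front();
    gQueue.pop_front();
    ev();
  }
  return 0;
}
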
@@ -1080,6 +1074,9 @@ CacheStorageService::PurgeExpired()
uint32_t const memoryLimit = CacheObserver::MemoryLimit();
for (uint32_t i = 0; mMemorySize > memoryLimit && i < mExpirationArray.Length();) {
if (CacheIOThread::YieldAndRerun())
return;
nsRefPtr<CacheEntry> entry = mExpirationArray[i];
uint32_t expirationTime = entry->GetExpirationTime();
@@ -1109,6 +1106,9 @@ CacheStorageService::PurgeByFrecency(bool &aFrecencyNeedsSort, uint32_t aWhat)
uint32_t const memoryLimit = CacheObserver::MemoryLimit();
for (uint32_t i = 0; mMemorySize > memoryLimit && i < mFrecencyArray.Length();) {
if (CacheIOThread::YieldAndRerun())
return;
nsRefPtr<CacheEntry> entry = mFrecencyArray[i];
if (entry->Purge(aWhat)) {
@@ -1129,6 +1129,9 @@ CacheStorageService::PurgeAll(uint32_t aWhat)
MOZ_ASSERT(IsOnManagementThread());
for (uint32_t i = 0; i < mFrecencyArray.Length();) {
if (CacheIOThread::YieldAndRerun())
return;
nsRefPtr<CacheEntry> entry = mFrecencyArray[i];
if (entry->Purge(aWhat)) {