Bug 1291292 - Use dynamic chunk allocation for the nursery r=terrence

This commit is contained in:
Jon Coppeard 2016-08-11 17:14:56 +01:00
parent 7acfb399ba
commit 17304689a2
12 changed files with 261 additions and 196 deletions

View file

@ -283,7 +283,6 @@ struct GCSizes
#define FOR_EACH_SIZE(macro) \ #define FOR_EACH_SIZE(macro) \
macro(_, MallocHeap, marker) \ macro(_, MallocHeap, marker) \
macro(_, NonHeap, nurseryCommitted) \ macro(_, NonHeap, nurseryCommitted) \
macro(_, NonHeap, nurseryDecommitted) \
macro(_, MallocHeap, nurseryMallocedBuffers) \ macro(_, MallocHeap, nurseryMallocedBuffers) \
macro(_, MallocHeap, storeBufferVals) \ macro(_, MallocHeap, storeBufferVals) \
macro(_, MallocHeap, storeBufferCells) \ macro(_, MallocHeap, storeBufferCells) \

View file

@ -260,30 +260,6 @@ GCRuntime::checkIncrementalZoneState(ExclusiveContext* cx, T* t)
// /////////// Arena -> Thing Allocator ////////////////////////////////////// // /////////// Arena -> Thing Allocator //////////////////////////////////////
// After pulling a Chunk out of the empty chunks pool, we want to run the
// background allocator to refill it. The code that takes Chunks does so under
// the GC lock. We need to start the background allocation under the helper
// threads lock. To avoid lock inversion we have to delay the start until after
// we are outside the GC lock. This class handles that delay automatically.
class MOZ_RAII js::gc::AutoMaybeStartBackgroundAllocation
{
JSRuntime* runtime;
public:
AutoMaybeStartBackgroundAllocation()
: runtime(nullptr)
{}
void tryToStartBackgroundAllocation(JSRuntime* rt) {
runtime = rt;
}
~AutoMaybeStartBackgroundAllocation() {
if (runtime)
runtime->gc.startBackgroundAllocTaskIfIdle();
}
};
void void
GCRuntime::startBackgroundAllocTaskIfIdle() GCRuntime::startBackgroundAllocTaskIfIdle()
{ {
@ -547,7 +523,7 @@ GCRuntime::getOrAllocChunk(const AutoLockGC& lock,
} }
if (wantBackgroundAllocation(lock)) if (wantBackgroundAllocation(lock))
maybeStartBackgroundAllocation.tryToStartBackgroundAllocation(rt); maybeStartBackgroundAllocation.tryToStartBackgroundAllocation(rt->gc);
return chunk; return chunk;
} }

View file

@ -1387,6 +1387,30 @@ class MOZ_RAII AutoEnterIteration {
} }
}; };
// After pulling a Chunk out of the empty chunks pool, we want to run the
// background allocator to refill it. The code that takes Chunks does so under
// the GC lock. We need to start the background allocation under the helper
// threads lock. To avoid lock inversion we have to delay the start until after
// we are outside the GC lock. This class handles that delay automatically.
class MOZ_RAII AutoMaybeStartBackgroundAllocation
{
GCRuntime* gc;
public:
AutoMaybeStartBackgroundAllocation()
: gc(nullptr)
{}
void tryToStartBackgroundAllocation(GCRuntime& gc) {
this->gc = &gc;
}
~AutoMaybeStartBackgroundAllocation() {
if (gc)
gc->startBackgroundAllocTaskIfIdle();
}
};
#ifdef JS_GC_ZEAL #ifdef JS_GC_ZEAL
inline bool inline bool

View file

@ -1002,7 +1002,7 @@ struct Chunk
void decommitAllArenasWithoutUnlocking(const AutoLockGC& lock); void decommitAllArenasWithoutUnlocking(const AutoLockGC& lock);
static Chunk* allocate(JSRuntime* rt); static Chunk* allocate(JSRuntime* rt);
inline void init(JSRuntime* rt); void init(JSRuntime* rt);
private: private:
void decommitAllArenas(JSRuntime* rt); void decommitAllArenas(JSRuntime* rt);

View file

@ -82,16 +82,41 @@ struct js::Nursery::Canary
}; };
#endif #endif
inline void
js::Nursery::NurseryChunk::poisonAndInit(JSRuntime* rt, uint8_t poison)
{
JS_POISON(this, poison, ChunkSize);
init(rt);
}
inline void
js::Nursery::NurseryChunk::init(JSRuntime* rt)
{
new (&trailer) gc::ChunkTrailer(rt, &rt->gc.storeBuffer);
}
/* static */ inline js::Nursery::NurseryChunk*
js::Nursery::NurseryChunk::fromChunk(Chunk* chunk)
{
return reinterpret_cast<NurseryChunk*>(chunk);
}
inline Chunk*
js::Nursery::NurseryChunk::toChunk(JSRuntime* rt)
{
auto chunk = reinterpret_cast<Chunk*>(this);
chunk->init(rt);
return chunk;
}
js::Nursery::Nursery(JSRuntime* rt) js::Nursery::Nursery(JSRuntime* rt)
: runtime_(rt) : runtime_(rt)
, position_(0) , position_(0)
, currentStart_(0) , currentStartChunk_(0)
, currentStartPosition_(0)
, currentEnd_(0) , currentEnd_(0)
, heapStart_(0)
, heapEnd_(0)
, currentChunk_(0) , currentChunk_(0)
, numActiveChunks_(0) , maxNurseryChunks_(0)
, numNurseryChunks_(0)
, previousPromotionRate_(0) , previousPromotionRate_(0)
, profileThreshold_(0) , profileThreshold_(0)
, enableProfiling_(false) , enableProfiling_(false)
@ -104,13 +129,13 @@ js::Nursery::Nursery(JSRuntime* rt)
{} {}
bool bool
js::Nursery::init(uint32_t maxNurseryBytes) js::Nursery::init(uint32_t maxNurseryBytes, AutoLockGC& lock)
{ {
/* maxNurseryBytes parameter is rounded down to a multiple of chunk size. */ /* maxNurseryBytes parameter is rounded down to a multiple of chunk size. */
numNurseryChunks_ = maxNurseryBytes >> ChunkShift; maxNurseryChunks_ = maxNurseryBytes >> ChunkShift;
/* If no chunks are specified then the nursery is permanently disabled. */ /* If no chunks are specified then the nursery is permanently disabled. */
if (numNurseryChunks_ == 0) if (maxNurseryChunks_ == 0)
return true; return true;
if (!mallocedBuffers.init()) if (!mallocedBuffers.init())
@ -119,21 +144,16 @@ js::Nursery::init(uint32_t maxNurseryBytes)
if (!cellsWithUid_.init()) if (!cellsWithUid_.init())
return false; return false;
void* heap = MapAlignedPages(nurserySize(), Alignment);
if (!heap)
return false;
freeMallocedBuffersTask = js_new<FreeMallocedBuffersTask>(runtime()->defaultFreeOp()); freeMallocedBuffersTask = js_new<FreeMallocedBuffersTask>(runtime()->defaultFreeOp());
if (!freeMallocedBuffersTask || !freeMallocedBuffersTask->init()) if (!freeMallocedBuffersTask || !freeMallocedBuffersTask->init())
return false; return false;
heapStart_ = uintptr_t(heap); updateNumChunksLocked(1, lock);
heapEnd_ = heapStart_ + nurserySize(); if (numChunks() == 0)
currentStart_ = start(); return false;
numActiveChunks_ = numNurseryChunks_;
JS_POISON(heap, JS_FRESH_NURSERY_PATTERN, nurserySize());
updateNumActiveChunks(1);
setCurrentChunk(0); setCurrentChunk(0);
setStartPosition();
char* env = getenv("JS_GC_PROFILE_NURSERY"); char* env = getenv("JS_GC_PROFILE_NURSERY");
if (env) { if (env) {
@ -150,15 +170,16 @@ js::Nursery::init(uint32_t maxNurseryBytes)
PodZero(&profileTimes_); PodZero(&profileTimes_);
PodZero(&totalTimes_); PodZero(&totalTimes_);
if (!runtime()->gc.storeBuffer.enable())
return false;
MOZ_ASSERT(isEnabled()); MOZ_ASSERT(isEnabled());
return true; return true;
} }
js::Nursery::~Nursery() js::Nursery::~Nursery()
{ {
if (start()) disable();
UnmapPages((void*)start(), nurserySize());
js_delete(freeMallocedBuffersTask); js_delete(freeMallocedBuffersTask);
} }
@ -169,13 +190,20 @@ js::Nursery::enable()
MOZ_ASSERT(!runtime()->gc.isVerifyPreBarriersEnabled()); MOZ_ASSERT(!runtime()->gc.isVerifyPreBarriersEnabled());
if (isEnabled()) if (isEnabled())
return; return;
updateNumActiveChunks(1);
updateNumChunks(1);
if (numChunks() == 0)
return;
setCurrentChunk(0); setCurrentChunk(0);
currentStart_ = position(); setStartPosition();
#ifdef JS_GC_ZEAL #ifdef JS_GC_ZEAL
if (runtime()->hasZealMode(ZealMode::GenerationalGC)) if (runtime()->hasZealMode(ZealMode::GenerationalGC))
enterZealMode(); enterZealMode();
#endif #endif
MOZ_ALWAYS_TRUE(runtime()->gc.storeBuffer.enable());
return;
} }
void void
@ -184,8 +212,9 @@ js::Nursery::disable()
MOZ_ASSERT(isEmpty()); MOZ_ASSERT(isEmpty());
if (!isEnabled()) if (!isEnabled())
return; return;
updateNumActiveChunks(0); updateNumChunks(0);
currentEnd_ = 0; currentEnd_ = 0;
runtime()->gc.storeBuffer.disable();
} }
bool bool
@ -194,15 +223,19 @@ js::Nursery::isEmpty() const
MOZ_ASSERT(runtime_); MOZ_ASSERT(runtime_);
if (!isEnabled()) if (!isEnabled())
return true; return true;
MOZ_ASSERT_IF(!runtime_->hasZealMode(ZealMode::GenerationalGC), currentStart_ == start());
return position() == currentStart_; if (!runtime_->hasZealMode(ZealMode::GenerationalGC)) {
MOZ_ASSERT(currentStartChunk_ == 0);
MOZ_ASSERT(currentStartPosition_ == chunk(0).start());
}
return position() == currentStartPosition_;
} }
#ifdef JS_GC_ZEAL #ifdef JS_GC_ZEAL
void void
js::Nursery::enterZealMode() { js::Nursery::enterZealMode() {
if (isEnabled()) if (isEnabled())
numActiveChunks_ = numNurseryChunks_; updateNumChunks(maxNurseryChunks_);
} }
void void
@ -210,7 +243,7 @@ js::Nursery::leaveZealMode() {
if (isEnabled()) { if (isEnabled()) {
MOZ_ASSERT(isEmpty()); MOZ_ASSERT(isEmpty());
setCurrentChunk(0); setCurrentChunk(0);
currentStart_ = start(); setStartPosition();
} }
} }
#endif // JS_GC_ZEAL #endif // JS_GC_ZEAL
@ -260,7 +293,7 @@ js::Nursery::allocate(size_t size)
{ {
MOZ_ASSERT(isEnabled()); MOZ_ASSERT(isEnabled());
MOZ_ASSERT(!runtime()->isHeapBusy()); MOZ_ASSERT(!runtime()->isHeapBusy());
MOZ_ASSERT(position() >= currentStart_); MOZ_ASSERT_IF(currentChunk_ == currentStartChunk_, position() >= currentStartPosition_);
MOZ_ASSERT(position() % gc::CellSize == 0); MOZ_ASSERT(position() % gc::CellSize == 0);
MOZ_ASSERT(size % gc::CellSize == 0); MOZ_ASSERT(size % gc::CellSize == 0);
@ -271,7 +304,7 @@ js::Nursery::allocate(size_t size)
#endif #endif
if (currentEnd() < position() + size) { if (currentEnd() < position() + size) {
if (currentChunk_ + 1 == numActiveChunks_) if (currentChunk_ + 1 == numChunks())
return nullptr; return nullptr;
setCurrentChunk(currentChunk_ + 1); setCurrentChunk(currentChunk_ + 1);
} }
@ -366,10 +399,10 @@ Nursery::setForwardingPointer(void* oldData, void* newData, bool direct)
{ {
MOZ_ASSERT(isInside(oldData)); MOZ_ASSERT(isInside(oldData));
// Bug 1196210: If a zero-capacity header lands in the last 2 words of the // Bug 1196210: If a zero-capacity header lands in the last 2 words of a
// jemalloc chunk abutting the start of the nursery, the (invalid) newData // jemalloc chunk abutting the start of a nursery chunk, the (invalid)
// pointer will appear to be "inside" the nursery. // newData pointer will appear to be "inside" the nursery.
MOZ_ASSERT(!isInside(newData) || uintptr_t(newData) == heapStart_); MOZ_ASSERT(!isInside(newData) || (uintptr_t(newData) & ChunkMask) == 0);
if (direct) { if (direct) {
*reinterpret_cast<void**>(oldData) = newData; *reinterpret_cast<void**>(oldData) = newData;
@ -539,7 +572,7 @@ js::Nursery::collect(JSRuntime* rt, JS::gcreason::Reason reason)
AutoDisableProxyCheck disableStrictProxyChecking(rt); AutoDisableProxyCheck disableStrictProxyChecking(rt);
mozilla::DebugOnly<AutoEnterOOMUnsafeRegion> oomUnsafeRegion; mozilla::DebugOnly<AutoEnterOOMUnsafeRegion> oomUnsafeRegion;
size_t initialUsedSpace = position() - start(); size_t initialUsedSpace = usedSpace();
// Move objects pointed to by roots from the nursery to the major heap. // Move objects pointed to by roots from the nursery to the major heap.
TenuringTracer mover(rt, this); TenuringTracer mover(rt, this);
@ -697,10 +730,10 @@ js::Nursery::collect(JSRuntime* rt, JS::gcreason::Reason reason)
printProfileHeader(); printProfileHeader();
} }
fprintf(stderr, "MinorGC: %20s %5.1f%% %4d ", fprintf(stderr, "MinorGC: %20s %5.1f%% %4u ",
JS::gcreason::ExplainReason(reason), JS::gcreason::ExplainReason(reason),
promotionRate * 100, promotionRate * 100,
numActiveChunks_); numChunks());
printProfileTimes(profileTimes_); printProfileTimes(profileTimes_);
} }
} }
@ -768,40 +801,69 @@ js::Nursery::sweep()
#ifdef JS_GC_ZEAL #ifdef JS_GC_ZEAL
/* Poison the nursery contents so touching a freed object will crash. */ /* Poison the nursery contents so touching a freed object will crash. */
JS_POISON((void*)start(), JS_SWEPT_NURSERY_PATTERN, nurserySize()); for (unsigned i = 0; i < numChunks(); i++)
for (int i = 0; i < numNurseryChunks_; ++i) chunk(i).poisonAndInit(runtime(), JS_SWEPT_NURSERY_PATTERN);
initChunk(i);
if (runtime()->hasZealMode(ZealMode::GenerationalGC)) { if (runtime()->hasZealMode(ZealMode::GenerationalGC)) {
MOZ_ASSERT(numActiveChunks_ == numNurseryChunks_);
/* Only reset the alloc point when we are close to the end. */ /* Only reset the alloc point when we are close to the end. */
if (currentChunk_ + 1 == numNurseryChunks_) if (currentChunk_ + 1 == numChunks())
setCurrentChunk(0); setCurrentChunk(0);
} else } else
#endif #endif
{ {
#ifdef JS_CRASH_DIAGNOSTICS #ifdef JS_CRASH_DIAGNOSTICS
JS_POISON((void*)start(), JS_SWEPT_NURSERY_PATTERN, allocationEnd() - start()); for (unsigned i = 0; i < numChunks(); ++i)
for (int i = 0; i < numActiveChunks_; ++i) chunk(i).poisonAndInit(runtime(), JS_SWEPT_NURSERY_PATTERN);
initChunk(i);
#endif #endif
setCurrentChunk(0); setCurrentChunk(0);
} }
/* Set current start position for isEmpty checks. */ /* Set current start position for isEmpty checks. */
currentStart_ = position(); setStartPosition();
MemProfiler::SweepNursery(runtime()); MemProfiler::SweepNursery(runtime());
} }
size_t
js::Nursery::usedSpace() const
{
MOZ_ASSERT(currentChunk_ >= currentStartChunk_);
MOZ_ASSERT(currentStartPosition_ - chunk(currentStartChunk_).start() <= NurseryChunkUsableSize);
MOZ_ASSERT(position_ - chunk(currentChunk_).start() <= NurseryChunkUsableSize);
if (currentChunk_ == currentStartChunk_)
return position_ - currentStartPosition_;
size_t bytes = (chunk(currentStartChunk_).end() - currentStartPosition_) +
((currentChunk_ - currentStartChunk_ - 1) * NurseryChunkUsableSize) +
position_ - chunk(currentChunk_).start();
MOZ_ASSERT(bytes <= numChunks() * NurseryChunkUsableSize);
return bytes;
}
MOZ_ALWAYS_INLINE void
js::Nursery::setCurrentChunk(unsigned chunkno)
{
MOZ_ASSERT(chunkno < maxChunks());
MOZ_ASSERT(chunkno < numChunks());
currentChunk_ = chunkno;
position_ = chunk(chunkno).start();
currentEnd_ = chunk(chunkno).end();
chunk(chunkno).poisonAndInit(runtime(), JS_FRESH_NURSERY_PATTERN);
}
MOZ_ALWAYS_INLINE void
js::Nursery::setStartPosition()
{
currentStartChunk_ = currentChunk_;
currentStartPosition_ = position();
}
void void
js::Nursery::growAllocableSpace() js::Nursery::growAllocableSpace()
{ {
#ifdef JS_GC_ZEAL updateNumChunks(Min(numChunks() * 2, maxNurseryChunks_));
MOZ_ASSERT_IF(runtime()->hasZealMode(ZealMode::GenerationalGC),
numActiveChunks_ == numNurseryChunks_);
#endif
updateNumActiveChunks(Min(numActiveChunks_ * 2, numNurseryChunks_));
} }
void void
@ -811,30 +873,51 @@ js::Nursery::shrinkAllocableSpace()
if (runtime()->hasZealMode(ZealMode::GenerationalGC)) if (runtime()->hasZealMode(ZealMode::GenerationalGC))
return; return;
#endif #endif
updateNumActiveChunks(Max(numActiveChunks_ - 1, 1)); updateNumChunks(Max(numChunks() - 1, 1u));
} }
void void
js::Nursery::updateNumActiveChunks(int newCount) js::Nursery::updateNumChunks(unsigned newCount)
{ {
#ifndef JS_GC_ZEAL if (numChunks() != newCount) {
int priorChunks = numActiveChunks_; AutoLockGC lock(runtime());
#endif updateNumChunksLocked(newCount, lock);
numActiveChunks_ = newCount; }
}
// In zeal mode, we want to keep the unused memory poisoned so that we
// will crash sooner. Avoid decommit in that case to avoid having the void
// system zero the pages. js::Nursery::updateNumChunksLocked(unsigned newCount, AutoLockGC& lock)
#ifndef JS_GC_ZEAL {
if (numActiveChunks_ < priorChunks) { // The GC nursery is an optimization and so if we fail to allocate nursery
uintptr_t decommitStart = chunk(numActiveChunks_).start(); // chunks we do not report an error.
uintptr_t decommitSize = chunk(priorChunks - 1).start() + ChunkSize - decommitStart;
MOZ_ASSERT(decommitSize != 0); unsigned priorCount = numChunks();
MOZ_ASSERT(decommitStart == AlignBytes(decommitStart, Alignment)); MOZ_ASSERT(priorCount != newCount);
MOZ_ASSERT(decommitSize == AlignBytes(decommitSize, Alignment));
MarkPagesUnused((void*)decommitStart, decommitSize); AutoMaybeStartBackgroundAllocation maybeBgAlloc;
if (newCount < priorCount) {
// Shrink the nursery and free unused chunks.
for (unsigned i = newCount; i < priorCount; i++)
runtime()->gc.recycleChunk(chunk(i).toChunk(runtime()), lock);
chunks_.shrinkTo(newCount);
return;
}
// Grow the nursery and allocate new chunks.
if (!chunks_.resize(newCount))
return;
for (unsigned i = priorCount; i < newCount; i++) {
auto newChunk = runtime()->gc.getOrAllocChunk(lock, maybeBgAlloc);
if (!newChunk) {
chunks_.shrinkTo(i);
return;
}
chunks_[i] = NurseryChunk::fromChunk(newChunk);
chunk(i).poisonAndInit(runtime(), JS_FRESH_NURSERY_PATTERN);
} }
#endif // !defined(JS_GC_ZEAL)
} }
void void

View file

@ -121,15 +121,17 @@ class Nursery
explicit Nursery(JSRuntime* rt); explicit Nursery(JSRuntime* rt);
~Nursery(); ~Nursery();
MOZ_MUST_USE bool init(uint32_t maxNurseryBytes); MOZ_MUST_USE bool init(uint32_t maxNurseryBytes, AutoLockGC& lock);
bool exists() const { return numNurseryChunks_ != 0; } unsigned maxChunks() const { return maxNurseryChunks_; }
size_t numChunks() const { return numNurseryChunks_; } unsigned numChunks() const { return chunks_.length(); }
size_t nurserySize() const { return numNurseryChunks_ << ChunkShift; }
bool exists() const { return maxChunks() != 0; }
size_t nurserySize() const { return maxChunks() << ChunkShift; }
void enable(); void enable();
void disable(); void disable();
bool isEnabled() const { return numActiveChunks_ != 0; } bool isEnabled() const { return numChunks() != 0; }
/* Return true if no allocations have been made since the last collection. */ /* Return true if no allocations have been made since the last collection. */
bool isEmpty() const; bool isEmpty() const;
@ -140,7 +142,11 @@ class Nursery
*/ */
MOZ_ALWAYS_INLINE bool isInside(gc::Cell* cellp) const = delete; MOZ_ALWAYS_INLINE bool isInside(gc::Cell* cellp) const = delete;
MOZ_ALWAYS_INLINE bool isInside(const void* p) const { MOZ_ALWAYS_INLINE bool isInside(const void* p) const {
return uintptr_t(p) >= heapStart_ && uintptr_t(p) < heapEnd_; for (auto chunk : chunks_) {
if (uintptr_t(p) - chunk->start() < gc::ChunkSize)
return true;
}
return false;
} }
template<typename T> template<typename T>
bool isInside(const SharedMem<T>& p) const { bool isInside(const SharedMem<T>& p) const {
@ -212,10 +218,7 @@ class Nursery
void queueSweepAction(SweepThunk thunk, void* data); void queueSweepAction(SweepThunk thunk, void* data);
size_t sizeOfHeapCommitted() const { size_t sizeOfHeapCommitted() const {
return numActiveChunks_ * gc::ChunkSize; return numChunks() * gc::ChunkSize;
}
size_t sizeOfHeapDecommitted() const {
return (numNurseryChunks_ - numActiveChunks_) * gc::ChunkSize;
} }
size_t sizeOfMallocedBuffers(mozilla::MallocSizeOf mallocSizeOf) const { size_t sizeOfMallocedBuffers(mozilla::MallocSizeOf mallocSizeOf) const {
size_t total = 0; size_t total = 0;
@ -225,17 +228,13 @@ class Nursery
return total; return total;
} }
MOZ_ALWAYS_INLINE uintptr_t start() const { size_t usedSpace() const;
return heapStart_;
}
MOZ_ALWAYS_INLINE uintptr_t heapEnd() const {
return heapEnd_;
}
// Free space remaining, not counting chunk trailers. // Free space remaining, not counting chunk trailers.
MOZ_ALWAYS_INLINE size_t approxFreeSpace() const { MOZ_ALWAYS_INLINE size_t freeSpace() const {
return heapEnd_ - position_; MOZ_ASSERT(currentEnd_ - position_ <= NurseryChunkUsableSize);
return (currentEnd_ - position_) +
(numChunks() - currentChunk_ - 1) * NurseryChunkUsableSize;
} }
#ifdef JS_GC_ZEAL #ifdef JS_GC_ZEAL
@ -247,6 +246,22 @@ class Nursery
void printTotalProfileTimes(); void printTotalProfileTimes();
private: private:
/* The amount of space in the mapped nursery available to allocations. */
static const size_t NurseryChunkUsableSize = gc::ChunkSize - sizeof(gc::ChunkTrailer);
struct NurseryChunk {
char data[NurseryChunkUsableSize];
gc::ChunkTrailer trailer;
static NurseryChunk* fromChunk(gc::Chunk* chunk);
void init(JSRuntime* rt);
void poisonAndInit(JSRuntime* rt, uint8_t poison);
uintptr_t start() const { return uintptr_t(&data); }
uintptr_t end() const { return uintptr_t(&trailer); }
gc::Chunk* toChunk(JSRuntime* rt);
};
static_assert(sizeof(NurseryChunk) == gc::ChunkSize,
"Nursery chunk size must match gc::Chunk size.");
/* /*
* The start and end pointers are stored under the runtime so that we can * The start and end pointers are stored under the runtime so that we can
* inline the isInsideNursery check into embedder code. Use the start() * inline the isInsideNursery check into embedder code. Use the start()
@ -254,27 +269,24 @@ class Nursery
*/ */
JSRuntime* runtime_; JSRuntime* runtime_;
/* Vector of allocated chunks to allocate from. */
Vector<NurseryChunk*, 0, SystemAllocPolicy> chunks_;
/* Pointer to the first unallocated byte in the nursery. */ /* Pointer to the first unallocated byte in the nursery. */
uintptr_t position_; uintptr_t position_;
/* Pointer to the logical start of the Nursery. */ /* Pointer to the logical start of the Nursery. */
uintptr_t currentStart_; unsigned currentStartChunk_;
uintptr_t currentStartPosition_;
/* Pointer to the last byte of space in the current chunk. */ /* Pointer to the last byte of space in the current chunk. */
uintptr_t currentEnd_; uintptr_t currentEnd_;
/* Pointer to first and last address of the total nursery allocation. */
uintptr_t heapStart_;
uintptr_t heapEnd_;
/* The index of the chunk that is currently being allocated from. */ /* The index of the chunk that is currently being allocated from. */
int currentChunk_; unsigned currentChunk_;
/* The index after the last chunk that we will allocate from. */ /* Maximum number of chunks to allocate for the nursery. */
int numActiveChunks_; unsigned maxNurseryChunks_;
/* Number of chunks allocated for the nursery. */
int numNurseryChunks_;
/* Promotion rate for the previous minor collection. */ /* Promotion rate for the previous minor collection. */
double previousPromotionRate_; double previousPromotionRate_;
@ -346,42 +358,21 @@ class Nursery
Canary* lastCanary_; Canary* lastCanary_;
#endif #endif
/* The amount of space in the mapped nursery available to allocations. */ NurseryChunk* allocChunk();
static const size_t NurseryChunkUsableSize = gc::ChunkSize - sizeof(gc::ChunkTrailer);
struct NurseryChunkLayout { NurseryChunk& chunk(unsigned index) const {
char data[NurseryChunkUsableSize]; return *chunks_[index];
gc::ChunkTrailer trailer;
uintptr_t start() const { return uintptr_t(&data); }
uintptr_t end() const { return uintptr_t(&trailer); }
};
static_assert(sizeof(NurseryChunkLayout) == gc::ChunkSize,
"Nursery chunk size must match gc::Chunk size.");
NurseryChunkLayout& chunk(int index) const {
MOZ_ASSERT(index < numNurseryChunks_);
MOZ_ASSERT(start());
return reinterpret_cast<NurseryChunkLayout*>(start())[index];
} }
MOZ_ALWAYS_INLINE void initChunk(int chunkno) { void setCurrentChunk(unsigned chunkno);
gc::StoreBuffer* sb = JS::shadow::Runtime::asShadowRuntime(runtime())->gcStoreBufferPtr(); void setStartPosition();
new (&chunk(chunkno).trailer) gc::ChunkTrailer(runtime(), sb);
}
MOZ_ALWAYS_INLINE void setCurrentChunk(int chunkno) { void updateNumChunks(unsigned newCount);
MOZ_ASSERT(chunkno < numNurseryChunks_); void updateNumChunksLocked(unsigned newCount, AutoLockGC& lock);
MOZ_ASSERT(chunkno < numActiveChunks_);
currentChunk_ = chunkno;
position_ = chunk(chunkno).start();
currentEnd_ = chunk(chunkno).end();
initChunk(chunkno);
}
void updateNumActiveChunks(int newCount);
MOZ_ALWAYS_INLINE uintptr_t allocationEnd() const { MOZ_ALWAYS_INLINE uintptr_t allocationEnd() const {
MOZ_ASSERT(numActiveChunks_ > 0); MOZ_ASSERT(numChunks() > 0);
return chunk(numActiveChunks_ - 1).end(); return chunks_.back()->end();
} }
MOZ_ALWAYS_INLINE uintptr_t currentEnd() const { MOZ_ALWAYS_INLINE uintptr_t currentEnd() const {

View file

@ -138,7 +138,7 @@ js::gc::AllocateWholeCellSet(Arena* arena)
return nullptr; return nullptr;
} }
if (nursery.approxFreeSpace() < ArenaCellSet::NurseryFreeThresholdBytes) if (nursery.freeSpace() < ArenaCellSet::NurseryFreeThresholdBytes)
rt->gc.storeBuffer.setAboutToOverflow(); rt->gc.storeBuffer.setAboutToOverflow();
auto cells = static_cast<ArenaCellSet*>(data); auto cells = static_cast<ArenaCellSet*>(data);

View file

@ -384,7 +384,7 @@ class StoreBuffer
{ {
} }
bool enable(); MOZ_MUST_USE bool enable();
void disable(); void disable();
bool isEnabled() const { return enabled_; } bool isEnabled() const { return enabled_; }

View file

@ -1031,30 +1031,31 @@ GCRuntime::init(uint32_t maxbytes, uint32_t maxNurseryBytes)
if (!rootsHash.init(256)) if (!rootsHash.init(256))
return false; return false;
/* {
* Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes AutoLockGC lock(rt);
* for default backward API compatibility.
*/
AutoLockGC lock(rt);
MOZ_ALWAYS_TRUE(tunables.setParameter(JSGC_MAX_BYTES, maxbytes, lock));
setMaxMallocBytes(maxbytes);
const char* size = getenv("JSGC_MARK_STACK_LIMIT"); /*
if (size) * Separate gcMaxMallocBytes from gcMaxBytes but initialize to maxbytes
setMarkStackLimit(atoi(size), lock); * for default backward API compatibility.
*/
MOZ_ALWAYS_TRUE(tunables.setParameter(JSGC_MAX_BYTES, maxbytes, lock));
setMaxMallocBytes(maxbytes);
jitReleaseNumber = majorGCNumber + JIT_SCRIPT_RELEASE_TYPES_PERIOD; const char* size = getenv("JSGC_MARK_STACK_LIMIT");
if (size)
setMarkStackLimit(atoi(size), lock);
if (!nursery.init(maxNurseryBytes)) jitReleaseNumber = majorGCNumber + JIT_SCRIPT_RELEASE_TYPES_PERIOD;
return false;
if (!nursery.isEnabled()) { if (!nursery.init(maxNurseryBytes, lock))
MOZ_ASSERT(nursery.nurserySize() == 0);
++rt->gc.generationalDisabled;
} else {
MOZ_ASSERT(nursery.nurserySize() > 0);
if (!storeBuffer.enable())
return false; return false;
if (!nursery.isEnabled()) {
MOZ_ASSERT(nursery.nurserySize() == 0);
++rt->gc.generationalDisabled;
} else {
MOZ_ASSERT(nursery.nurserySize() > 0);
}
} }
#ifdef JS_GC_ZEAL #ifdef JS_GC_ZEAL
@ -6494,7 +6495,6 @@ GCRuntime::disableGenerationalGC()
if (isGenerationalGCEnabled()) { if (isGenerationalGCEnabled()) {
evictNursery(JS::gcreason::API); evictNursery(JS::gcreason::API);
nursery.disable(); nursery.disable();
storeBuffer.disable();
} }
++rt->gc.generationalDisabled; ++rt->gc.generationalDisabled;
} }
@ -6504,10 +6504,8 @@ GCRuntime::enableGenerationalGC()
{ {
MOZ_ASSERT(generationalDisabled > 0); MOZ_ASSERT(generationalDisabled > 0);
--generationalDisabled; --generationalDisabled;
if (generationalDisabled == 0) { if (generationalDisabled == 0)
nursery.enable(); nursery.enable();
storeBuffer.enable();
}
} }
bool bool

View file

@ -532,7 +532,6 @@ JSRuntime::addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf, JS::Runtim
rtSizes->gc.marker += gc.marker.sizeOfExcludingThis(mallocSizeOf); rtSizes->gc.marker += gc.marker.sizeOfExcludingThis(mallocSizeOf);
rtSizes->gc.nurseryCommitted += gc.nursery.sizeOfHeapCommitted(); rtSizes->gc.nurseryCommitted += gc.nursery.sizeOfHeapCommitted();
rtSizes->gc.nurseryDecommitted += gc.nursery.sizeOfHeapDecommitted();
rtSizes->gc.nurseryMallocedBuffers += gc.nursery.sizeOfMallocedBuffers(mallocSizeOf); rtSizes->gc.nurseryMallocedBuffers += gc.nursery.sizeOfMallocedBuffers(mallocSizeOf);
gc.storeBuffer.addSizeOfExcludingThis(mallocSizeOf, &rtSizes->gc); gc.storeBuffer.addSizeOfExcludingThis(mallocSizeOf, &rtSizes->gc);
} }

View file

@ -491,16 +491,16 @@ class TypedArrayObjectTemplate : public TypedArrayObject
// If the buffer is for an inline typed object, the data pointer // If the buffer is for an inline typed object, the data pointer
// may be in the nursery, so include a barrier to make sure this // may be in the nursery, so include a barrier to make sure this
// object is updated if that typed object moves. // object is updated if that typed object moves.
if (!IsInsideNursery(obj) && cx->runtime()->gc.nursery.isInside(buffer->dataPointerEither())) { auto ptr = buffer->dataPointerEither();
// Shared buffer data should never be nursery-allocated, so if (!IsInsideNursery(obj) && cx->runtime()->gc.nursery.isInside(ptr)) {
// we need to fail here if isSharedMemory. However, mmap() // Shared buffer data should never be nursery-allocated, so we
// can place a SharedArrayRawBuffer up against the bottom end // need to fail here if isSharedMemory. However, mmap() can
// of the nursery, and a zero-length buffer will erroneously be // place a SharedArrayRawBuffer up against the bottom end of a
// nursery chunk, and a zero-length buffer will erroneously be
// perceived as being inside the nursery; sidestep that. // perceived as being inside the nursery; sidestep that.
if (isSharedMemory) { if (isSharedMemory) {
MOZ_ASSERT(buffer->byteLength() == 0 && MOZ_ASSERT(buffer->byteLength() == 0 &&
cx->runtime()->gc.nursery.start() == (uintptr_t(ptr.unwrapValue()) & gc::ChunkMask) == 0);
buffer->dataPointerEither().unwrapValue());
} else { } else {
cx->runtime()->gc.storeBuffer.putWholeCell(obj); cx->runtime()->gc.storeBuffer.putWholeCell(obj);
} }

View file

@ -2631,11 +2631,6 @@ ReportJSRuntimeExplicitTreeStats(const JS::RuntimeStats& rtStats,
"GC arenas in non-empty chunks that is decommitted, i.e. it takes up " "GC arenas in non-empty chunks that is decommitted, i.e. it takes up "
"address space but no physical memory or swap space."); "address space but no physical memory or swap space.");
REPORT_BYTES(rtPath2 + NS_LITERAL_CSTRING("runtime/gc/nursery-decommitted"),
KIND_NONHEAP, rtStats.runtime.gc.nurseryDecommitted,
"Memory allocated to the GC's nursery that is decommitted, i.e. it takes up "
"address space but no physical memory or swap space.");
REPORT_GC_BYTES(rtPath + NS_LITERAL_CSTRING("gc-heap/unused-chunks"), REPORT_GC_BYTES(rtPath + NS_LITERAL_CSTRING("gc-heap/unused-chunks"),
rtStats.gcHeapUnusedChunks, rtStats.gcHeapUnusedChunks,
"Empty GC chunks which will soon be released unless claimed for new " "Empty GC chunks which will soon be released unless claimed for new "