forked from mirrors/gecko-dev

Bug 504462 - Merge tamarin's CodeAlloc into tracemonkey, r=gal.

parent f6574847b1
commit 146159019c

22 changed files with 944 additions and 961 deletions
@@ -216,6 +216,8 @@ VPATH		+= $(srcdir)/nanojit
 INSTALLED_HEADERS += \
 		jsbuiltins.h    \
 		Assembler.h     \
+		Allocator.h     \
+		CodeAlloc.h     \
 		LIR.h		\
 		avmplus.h	\
 		Fragmento.h	\
@@ -228,6 +230,8 @@ INSTALLED_HEADERS += \
 CPPSRCS += \
 		jstracer.cpp \
 		Assembler.cpp  \
+		Allocator.cpp  \
+		CodeAlloc.cpp  \
 		Fragmento.cpp  \
 		LIR.cpp        \
 		RegAlloc.cpp   \
@@ -95,11 +95,14 @@ typedef struct VMSideExit VMSideExit;
 
 #ifdef __cplusplus
 namespace nanojit {
+    class Assembler;
+    class CodeAlloc;
     class Fragment;
     class Fragmento;
     class LirBuffer;
 }
 class TraceRecorder;
+class VMAllocator;
 extern "C++" { template<typename T> class Queue; }
 typedef Queue<uint16> SlotList;
 
@@ -140,6 +143,9 @@ struct JSTraceMonitor {
 
     CLS(nanojit::LirBuffer) lirbuf;
     CLS(nanojit::Fragmento) fragmento;
+    CLS(VMAllocator)        allocator;   // A chunk allocator for LIR.
+    CLS(nanojit::CodeAlloc) codeAlloc;   // A general allocator for native code.
+    CLS(nanojit::Assembler) assembler;
     CLS(TraceRecorder)      recorder;
     jsval                   *reservedDoublePool;
     jsval                   *reservedDoublePoolPtr;
@@ -171,9 +177,12 @@ struct JSTraceMonitor {
     JSPackedBool            useReservedObjects;
     JSObject                *reservedObjects;
 
-    /* Fragmento for the regular expression compiler. This is logically
-     * a distinct compiler but needs to be managed in exactly the same
-     * way as the real tracing Fragmento. */
+    /* Parts for the regular expression compiler. This is logically
+     * a distinct compiler but needs to be managed in exactly the same
+     * way as the trace compiler. */
+    CLS(VMAllocator)        reAllocator;
+    CLS(nanojit::CodeAlloc) reCodeAlloc;
+    CLS(nanojit::Assembler) reAssembler;
     CLS(nanojit::LirBuffer) reLirBuf;
     CLS(nanojit::Fragmento) reFragmento;
 
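
The trace monitor now owns its allocation machinery directly rather than reaching through Fragmento. A rough sketch of how the new members cooperate, assembled from the js_InitJIT hunk later in this commit (sketch only, not part of the patch; CLS(T) is presumably SpiderMonkey's C/C++-neutral pointer macro, T* under C++):

    tm->allocator = new VMAllocator();                    // chunk allocator backing LIR
    tm->codeAlloc = new (&gc) CodeAlloc();                // pages for generated native code
    tm->assembler = new (&gc) Assembler(tm->codeAlloc, core, &js_LogController);
    tm->lirbuf    = new (&gc) LirBuffer(*tm->allocator);  // LIR storage draws from the allocator

The re* triple mirrors this wiring for the regular expression compiler.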
@@ -2468,6 +2468,7 @@ class RegExpNativeCompiler {
 
     LIns* compileFlat(RENode *&node, LIns* pos, LInsList& fails)
     {
+        VMAllocator *alloc = JS_TRACE_MONITOR(cx).reAllocator;
 #ifdef USE_DOUBLE_CHAR_MATCH
         if (node->u.flat.length == 1) {
             if (node->next && node->next->op == REOP_FLAT &&
@@ -2483,7 +2484,7 @@ class RegExpNativeCompiler {
         } else {
             size_t i;
             for (i = 0; i < node->u.flat.length - 1; i += 2) {
-                if (fragment->lirbuf->outOMem())
+                if (alloc->outOfMemory())
                     return 0;
                 pos = compileFlatDoubleChar(((jschar*) node->kid)[i],
                                             ((jschar*) node->kid)[i+1],
@@ -2501,7 +2502,7 @@ class RegExpNativeCompiler {
             return compileFlatSingleChar(node->u.flat.chr, pos, fails);
         } else {
             for (size_t i = 0; i < node->u.flat.length; i++) {
-                if (fragment->lirbuf->outOMem())
+                if (alloc->outOfMemory())
                     return 0;
                 pos = compileFlatSingleChar(((jschar*) node->kid)[i], pos, fails);
                 if (!pos)
@@ -2530,7 +2531,7 @@ class RegExpNativeCompiler {
         if (!charSet->converted && !ProcessCharSet(cx, re, charSet))
             return NULL;
         LIns* skip = lirBufWriter->insSkip(bitmapLen);
-        if (fragment->lirbuf->outOMem())
+        if (JS_TRACE_MONITOR(cx).reAllocator->outOfMemory())
             return NULL;
         void* bitmapData = skip->payload();
         memcpy(bitmapData, charSet->u.bits, bitmapLen);
@@ -2928,8 +2929,9 @@ class RegExpNativeCompiler {
      */
     LIns *compileNode(RENode *node, LIns *pos, bool atEnd, LInsList &fails)
     {
+        VMAllocator *alloc = JS_TRACE_MONITOR(cx).reAllocator;
         for (; pos && node; node = node->next) {
-            if (fragment->lirbuf->outOMem())
+            if (alloc->outOfMemory())
                 return NULL;
 
             bool childNextIsEnd = atEnd && !node->next;
@@ -3002,7 +3004,7 @@ class RegExpNativeCompiler {
 
         /* Failed to match on first character, so fail whole match. */
         lir->ins1(LIR_ret, lir->insImm(0));
-        return !fragment->lirbuf->outOMem();
+        return !JS_TRACE_MONITOR(cx).reAllocator->outOfMemory();
     }
 
     /* Compile normal regular expressions that can match starting at any char. */
@@ -3018,7 +3020,7 @@ class RegExpNativeCompiler {
         lir->insStorei(lir->ins2(LIR_piadd, start, lir->insImm(2)), state,
                        offsetof(REGlobalData, skipped));
 
-        return !fragment->lirbuf->outOMem();
+        return !JS_TRACE_MONITOR(cx).reAllocator->outOfMemory();
     }
 
     inline LIns*
@@ -3060,10 +3062,12 @@ class RegExpNativeCompiler {
     {
         GuardRecord* guard = NULL;
         LIns* pos;
+        Assembler *assm;
         bool oom = false;
         const jschar* re_chars;
         size_t re_length;
         Fragmento* fragmento = JS_TRACE_MONITOR(cx).reFragmento;
+        VMAllocator *alloc = JS_TRACE_MONITOR(cx).reAllocator;
 
         re->source->getCharsAndLength(re_chars, re_length);
         /*
@@ -3078,7 +3082,7 @@ class RegExpNativeCompiler {
         this->cx = cx;
         /* At this point we have an empty fragment. */
         LirBuffer* lirbuf = fragment->lirbuf;
-        if (lirbuf->outOMem())
+        if (alloc->outOfMemory())
             goto fail;
         /* FIXME Use bug 463260 smart pointer when available. */
         lir = lirBufWriter = new (&gc) LirBufWriter(lirbuf);
@@ -3116,11 +3120,12 @@ class RegExpNativeCompiler {
 
         guard = insertGuard(re_chars, re_length);
 
-        if (lirbuf->outOMem())
+        if (alloc->outOfMemory())
             goto fail;
-        ::compile(fragmento->assm(), fragment);
-        if (fragmento->assm()->error() != nanojit::None) {
-            oom = fragmento->assm()->error() == nanojit::OutOMem;
+        assm = JS_TRACE_MONITOR(cx).reAssembler;
+        ::compile(JS_TRACE_MONITOR(cx).reFragmento, assm, fragment);
+        if (assm->error() != nanojit::None) {
+            oom = assm->error() == nanojit::OutOMem;
             goto fail;
         }
 
@@ -3131,10 +3136,12 @@ class RegExpNativeCompiler {
 #endif
         return JS_TRUE;
     fail:
-        if (lirbuf->outOMem() || oom ||
+        if (alloc->outOfMemory() || oom ||
             js_OverfullFragmento(&JS_TRACE_MONITOR(cx), fragmento)) {
             fragmento->clearFrags();
-            lirbuf->rewind();
+            JS_TRACE_MONITOR(cx).reCodeAlloc->sweep();
+            alloc->reset();
+            lirbuf->clear();
         } else {
             if (!guard) insertGuard(re_chars, re_length);
             re->flags |= JSREG_NOCOMPILE;
@@ -261,6 +261,38 @@ static GC gc = GC();
 static avmplus::AvmCore s_core = avmplus::AvmCore();
 static avmplus::AvmCore* core = &s_core;
 
+/* Allocator SPI implementation. */
+
+void*
+nanojit::Allocator::allocChunk(size_t nbytes)
+{
+    VMAllocator *vma = (VMAllocator*)this;
+    JS_ASSERT(!vma->outOfMemory());
+    void *p = malloc(nbytes);
+    if (!p) {
+        JS_ASSERT(nbytes < sizeof(vma->mReserve));
+        vma->mOutOfMemory = true;
+        p = (void*) &vma->mReserve[0];
+    }
+    vma->mSize += nbytes;
+    return p;
+}
+
+void
+nanojit::Allocator::freeChunk(void *p) {
+    VMAllocator *vma = (VMAllocator*)this;
+    if (p != &vma->mReserve[0])
+        free(p);
+}
+
+void
+nanojit::Allocator::postReset() {
+    VMAllocator *vma = (VMAllocator*)this;
+    vma->mOutOfMemory = false;
+    vma->mSize = 0;
+}
+
+
 #ifdef JS_JIT_SPEW
 static void
 DumpPeerStability(JSTraceMonitor* tm, const void* ip, JSObject* globalObj, uint32 globalShape, uint32 argc);
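
These three definitions are the embedder's half of the new Allocator SPI: Allocator.cpp (added at the end of this commit) calls allocChunk, freeChunk and postReset but deliberately leaves them undefined. The VMAllocator versions above add OOM latching and a spill reserve; for contrast, a bare-minimum conforming implementation (hypothetical, not in the patch) needs nothing beyond libc:

    /* Hypothetical minimal SPI: no reserve, no size accounting. */
    void* nanojit::Allocator::allocChunk(size_t nbytes) {
        void* p = malloc(nbytes);
        if (!p)
            abort();   /* a real embedder must soft-fail, as VMAllocator does */
        return p;
    }
    void nanojit::Allocator::freeChunk(void* p) { free(p); }
    void nanojit::Allocator::postReset() { }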
@@ -3160,7 +3192,8 @@ TraceRecorder::snapshot(ExitType exitType)
         }
     }
 
-    if (sizeof(VMSideExit) + (stackSlots + ngslots) * sizeof(JSTraceType) >= NJ_MAX_SKIP_PAYLOAD_SZB) {
+    if (sizeof(VMSideExit) + (stackSlots + ngslots) * sizeof(JSTraceType) >
+        LirBuffer::MAX_SKIP_PAYLOAD_SZB) {
         /*
          * ::snapshot() is infallible in the sense that callers don't
          * expect errors; but this is a trace-aborting error condition. So
@@ -3232,7 +3265,7 @@ TraceRecorder::guard(bool expected, LIns* cond, VMSideExit* exit)
     /*
      * BIG FAT WARNING: If compilation fails we don't reset the lirbuf, so it's
      * safe to keep references to the side exits here. If we ever start
-     * rewinding those lirbufs, we have to make sure we purge the side exits
+     * clearing those lirbufs, we have to make sure we purge the side exits
      * that then no longer will be in valid memory.
      */
     if (exit->exitType == LOOP_EXIT)
@@ -3267,7 +3300,7 @@ TraceRecorder::copy(VMSideExit* copy)
     /*
      * BIG FAT WARNING: If compilation fails we don't reset the lirbuf, so it's
      * safe to keep references to the side exits here. If we ever start
-     * rewinding those lirbufs, we have to make sure we purge the side exits
+     * clearing those lirbufs, we have to make sure we purge the side exits
      * that then no longer will be in valid memory.
      */
     if (exit->exitType == LOOP_EXIT)
@@ -3314,7 +3347,7 @@ FlushJITCache(JSContext* cx)
         JS_ASSERT(fragmento->labels);
         fragmento->labels->clear();
 #endif
-        tm->lirbuf->rewind();
+
         for (size_t i = 0; i < FRAGMENT_TABLE_SIZE; ++i) {
             VMFragment* f = tm->vmfragments[i];
             while (f) {
@@ -3329,6 +3362,10 @@ FlushJITCache(JSContext* cx)
             tm->globalStates[i].globalSlots->clear();
         }
     }
 
+    tm->allocator->reset();
+    tm->codeAlloc->sweep();
+    tm->lirbuf->clear();
+
     tm->needFlush = JS_FALSE;
 }
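
The flush sequence replaces the old lirbuf->rewind(). Its intended semantics, as far as this commit shows them (CodeAlloc::sweep() is not among the captured hunks, so its description is an assumption):

    tm->allocator->reset();   // Allocator::reset() frees every chunk, then postReset()
                              // clears VMAllocator's mOutOfMemory/mSize (see Allocator.cpp)
    tm->codeAlloc->sweep();   // presumably reclaims native code pages no longer referenced
    tm->lirbuf->clear();      // re-primes the LirBuffer from the now-empty allocator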
@@ -3352,14 +3389,15 @@ TraceRecorder::compile(JSTraceMonitor* tm)
     }
     if (anchor && anchor->exitType != CASE_EXIT)
         ++treeInfo->branchCount;
-    if (lirbuf->outOMem()) {
-        fragmento->assm()->setError(nanojit::OutOMem);
+    if (tm->allocator->outOfMemory())
         return;
-    }
-    ::compile(fragmento->assm(), fragment);
-    if (fragmento->assm()->error() == nanojit::OutOMem)
+
+    Assembler *assm = JS_TRACE_MONITOR(cx).assembler;
+    ::compile(fragmento, assm, fragment);
+    if (assm->error() == nanojit::OutOMem)
         return;
-    if (fragmento->assm()->error() != nanojit::None) {
+
+    if (assm->error() != nanojit::None) {
         debug_only_print0(LC_TMTracer, "Blacklisted: error during compilation\n");
         Blacklist((jsbytecode*) fragment->root->ip);
         return;
@@ -3369,10 +3407,10 @@ TraceRecorder::compile(JSTraceMonitor* tm)
     if (anchor) {
 #ifdef NANOJIT_IA32
         if (anchor->exitType == CASE_EXIT)
-            fragmento->assm()->patch(anchor, anchor->switchInfo);
+            assm->patch(anchor, anchor->switchInfo);
         else
 #endif
-            fragmento->assm()->patch(anchor);
+            assm->patch(anchor);
     }
     JS_ASSERT(fragment->code());
     JS_ASSERT(!fragment->vmprivate);
@@ -3392,7 +3430,7 @@ TraceRecorder::compile(JSTraceMonitor* tm)
 }
 
 static bool
-JoinPeersIfCompatible(Fragmento* frago, Fragment* stableFrag, TreeInfo* stableTree,
+JoinPeersIfCompatible(Assembler* assm, Fragment* stableFrag, TreeInfo* stableTree,
                       VMSideExit* exit)
 {
     JS_ASSERT(exit->numStackSlots == stableTree->nStackTypes);
 | 
				
			||||||
    }
 | 
					    }
 | 
				
			||||||
 | 
					
 | 
				
			||||||
    exit->target = stableFrag;
 | 
					    exit->target = stableFrag;
 | 
				
			||||||
    frago->assm()->patch(exit);
 | 
					    assm->patch(exit);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
    stableTree->dependentTrees.addUnique(exit->from->root);
 | 
					    stableTree->dependentTrees.addUnique(exit->from->root);
 | 
				
			||||||
    ((TreeInfo*)exit->from->root->vmprivate)->linkedTrees.addUnique(stableFrag);
 | 
					    ((TreeInfo*)exit->from->root->vmprivate)->linkedTrees.addUnique(stableFrag);
 | 
				
			||||||
| 
						 | 
@@ -3732,8 +3770,8 @@ TraceRecorder::closeLoop(SlotMap& slotMap, VMSideExit* exit, TypeConsensus& cons
     }
     compile(traceMonitor);
 
-    if (fragmento->assm()->error() != nanojit::None)
+    Assembler *assm = JS_TRACE_MONITOR(cx).assembler;
+    if (assm->error() != nanojit::None)
         return false;
-
     peer = getLoop(traceMonitor, root->ip, root->globalObj, root->globalShape, root->argc);
     JS_ASSERT(peer);
@@ -3781,7 +3819,8 @@ TraceRecorder::joinEdgesToEntry(Fragmento* fragmento, VMFragment* peer_root)
             uexit = ti->unstableExits;
             unext = &ti->unstableExits;
             while (uexit != NULL) {
-                bool remove = JoinPeersIfCompatible(fragmento, fragment, treeInfo, uexit->exit);
+                Assembler *assm = JS_TRACE_MONITOR(cx).assembler;
+                bool remove = JoinPeersIfCompatible(assm, fragment, treeInfo, uexit->exit);
                 JS_ASSERT(!remove || fragment != peer);
                 debug_only_stmt(
                     if (remove) {
@@ -3871,7 +3910,8 @@ TraceRecorder::endLoop(VMSideExit* exit)
         lir->insGuard(LIR_x, NULL, createGuardRecord(exit));
     compile(traceMonitor);
 
-    if (traceMonitor->fragmento->assm()->error() != nanojit::None)
+    Assembler *assm = traceMonitor->assembler;
+    if (assm->error() != nanojit::None)
         return;
 
     VMFragment* root = (VMFragment*)fragment->root;
@@ -4198,7 +4238,8 @@ DeleteRecorder(JSContext* cx)
     tm->recorder = NULL;
 
     /* If we ran out of memory, flush the code cache. */
-    if (JS_TRACE_MONITOR(cx).fragmento->assm()->error() == OutOMem ||
+    Assembler *assm = JS_TRACE_MONITOR(cx).assembler;
+    if (assm->error() == OutOMem ||
         js_OverfullFragmento(tm, tm->fragmento)) {
         FlushJITCache(cx);
         return false;
@@ -4297,7 +4338,8 @@ StartRecorder(JSContext* cx, VMSideExit* anchor, Fragment* f, TreeInfo* ti,
     }
 
     /* Clear any leftover error state. */
-    tm->fragmento->assm()->setError(None);
+    Assembler *assm = JS_TRACE_MONITOR(cx).assembler;
+    assm->setError(None);
     return true;
 }
 
@@ -4310,10 +4352,9 @@ TrashTree(JSContext* cx, Fragment* f)
         return;
     AUDIT(treesTrashed);
     debug_only_print0(LC_TMTracer, "Trashing tree info.\n");
-    Fragmento* fragmento = JS_TRACE_MONITOR(cx).fragmento;
     TreeInfo* ti = (TreeInfo*)f->vmprivate;
     f->vmprivate = NULL;
-    f->releaseCode(fragmento);
+    f->releaseCode(JS_TRACE_MONITOR(cx).codeAlloc);
     Fragment** data = ti->dependentTrees.data();
     unsigned length = ti->dependentTrees.length();
     for (unsigned n = 0; n < length; ++n)
@@ -4565,7 +4606,7 @@ RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f, jsbytecode* outer,
     f->root = f;
     f->lirbuf = tm->lirbuf;
 
-    if (f->lirbuf->outOMem() || js_OverfullFragmento(tm, tm->fragmento)) {
+    if (tm->allocator->outOfMemory() || js_OverfullFragmento(tm, tm->fragmento)) {
         Backoff(cx, (jsbytecode*) f->root->ip);
         FlushJITCache(cx);
         debug_only_print0(LC_TMTracer,
 | 
				
			||||||
            if (ti->nGlobalTypes() < ti->globalSlots->length())
 | 
					            if (ti->nGlobalTypes() < ti->globalSlots->length())
 | 
				
			||||||
                SpecializeTreesToMissingGlobals(cx, globalObj, ti);
 | 
					                SpecializeTreesToMissingGlobals(cx, globalObj, ti);
 | 
				
			||||||
            exit->target = f;
 | 
					            exit->target = f;
 | 
				
			||||||
            tm->fragmento->assm()->patch(exit);
 | 
					            Assembler *assm = JS_TRACE_MONITOR(cx).assembler;
 | 
				
			||||||
 | 
					            assm->patch(exit);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
            /* Now erase this exit from the unstable exit list. */
 | 
					            /* Now erase this exit from the unstable exit list. */
 | 
				
			||||||
            UnstableExit** tail = &from_ti->unstableExits;
 | 
					            UnstableExit** tail = &from_ti->unstableExits;
 | 
				
			||||||
| 
						 | 
@@ -5909,6 +5951,8 @@ js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount)
 JS_REQUIRES_STACK JSRecordingStatus
 TraceRecorder::monitorRecording(JSContext* cx, TraceRecorder* tr, JSOp op)
 {
+    Assembler *assm = JS_TRACE_MONITOR(cx).assembler;
+
     /* Process needFlush and deepAbort() requests now. */
     if (JS_TRACE_MONITOR(cx).needFlush) {
         FlushJITCache(cx);
@@ -5984,14 +6028,15 @@ TraceRecorder::monitorRecording(JSContext* cx, TraceRecorder* tr, JSOp op)
         return JSRS_STOP;
     }
 
-    if (JS_TRACE_MONITOR(cx).fragmento->assm()->error()) {
+    if (assm->error()) {
         js_AbortRecording(cx, "error during recording");
         return JSRS_STOP;
     }
 
-    if (tr->lirbuf->outOMem() ||
-        js_OverfullFragmento(&JS_TRACE_MONITOR(cx), JS_TRACE_MONITOR(cx).fragmento)) {
-        js_AbortRecording(cx, "no more LIR memory");
+    if (tr->traceMonitor->allocator->outOfMemory() ||
+        js_OverfullFragmento(&JS_TRACE_MONITOR(cx),
+                             JS_TRACE_MONITOR(cx).fragmento)) {
+        js_AbortRecording(cx, "no more memory");
         FlushJITCache(cx);
         return JSRS_STOP;
     }
@@ -6351,14 +6396,23 @@ js_InitJIT(JSTraceMonitor *tm)
                           JS_DHASH_DEFAULT_CAPACITY(PC_HASH_COUNT));
     }
 
+    if (!tm->allocator)
+        tm->allocator = new VMAllocator();
+
+    if (!tm->codeAlloc)
+        tm->codeAlloc = new (&gc) CodeAlloc();
+
+    if (!tm->assembler)
+        tm->assembler = new (&gc) Assembler(tm->codeAlloc, core, &js_LogController);
+
     if (!tm->fragmento) {
         JS_ASSERT(!tm->reservedDoublePool);
-        Fragmento* fragmento = new (&gc) Fragmento(core, &js_LogController, 32);
-        verbose_only(fragmento->labels = new (&gc) LabelMap(core);)
+        Fragmento* fragmento = new (&gc) Fragmento(core, &js_LogController, 32, tm->codeAlloc);
+        verbose_only(fragmento->labels = new (&gc) LabelMap(core, *tm->allocator);)
         tm->fragmento = fragmento;
-        tm->lirbuf = new (&gc) LirBuffer(fragmento);
+        tm->lirbuf = new (&gc) LirBuffer(*tm->allocator);
 #ifdef DEBUG
-        tm->lirbuf->names = new (&gc) LirNameMap(&gc, tm->fragmento->labels);
+        tm->lirbuf->names = new (&gc) LirNameMap(&gc, *tm->allocator, tm->fragmento->labels);
 #endif
         for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
             tm->globalStates[i].globalShape = -1;
 | 
				
			||||||
        tm->reservedDoublePoolPtr = tm->reservedDoublePool = new jsval[MAX_NATIVE_STACK_SLOTS];
 | 
					        tm->reservedDoublePoolPtr = tm->reservedDoublePool = new jsval[MAX_NATIVE_STACK_SLOTS];
 | 
				
			||||||
        memset(tm->vmfragments, 0, sizeof(tm->vmfragments));
 | 
					        memset(tm->vmfragments, 0, sizeof(tm->vmfragments));
 | 
				
			||||||
    }
 | 
					    }
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					    if (!tm->reAllocator)
 | 
				
			||||||
 | 
					        tm->reAllocator = new VMAllocator();
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					    if (!tm->reCodeAlloc)
 | 
				
			||||||
 | 
					        tm->reCodeAlloc = new (&gc) CodeAlloc();
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					    if (!tm->reAssembler)
 | 
				
			||||||
 | 
					        tm->reAssembler = new (&gc) Assembler(tm->reCodeAlloc, core, &js_LogController);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
    if (!tm->reFragmento) {
 | 
					    if (!tm->reFragmento) {
 | 
				
			||||||
        Fragmento* fragmento = new (&gc) Fragmento(core, &js_LogController, 32);
 | 
					        Fragmento* fragmento = new (&gc) Fragmento(core, &js_LogController, 32, tm->reCodeAlloc);
 | 
				
			||||||
        verbose_only(fragmento->labels = new (&gc) LabelMap(core);)
 | 
					        verbose_only(fragmento->labels = new (&gc) LabelMap(core, *tm->reAllocator);)
 | 
				
			||||||
        tm->reFragmento = fragmento;
 | 
					        tm->reFragmento = fragmento;
 | 
				
			||||||
        tm->reLirBuf = new (&gc) LirBuffer(fragmento);
 | 
					        tm->reLirBuf = new (&gc) LirBuffer(*tm->reAllocator);
 | 
				
			||||||
#ifdef DEBUG
 | 
					#ifdef DEBUG
 | 
				
			||||||
        tm->reLirBuf->names = new (&gc) LirNameMap(&gc, fragmento->labels);
 | 
					        tm->reLirBuf->names = new (&gc) LirNameMap(&gc, *tm->reAllocator, fragmento->labels);
 | 
				
			||||||
#endif
 | 
					#endif
 | 
				
			||||||
    }
 | 
					    }
 | 
				
			||||||
#if !defined XP_WIN
 | 
					#if !defined XP_WIN
 | 
				
			||||||
| 
						 | 
					@ -6436,7 +6500,16 @@ js_FinishJIT(JSTraceMonitor *tm)
 | 
				
			||||||
        delete tm->reLirBuf;
 | 
					        delete tm->reLirBuf;
 | 
				
			||||||
        verbose_only(delete tm->reFragmento->labels;)
 | 
					        verbose_only(delete tm->reFragmento->labels;)
 | 
				
			||||||
        delete tm->reFragmento;
 | 
					        delete tm->reFragmento;
 | 
				
			||||||
 | 
					        delete tm->reAllocator;
 | 
				
			||||||
 | 
					        delete tm->reAssembler;
 | 
				
			||||||
 | 
					        delete tm->reCodeAlloc;
 | 
				
			||||||
    }
 | 
					    }
 | 
				
			||||||
 | 
					    if (tm->assembler)
 | 
				
			||||||
 | 
					        delete tm->assembler;
 | 
				
			||||||
 | 
					    if (tm->codeAlloc)
 | 
				
			||||||
 | 
					        delete tm->codeAlloc;
 | 
				
			||||||
 | 
					    if (tm->allocator)
 | 
				
			||||||
 | 
					        delete tm->allocator;
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
void
 | 
					void
 | 
				
			||||||
| 
						 | 
@@ -6546,7 +6619,7 @@ bool
 js_OverfullFragmento(JSTraceMonitor* tm, Fragmento *fragmento)
 {
     /*
-     * You might imagine the outOMem flag on the lirbuf is sufficient
+     * You might imagine the outOfMemory flag on the allocator is sufficient
      * to model the notion of "running out of memory", but there are actually
      * two separate issues involved:
      *
@@ -6560,26 +6633,28 @@ js_OverfullFragmento(JSTraceMonitor* tm, Fragmento *fragmento)
      * safely shut down and signal the rest of spidermonkey when it
      * does. Condition 2 happens quite regularly.
      *
-     * Presently, the code in this file doesn't check the outOMem condition
+     * Presently, the code in this file doesn't check the outOfMemory condition
      * often enough, and frequently misuses the unchecked results of
      * lirbuffer insertions on the asssumption that it will notice the
-     * outOMem flag "soon enough" when it returns to the monitorRecording
-     * function. This turns out to be a false assumption if we use outOMem
+     * outOfMemory flag "soon enough" when it returns to the monitorRecording
+     * function. This turns out to be a false assumption if we use outOfMemory
      * to signal condition 2: we regularly provoke "passing our intended
      * size" and regularly fail to notice it in time to prevent writing
      * over the end of an artificially self-limited LIR buffer.
      *
      * To mitigate, though not completely solve, this problem, we're
      * modeling the two forms of memory exhaustion *separately* for the
-     * time being: condition 1 is handled by the outOMem flag inside
+     * time being: condition 1 is handled by the outOfMemory flag inside
      * nanojit, and condition 2 is being handled independently *here*. So
      * we construct our fragmentos to use all available memory they like,
-     * and only report outOMem to us when there is literally no OS memory
+     * and only report outOfMemory to us when there is literally no OS memory
      * left. Merely purging our cache when we hit our highwater mark is
      * handled by the (few) callers of this function.
      *
      */
     jsuint maxsz = tm->maxCodeCacheBytes;
+    VMAllocator *allocator = tm->allocator;
+    CodeAlloc *codeAlloc = tm->codeAlloc;
     if (fragmento == tm->fragmento) {
         if (tm->prohibitFlush)
             return false;
 | 
				
			||||||
         * code caches.
 | 
					         * code caches.
 | 
				
			||||||
         */
 | 
					         */
 | 
				
			||||||
        maxsz /= 16;
 | 
					        maxsz /= 16;
 | 
				
			||||||
 | 
					        allocator = tm->reAllocator;
 | 
				
			||||||
 | 
					        codeAlloc = tm->reCodeAlloc;
 | 
				
			||||||
    }
 | 
					    }
 | 
				
			||||||
    return (fragmento->cacheUsed() > maxsz);
 | 
					    return (codeAlloc->size() + allocator->size() > maxsz);
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
JS_FORCES_STACK JS_FRIEND_API(void)
 | 
					JS_FORCES_STACK JS_FRIEND_API(void)
 | 
				
			||||||
| 
						 | 
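
With this change the highwater test charges both pools against one budget, and callers treat a hard OOM and an overfull cache identically. The pattern, as it already appears in the monitorRecording hunk above:

    /* Condition 1 (hard OOM) or condition 2 (highwater): flush either way. */
    if (tr->traceMonitor->allocator->outOfMemory() ||
        js_OverfullFragmento(&JS_TRACE_MONITOR(cx), JS_TRACE_MONITOR(cx).fragmento)) {
        js_AbortRecording(cx, "no more memory");
        FlushJITCache(cx);
    }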
@@ -8866,7 +8943,8 @@ TraceRecorder::newArray(JSObject* ctor, uint32 argc, jsval* argv, jsval* rval)
 
         // arr->dslots[i] = box_jsval(vp[i]);  for i in 0..argc
         LIns *dslots_ins = NULL;
-        for (uint32 i = 0; i < argc && !lirbuf->outOMem(); i++) {
+        VMAllocator *alloc = traceMonitor->allocator;
+        for (uint32 i = 0; i < argc && !alloc->outOfMemory(); i++) {
             LIns *elt_ins = get(argv + i);
             box_jsval(argv[i], elt_ins);
             stobj_set_dslot(arr_ins, i, dslots_ins, elt_ins, "set_array_elt");
@@ -9212,6 +9290,7 @@ TraceRecorder::callNative(uintN argc, JSOp mode)
     }
     lir->insStorei(this_ins, invokevp_ins, 1 * sizeof(jsval));
 
+    VMAllocator *alloc = traceMonitor->allocator;
     // Populate argv.
     for (uintN n = 2; n < 2 + argc; n++) {
         LIns* i = get(&vp[n]);
@@ -9220,7 +9299,7 @@ TraceRecorder::callNative(uintN argc, JSOp mode)
 
         // For a very long argument list we might run out of LIR space, so
         // check inside the loop.
-        if (lirbuf->outOMem())
+        if (alloc->outOfMemory())
            ABORT_TRACE("out of memory in argument list");
     }
 
@@ -9230,7 +9309,7 @@ TraceRecorder::callNative(uintN argc, JSOp mode)
         for (uintN n = 2 + argc; n < vplen; n++) {
             lir->insStorei(undef_ins, invokevp_ins, n * sizeof(jsval));
 
-            if (lirbuf->outOMem())
+            if (alloc->outOfMemory())
                 ABORT_TRACE("out of memory in extra slots");
         }
     }
@@ -9928,7 +10007,7 @@ TraceRecorder::record_JSOP_GETELEM()
                         // The entry type map is not necessarily up-to-date, so we capture a new type map
                         // for this point in the code.
                         unsigned stackSlots = NativeStackSlots(cx, 0 /* callDepth */);
-                        if (stackSlots * sizeof(JSTraceType) > NJ_MAX_SKIP_PAYLOAD_SZB)
+                        if (stackSlots * sizeof(JSTraceType) > LirBuffer::MAX_SKIP_PAYLOAD_SZB)
                             ABORT_TRACE("|arguments| requires saving too much stack");
                         JSTraceType* typemap = (JSTraceType*) lir->insSkip(stackSlots * sizeof(JSTraceType))->payload();
                         DetermineTypesVisitor detVisitor(*this, typemap);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
    // Generate a type map for the outgoing frame and stash it in the LIR
 | 
					    // Generate a type map for the outgoing frame and stash it in the LIR
 | 
				
			||||||
    unsigned stackSlots = NativeStackSlots(cx, 0 /* callDepth */);
 | 
					    unsigned stackSlots = NativeStackSlots(cx, 0 /* callDepth */);
 | 
				
			||||||
    if (sizeof(FrameInfo) + stackSlots * sizeof(JSTraceType) > NJ_MAX_SKIP_PAYLOAD_SZB)
 | 
					    if (sizeof(FrameInfo) + stackSlots * sizeof(JSTraceType) > LirBuffer::MAX_SKIP_PAYLOAD_SZB)
 | 
				
			||||||
        ABORT_TRACE("interpreted function call requires saving too much stack");
 | 
					        ABORT_TRACE("interpreted function call requires saving too much stack");
 | 
				
			||||||
    LIns* data = lir->insSkip(sizeof(FrameInfo) + stackSlots * sizeof(JSTraceType));
 | 
					    LIns* data = lir->insSkip(sizeof(FrameInfo) + stackSlots * sizeof(JSTraceType));
 | 
				
			||||||
    FrameInfo* fi = (FrameInfo*)data->payload();
 | 
					    FrameInfo* fi = (FrameInfo*)data->payload();
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
@@ -392,6 +392,33 @@ struct VMSideExit : public nanojit::SideExit
     }
 };
 
+struct VMAllocator : public nanojit::Allocator
+{
+
+public:
+    VMAllocator() : mOutOfMemory(false), mSize(0)
+    {}
+
+    size_t size() {
+        return mSize;
+    }
+
+    bool outOfMemory() {
+        return mOutOfMemory;
+    }
+
+    bool mOutOfMemory;
+    size_t mSize;
+    /*
+     * FIXME: Area the LIR spills into if we encounter an OOM mid-way
+     * through compilation; we must check mOutOfMemory before we run out
+     * of mReserve, otherwise we're in undefined territory. This area
+     * used to be one page, now 16 to be "safer". This is a temporary
+     * and quite unsatisfactory approach to handling OOM in Nanojit.
+     */
+    uintptr_t mReserve[0x10000];
+};
+
 struct FrameInfo {
     JSObject*       callee;     // callee function object
     JSObject*       block;      // caller block chain head
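
The mReserve scheme only works if every LIR-emission loop polls outOfMemory() before it can burn through the reserve: once mOutOfMemory latches, further allocations are silently satisfied from mReserve until a caller notices. The intended polling pattern, lifted from the jstracer hunks above:

    VMAllocator *alloc = traceMonitor->allocator;
    for (uint32 i = 0; i < argc && !alloc->outOfMemory(); i++) {
        /* emit LIR; post-OOM writes land harmlessly in mReserve */
    }
    if (alloc->outOfMemory())
        ABORT_TRACE("out of memory in argument list");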

js/src/nanojit/Allocator.cpp (new file, 92 lines)
@@ -0,0 +1,92 @@
+/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
+/* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is [Open Source Virtual Machine].
+ *
+ * The Initial Developer of the Original Code is
+ * Adobe System Incorporated.
+ * Portions created by the Initial Developer are Copyright (C) 2004-2007
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   Adobe AS3 Team
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#include "nanojit.h"
+
+namespace nanojit
+{
+    Allocator::Allocator()
+        : current_chunk(NULL)
+        , current_top(NULL)
+        , current_limit(NULL)
+    { }
+
+    Allocator::~Allocator()
+    {
+        reset();
+    }
+
+    void Allocator::reset()
+    {
+        Chunk *c = current_chunk;
+        while (c) {
+            Chunk *prev = c->prev;
+            this->freeChunk(c);
+            c = prev;
+        }
+        current_chunk = NULL;
+        current_top = NULL;
+        current_limit = NULL;
+        postReset();
+    }
+
+    void* Allocator::allocSlow(size_t nbytes)
+    {
+        NanoAssert((nbytes & 7) == 0);
+        fill(nbytes);
+        NanoAssert(current_top + nbytes <= current_limit);
+        void* p = current_top;
+        current_top += nbytes;
+        return p;
+    }
+
+    void Allocator::fill(size_t nbytes)
+    {
+        const size_t minChunk = 2000;
+        if (nbytes < minChunk)
+            nbytes = minChunk;
+        size_t chunkbytes = sizeof(Chunk) + nbytes - sizeof(int64_t);
+        void* mem = allocChunk(chunkbytes);
+        Chunk* chunk = (Chunk*) mem;
+        chunk->prev = current_chunk;
+        current_chunk = chunk;
+        current_top = (char*)chunk->data;
+        current_limit = (char*)mem + chunkbytes;
+    }
+}
 | 
				
			||||||
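Allocator.cpp above is the whole engine: a bump-pointer arena that hands out 8-byte-aligned slices of large chunks and frees everything in one sweep. A minimal standalone sketch of the same idea, with plain malloc/free standing in for the allocChunk/freeChunk SPI (illustrative only, not part of the patch):

    // arena_sketch.cpp, a miniature of the Allocator above; illustrative only.
    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    class Arena {
    public:
        Arena() : current_chunk(NULL), top(NULL), limit(NULL) {}
        ~Arena() { reset(); }

        void* alloc(size_t nbytes) {
            nbytes = (nbytes + 7) & ~size_t(7);   // 8-byte align, as in alloc() above
            if (!top || top + nbytes > limit)
                fill(nbytes);                     // slow path: chain a new chunk
            void* p = top;
            top += nbytes;
            return p;
        }

        void reset() {                            // free every chunk at once
            while (current_chunk) {
                Chunk* prev = current_chunk->prev;
                free(current_chunk);              // stands in for freeChunk()
                current_chunk = prev;
            }
            top = limit = NULL;
        }

    private:
        struct Chunk { Chunk* prev; int64_t data[1]; };  // int64_t keeps data 8-aligned

        void fill(size_t nbytes) {
            const size_t minChunk = 2000;         // same floor as fill() above
            if (nbytes < minChunk) nbytes = minChunk;
            size_t chunkbytes = sizeof(Chunk) + nbytes - sizeof(int64_t);
            Chunk* c = (Chunk*) malloc(chunkbytes);   // stands in for allocChunk()
            assert(c);                            // the real SPI longjmps or throws instead
            c->prev = current_chunk;
            current_chunk = c;
            top = (char*) c->data;
            limit = (char*) c + chunkbytes;
        }

        Chunk* current_chunk;
        char*  top;
        char*  limit;
    };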
							
								
								
									
										107
									
								
								js/src/nanojit/Allocator.h
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										107
									
								
								js/src/nanojit/Allocator.h
									
									
									
									
									
										Normal file
									
								
							| 
						 | 
					@ -0,0 +1,107 @@
 | 
				
			||||||
+/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
+/* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is [Open Source Virtual Machine].
+ *
+ * The Initial Developer of the Original Code is
+ * Adobe System Incorporated.
+ * Portions created by the Initial Developer are Copyright (C) 2009
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   Adobe AS3 Team
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either the GNU General Public License Version 2 or later (the "GPL"), or
+ * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef __nanojit_Allocator__
+#define __nanojit_Allocator__
+
+namespace nanojit
+{
+    /**
+     * Allocator is a bump-pointer allocator with an SPI for getting more
+     * memory from an embedder-implemented allocator, such as malloc()/free().
+     *
+     * Allocations never return NULL.  The implementation of allocChunk()
+     * is expected to perform a longjmp or exception when an allocation can't
+     * proceed.
+     */
+    class Allocator {
+    public:
+        Allocator();
+        virtual ~Allocator();
+        void reset();
+
+        /** alloc memory, never return null. */
+        void* alloc(size_t nbytes) {
+            nbytes = (nbytes + 7) & ~7; // round up
+            if (current_top + nbytes <= current_limit) {
+                void *p = current_top;
+                current_top += nbytes;
+                return p;
+            }
+            return allocSlow(nbytes);
+        }
+
+    private:
+        void* allocSlow(size_t nbytes);
+        void fill(size_t minbytes);
+
+        class Chunk {
+        public:
+            Chunk* prev;
+            int64_t data[1]; // int64_t forces 8-byte alignment.
+        };
+
+        Chunk* current_chunk;
+        char* current_top;
+        char* current_limit;
+
+    // allocator SPI
+    private:
+        /** allocate another block from a host provided allocator */
+        void* allocChunk(size_t nbytes);
+
+        /** free back to the same allocator */
+        void freeChunk(void*);
+
+        /** hook for post-reset action. */
+        void postReset();
+    };
+}
+
+/** global new overload enabling this pattern:  new (allocator) T(...) */
+inline void* operator new(size_t size, nanojit::Allocator &a) {
+    return a.alloc(size);
+}
+
+/** global new[] overload enabling this pattern: new (allocator) T[] */
+inline void* operator new[](size_t size, nanojit::Allocator& a) {
+    return a.alloc(size);
+}
+
+#endif // __nanojit_Allocator__
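The two placement operator-new overloads at the bottom are the intended client API: objects are constructed straight into the arena and never individually deleted. A hypothetical usage sketch (assumes some concrete Allocator subclass wired to real memory; the Point type is only an example):

    // Hypothetical usage of the placement-new pattern enabled above.
    struct Point {
        int x, y;
        Point(int x, int y) : x(x), y(y) {}
    };

    void example(nanojit::Allocator& alloc) {
        Point* p  = new (alloc) Point(1, 2);   // constructed inside the arena
        int*   ns = new (alloc) int[16];       // arrays go through new[] above
        ns[0] = p->x + p->y;
        // no delete anywhere: alloc.reset() reclaims every chunk at once
    }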
js/src/nanojit/Assembler.cpp
@@ -45,15 +45,6 @@
 #include "portapi_nanojit.h"
 #endif
 
-#if defined(AVMPLUS_UNIX) && defined(AVMPLUS_ARM)
-#include <asm/unistd.h>
-extern "C" void __clear_cache(void *BEG, void *END);
-#endif
-
-#ifdef AVMPLUS_SPARC
-extern  "C"    void sync_instruction_memory(caddr_t v, u_int len);
-#endif
-
 namespace nanojit
 {
     int UseSoftfloat = 0;
@@ -168,20 +159,19 @@ namespace nanojit
      *
      *    - merging paths ( build a graph? ), possibly use external rep to drive codegen
      */
-    Assembler::Assembler(Fragmento* frago, LogControl* logc)
+    Assembler::Assembler(CodeAlloc* codeAlloc, AvmCore *core, LogControl* logc)
         : hasLoop(0)
-        , _frago(frago)
-        , _gc(frago->core()->gc)
-        , config(frago->core()->config)
+        , codeList(0)
+        , core(core)
+        , _codeAlloc(codeAlloc)
+        , config(core->config)
     {
-        AvmCore *core = frago->core();
         nInit(core);
         verbose_only( _logc = logc; )
        verbose_only( _outputCache = 0; )
         verbose_only( outlineEOL[0] = '\0'; )
 
-        internalReset();
-        pageReset();
+        reset();
     }
 
     void Assembler::arReset()
@@ -258,132 +248,53 @@ namespace nanojit
         return i->isconst() || i->isconstq() || i->isop(LIR_ialloc);
     }
 
-    void Assembler::internalReset()
+    void Assembler::codeAlloc(NIns *&start, NIns *&end, NIns *&eip)
+    {
+        // save the block we just filled
+        if (start)
+            CodeAlloc::add(codeList, start, end);
+
+        // CodeAlloc contract: allocations never fail
+        _codeAlloc->alloc(start, end);
+        VALGRIND_DISCARD_TRANSLATIONS(start, uintptr_t(end) - uintptr_t(start));
+        NanoAssert(uintptr_t(end) - uintptr_t(start) >= (size_t)LARGEST_UNDERRUN_PROT);
+        eip = end;
+    }
+
+    void Assembler::reset()
     {
         // readies for a brand spanking new code generation pass.
+        _nIns = 0;
+        _nExitIns = 0;
+        _startingIns = 0;
+        codeStart = codeEnd = 0;
+        exitStart = exitEnd = 0;
+        _stats.pages = 0;
+        codeList = 0;
+
+        nativePageReset();
         registerResetAll();
         arReset();
     }
 
-    NIns* Assembler::pageAlloc(bool exitPage)
-    {
-        Page*& list = (exitPage) ? _nativeExitPages : _nativePages;
-        Page* page = _frago->pageAlloc();
-        if (page)
-        {
-            page->next = list;
-            list = page;
-            nMarkExecute(page, PAGE_READ|PAGE_WRITE|PAGE_EXEC);
-            _stats.pages++;
-        }
-        else
-        {
-            // return a location that is 'safe' to write to while we are out of mem
-            setError(OutOMem);
-            return _startingIns;
-        }
-        return &page->code[sizeof(page->code)/sizeof(NIns)]; // just past the end
-    }
-
-    void Assembler::pageReset()
-    {
-        pagesFree(_nativePages);
-        pagesFree(_nativeExitPages);
-
-        _nIns = 0;
-        _nExitIns = 0;
-        _startingIns = 0;
-        _stats.pages = 0;
-
-        nativePageReset();
-    }
-
-    void Assembler::pagesFree(Page*& page)
-    {
-        while(page)
-        {
-            Page *next = page->next;  // pull next ptr prior to free
-            _frago->pageFree(page);
-            page = next;
-        }
-    }
-
-    #define bytesFromTop(x)        ( (size_t)(x) - (size_t)pageTop(x) )
-    #define bytesToBottom(x)    ( (size_t)pageBottom(x) - (size_t)(x) )
-    #define bytesBetween(x,y)    ( (size_t)(x) - (size_t)(y) )
-
-    int32_t Assembler::codeBytes()
-    {
-        // start and end on same page?
-        size_t exit = 0;
-        int32_t pages = _stats.pages;
-        if (_nExitIns-1 == _stats.codeExitStart)
-            ;
-        else if (samepage(_nExitIns,_stats.codeExitStart))
-            exit = bytesBetween(_stats.codeExitStart, _nExitIns);
-        else
-        {
-            pages--;
-            exit = ((intptr_t)_stats.codeExitStart & (NJ_PAGE_SIZE-1)) ? bytesFromTop(_stats.codeExitStart)+1 : 0;
-            exit += bytesToBottom(_nExitIns)+1;
-        }
-
-        size_t main = 0;
-        if (_nIns-1 == _stats.codeStart)
-            ;
-        else if (samepage(_nIns,_stats.codeStart))
-            main = bytesBetween(_stats.codeStart, _nIns);
-        else
-        {
-            pages--;
-            main = ((intptr_t)_stats.codeStart & (NJ_PAGE_SIZE-1)) ? bytesFromTop(_stats.codeStart)+1 : 0;
-            main += bytesToBottom(_nIns)+1;
-        }
-        //nj_dprintf("size %d, exit is %d, main is %d, page count %d, sizeof %d\n", (int)((pages) * NJ_PAGE_SIZE + main + exit),(int)exit, (int)main, (int)_stats.pages, (int)sizeof(Page));
-        return (pages) * NJ_PAGE_SIZE + main + exit;
-    }
-
-    #undef bytesFromTop
-    #undef bytesToBottom
-    #undef byteBetween
-
-    Page* Assembler::handoverPages(bool exitPages)
-    {
-        Page*& list = (exitPages) ? _nativeExitPages : _nativePages;
-        NIns*& ins =  (exitPages) ? _nExitIns : _nIns;
-        Page* start = list;
-        list = 0;
-        ins = 0;
-        return start;
-    }
-
-    #ifdef _DEBUG
-    bool Assembler::onPage(NIns* where, bool exitPages)
-    {
-        Page* page = (exitPages) ? _nativeExitPages : _nativePages;
-        bool on = false;
-        while(page)
-        {
-            if (samepage(where-1,page))
-                on = true;
-            page = page->next;
-        }
-        return on;
-    }
-
+#ifdef _DEBUG
     void Assembler::pageValidate()
     {
-        NanoAssert(!error());
-        // _nIns and _nExitIns need to be at least on one of these pages
-        NanoAssertMsg( onPage(_nIns)&& onPage(_nExitIns,true), "Native instruction pointer overstep paging bounds; check overrideProtect for last instruction");
+        if (error()) return;
+        // _nIns needs to be at least on one of these pages
+        NanoAssertMsg(_inExit ? containsPtr(exitStart, exitEnd, _nIns) : containsPtr(codeStart, codeEnd, _nIns),
+                     "Native instruction pointer overstep paging bounds; check overrideProtect for last instruction");
     }
-    #endif
+#endif
+
+#endif
+
+
     #ifdef _DEBUG
 
     void Assembler::resourceConsistencyCheck()
     {
-        NanoAssert(!error());
+        if (error()) return;
+
 #ifdef NANOJIT_IA32
         NanoAssert((_allocator.active[FST0] && _fpuStkDepth == -1) ||
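The heart of the hunk above is the new Assembler::codeAlloc(): instead of walking page lists, the assembler retires the block it just filled onto codeList and asks CodeAlloc for a fresh one, leaving the emit cursor at the block's end because nanojit emits instructions backwards. A self-contained model of that handoff (types and the malloc backing are stand-ins, not the real CodeAlloc API):

    // Illustrative model of the "retire filled block, grab a fresh one" step.
    #include <cstdlib>
    #include <utility>
    #include <vector>

    typedef unsigned char NIns;                        // stand-in native opcode
    static const size_t kBlockBytes = 4096;            // hypothetical block size
    typedef std::vector< std::pair<NIns*, NIns*> > BlockList;

    void codeAlloc(BlockList& codeList, NIns*& start, NIns*& end, NIns*& eip) {
        if (start)                                     // save the block we just filled
            codeList.push_back(std::make_pair(start, end));
        start = (NIns*) malloc(kBlockBytes);           // stands in for CodeAlloc::alloc
        end   = start + kBlockBytes;
        eip   = end;                                   // emitting proceeds downwards from end
    }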
@@ -678,11 +589,6 @@ namespace nanojit
         {
             RegAlloc* captured = _branchStateMap->get(exit);
             intersectRegisterState(*captured);
-            verbose_only(
-                verbose_outputf("## merging trunk with %s",
-                    _frago->labels->format(exit->target));
-                verbose_outputf("%010lx:", (unsigned long)_nIns);
-            )
             at = exit->target->fragEntry;
             NanoAssert(at != 0);
             _branchStateMap->remove(exit);
@@ -705,7 +611,7 @@ namespace nanojit
         swapptrs();
         _inExit = true;
 
-        //verbose_only( verbose_outputf("         LIR_xend swapptrs, _nIns is now %08X(%08X), _nExitIns is now %08X(%08X)",_nIns, *_nIns,_nExitIns,*_nExitIns) );
+        // verbose_only( verbose_outputf("         LIR_xend swapptrs, _nIns is now %08X(%08X), _nExitIns is now %08X(%08X)",_nIns, *_nIns,_nExitIns,*_nExitIns) );
         debug_only( _sv_fpuStkDepth = _fpuStkDepth; _fpuStkDepth = 0; )
 
         nFragExit(guard);
@@ -727,7 +633,7 @@ namespace nanojit
         swapptrs();
         _inExit = false;
 
-        //verbose_only( verbose_outputf("         LIR_xt/xf swapptrs, _nIns is now %08X(%08X), _nExitIns is now %08X(%08X)",_nIns, *_nIns,_nExitIns,*_nExitIns) );
+        // verbose_only( verbose_outputf("         LIR_xt/xf swapptrs, _nIns is now %08X(%08X), _nExitIns is now %08X(%08X)",_nIns, *_nIns,_nExitIns,*_nExitIns) );
         verbose_only( verbose_outputf("%010lx:", (unsigned long)jmpTarget);)
         verbose_only( verbose_outputf("----------------------------------- ## BEGIN exit block (LIR_xt|LIR_xf)") );
 
@@ -743,7 +649,15 @@ namespace nanojit
 
     void Assembler::beginAssembly(Fragment *frag, RegAllocMap* branchStateMap)
     {
-        internalReset();
+        reset();
+
+        NanoAssert(codeList == 0);
+        NanoAssert(codeStart == 0);
+        NanoAssert(codeEnd == 0);
+        NanoAssert(exitStart == 0);
+        NanoAssert(exitEnd == 0);
+        NanoAssert(_nIns == 0);
+        NanoAssert(_nExitIns == 0);
 
         _thisfrag = frag;
         _activation.lowwatermark = 1;
@@ -789,7 +703,6 @@ namespace nanojit
     void Assembler::assemble(Fragment* frag,  NInsList& loopJumps)
     {
         if (error()) return;
-        AvmCore *core = _frago->core();
         _thisfrag = frag;
 
         // Used for debug printing, if needed
@@ -844,15 +757,13 @@ namespace nanojit
         )
 
         verbose_only(_thisfrag->compileNbr++; )
-        verbose_only(_frago->_stats.compiles++; )
-        verbose_only(_frago->_stats.totalCompiles++; )
         _inExit = false;
 
         LabelStateMap labels(_gc);
         NInsMap patches(_gc);
         gen(prev, loopJumps, labels, patches);
         frag->loopEntry = _nIns;
-        //frag->outbound = core->config.tree_opt? _latestGuard : 0;
+        //frag->outbound = config.tree_opt? _latestGuard : 0;
         //nj_dprintf("assemble frag %X entry %X\n", (int)frag, (int)frag->fragEntry);
 
         if (!error()) {
@@ -891,8 +802,13 @@ namespace nanojit
     {
         // don't try to patch code if we are in an error state since we might have partially
         // overwritten the code cache already
-        if (error())
+        if (error()) {
+            // something went wrong, release all allocated code memory
+            _codeAlloc->freeAll(codeList);
+            _codeAlloc->free(exitStart, exitEnd);
+            _codeAlloc->free(codeStart, codeEnd);
             return;
+        }
 
         NIns* SOT = 0;
         if (frag->isRoot()) {
@@ -910,93 +826,40 @@ namespace nanojit
             nPatchBranch(loopJump, SOT);
         }
 
-        NIns* fragEntry = 0;
+        NIns* fragEntry = genPrologue();
+        verbose_only( outputAddr=true; )
+        verbose_only( asm_output("[prologue]"); )
 
-        if (!error())
-        {
-            fragEntry = genPrologue();
-            verbose_only( outputAddr=true; )
-            verbose_only( asm_output("[prologue]"); )
-        }
-
-        // something bad happened?
-        if (!error())
-        {
-            // check for resource leaks
-            debug_only(
-                for(uint32_t i=_activation.lowwatermark;i<_activation.highwatermark; i++) {
-                    NanoAssertMsgf(_activation.entry[i] == 0, "frame entry %d wasn't freed",-4*i);
-                }
-            )
-
-            frag->fragEntry = fragEntry;
-            NIns* code = _nIns;
-#ifdef PERFM
-            _nvprof("code", codeBytes());  // requires that all pages are released between begin/endAssembly()otherwise we double count
+        // check for resource leaks
+        debug_only(
+            for(uint32_t i=_activation.lowwatermark;i<_activation.highwatermark; i++) {
+                NanoAssertMsgf(_activation.entry[i] == 0, "frame entry %d wasn't freed\n",-4*i);
+            }
+        )
+
+        // save used parts of current block on fragment's code list, free the rest
+#ifdef NANOJIT_ARM
+        _codeAlloc->addRemainder(codeList, exitStart, exitEnd, _nExitSlot, _nExitIns);
+        _codeAlloc->addRemainder(codeList, codeStart, codeEnd, _nSlot, _nIns);
+#else
+        _codeAlloc->addRemainder(codeList, exitStart, exitEnd, exitStart, _nExitIns);
+        _codeAlloc->addRemainder(codeList, codeStart, codeEnd, codeStart, _nIns);
 #endif
-            // let the fragment manage the pages if we're using trees and there are branches
-            Page* manage = (_frago->core()->config.tree_opt) ? handoverPages() : 0;
-            frag->setCode(code, manage); // root of tree should manage all pages
-            //nj_dprintf("endAssembly frag %X entry %X\n", (int)frag, (int)frag->fragEntry);
-        }
-        else
-        {
-            // In case of failure, reset _nIns ready for the next assembly run.
-            resetInstructionPointer();
-        }
 
-        NanoAssertMsgf(error() || _fpuStkDepth == 0,"_fpuStkDepth %d",_fpuStkDepth);
-
-        internalReset();  // clear the reservation tables and regalloc
+        // at this point all our new code is in the d-cache and not the i-cache,
+        // so flush the i-cache on cpu's that need it.
+        _codeAlloc->flushICache(codeList);
+
+        // save entry point pointers
+        frag->fragEntry = fragEntry;
+        frag->setCode(_nIns);
+        // PERFM_NVPROF("code", CodeAlloc::size(codeList));
+
+        NanoAssertMsgf(_fpuStkDepth == 0,"_fpuStkDepth %d\n",_fpuStkDepth);
+
+        debug_only( pageValidate(); )
         NanoAssert( !_branchStateMap || _branchStateMap->isEmpty());
         _branchStateMap = 0;
-
-        // Tell Valgrind that new code has been generated, and it must flush
-        // any translations it has for the memory range generated into.
-        VALGRIND_DISCARD_TRANSLATIONS(pageTop(_nIns-1),     NJ_PAGE_SIZE);
-        VALGRIND_DISCARD_TRANSLATIONS(pageTop(_nExitIns-1), NJ_PAGE_SIZE);
-
-#ifdef AVMPLUS_ARM
-        // If we've modified the code, we need to flush so we don't end up trying
-        // to execute junk
-# if defined(UNDER_CE)
-        FlushInstructionCache(GetCurrentProcess(), NULL, NULL);
-# elif defined(AVMPLUS_UNIX)
-        for (int i = 0; i < 2; i++) {
-            Page *p = (i == 0) ? _nativePages : _nativeExitPages;
-
-            Page *first = p;
-            while (p) {
-                if (!p->next || p->next != p+1) {
-                    __clear_cache((char*)first, (char*)(p+1));
-                    first = p->next;
-                }
-                p = p->next;
-            }
-        }
-# endif
-#endif
-
-#ifdef AVMPLUS_SPARC
-        // Clear Instruction Cache
-        for (int i = 0; i < 2; i++) {
-            Page *p = (i == 0) ? _nativePages : _nativeExitPages;
-
-            Page *first = p;
-            while (p) {
-                if (!p->next || p->next != p+1) {
-                    sync_instruction_memory((char *)first, NJ_PAGE_SIZE);
-                    first = p->next;
-                }
-                p = p->next;
-            }
-        }
-#endif
-
-# ifdef AVMPLUS_PORTING_API
-        NanoJIT_PortAPI_FlushInstructionCache(_nIns, _startingIns);
-        NanoJIT_PortAPI_FlushInstructionCache(_nExitIns, _endJit2Addr);
-# endif
     }
 
     void Assembler::copyRegisters(RegAlloc* copyTo)
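endAssembly() now finishes a fragment by trimming: code was emitted downwards from each block's end, so [_nIns, end) is real code to hand to the fragment's list, and [start, _nIns) is an untouched remainder that addRemainder() can return for reuse. A sketch of that split, with the same kind of stand-in types as the earlier model (not the real addRemainder signature beyond what the calls above show):

    // Illustrative: splitting a partially-filled block, per addRemainder above.
    #include <vector>

    typedef unsigned char NIns;
    struct Range { NIns* start; NIns* end; };

    void addRemainder(std::vector<Range>& codeList, std::vector<Range>& freePool,
                      NIns* start, NIns* end, NIns* firstIns /* the final _nIns */) {
        Range used = { firstIns, end };        // finished code sits at the top
        codeList.push_back(used);
        if (start < firstIns) {
            Range rest = { start, firstIns };  // low part never written; recycle it
            freePool.push_back(rest);
        }
    }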
@@ -2010,8 +1873,6 @@ namespace nanojit
         }
     #endif // verbose
 
-    #endif /* FEATURE_NANOJIT */
-
 #if defined(FEATURE_NANOJIT) || defined(NJ_VERBOSE)
     uint32_t CallInfo::_count_args(uint32_t mask) const
     {
js/src/nanojit/Assembler.h
@@ -97,8 +97,6 @@ namespace nanojit
 #endif
     };
 
-    class Fragmento;
-
     // error codes
     enum AssmError
     {
@@ -171,7 +169,7 @@ namespace nanojit
             LogControl* _logc;
             #endif
 
-            Assembler(Fragmento* frago, LogControl* logc);
+            Assembler(CodeAlloc* codeAlloc, AvmCore* core, LogControl* logc);
             ~Assembler() {}
 
             void        assemble(Fragment* frag, NInsList& loopJumps);
@@ -186,12 +184,9 @@ namespace nanojit
 #endif
             AssmError   error()    { return _err; }
             void        setError(AssmError e) { _err = e; }
-            void        pageReset();
-            int32_t        codeBytes();
-            Page*        handoverPages(bool exitPages=false);
+            void        reset();
 
             debug_only ( void        pageValidate(); )
-            debug_only ( bool        onPage(NIns* where, bool exitPages=false); )
 
             // support calling out from a fragment ; used to debug the jit
             debug_only( void        resourceConsistencyCheck(); )
@@ -199,6 +194,7 @@ namespace nanojit
 
             Stats        _stats;
             int hasLoop;
+            CodeList*   codeList;                   // finished blocks of code.
 
         private:
 
@@ -233,9 +229,7 @@ namespace nanojit
             void        resetInstructionPointer();
             void        recordStartingInstructionPointer();
 
-            NIns*        pageAlloc(bool exitPage=false);
-            void        pagesFree(Page*& list);
-            void        internalReset();
+            void        codeAlloc(NIns *&start, NIns *&end, NIns *&eip);
             bool        canRemat(LIns*);
 
             Reservation* getresv(LIns *x) {
@@ -243,17 +237,19 @@ namespace nanojit
                 return r->used ? r : 0;
             }
 
-            DWB(Fragmento*)        _frago;
+            AvmCore             *core;
+            DWB(CodeAlloc*)     _codeAlloc;
             avmplus::GC*        _gc;
             DWB(Fragment*)        _thisfrag;
             RegAllocMap*        _branchStateMap;
 
+            NIns        *codeStart, *codeEnd;       // current block we're adding code to
+            NIns        *exitStart, *exitEnd;       // current block for exit stubs
+
             NIns*        _nIns;            // current native instruction
             NIns*        _nExitIns;        // current instruction in exit fragment page
             NIns*        _startingIns;    // starting location of code compilation for error handling
             NIns*       _epilogue;
-            Page*        _nativePages;    // list of NJ_PAGE_SIZE pages that have been alloc'd
-            Page*        _nativeExitPages; // list of pages that have been allocated for exit code
             AssmError    _err;            // 0 = means assemble() appears ok, otherwise it failed
 
             AR            _activation;
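The four new members are the bounds of whichever block is currently being filled; the assertion in pageValidate() keeps the emit cursor inside them. A tiny illustrative version of that invariant (the containsPtr definition here is a guess at the obvious implementation, not quoted from the patch):

    // Illustrative invariant for the block-bound members above.
    typedef unsigned char NIns;

    static bool containsPtr(const NIns* start, const NIns* end, const NIns* p) {
        return p >= start && p <= end;     // the cursor may rest on either bound
    }
    // While emitting ordinary code:  containsPtr(codeStart, codeEnd, _nIns)
    // While emitting an exit stub:   containsPtr(exitStart, exitEnd, _nIns)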
@@ -307,19 +303,10 @@ namespace nanojit
             void        assignParamRegs();
             void        handleLoopCarriedExprs(InsList& pending_lives);
 
-            // flag values for nMarkExecute
-            enum
-            {
-                PAGE_READ = 0x0,    // here only for clarity: all permissions include READ
-                PAGE_WRITE = 0x01,
-                PAGE_EXEC = 0x02
-            };
-
             // platform specific implementation (see NativeXXX.cpp file)
             void        nInit(AvmCore *);
             Register    nRegisterAllocFromSet(int32_t set);
             void        nRegisterResetAll(RegAlloc& a);
-            void        nMarkExecute(Page* page, int flags);
             NIns*        nPatchBranch(NIns* branch, NIns* location);
             void        nFragExit(LIns* guard);
 
js/src/nanojit/CodeAlloc.cpp
@@ -47,11 +47,20 @@
 namespace nanojit
 {
     static const bool verbose = false;
+#if defined(NANOJIT_ARM)
+    // ARM requires single-page allocations, due to the constant pool that
+    // lives on each page that must be reachable by a 4kb pcrel load.
     static const int pagesPerAlloc = 1;
-    static const int bytesPerAlloc = pagesPerAlloc * GCHeap::kBlockSize;
+#else
+    static const int pagesPerAlloc = 16;
+#endif
+    static const int bytesPerPage = 4096;
+    static const int bytesPerAlloc = pagesPerAlloc * bytesPerPage;
 
-    CodeAlloc::CodeAlloc(GCHeap* heap)
-        : heap(heap), heapblocks(0)
+    CodeAlloc::CodeAlloc()
+        : heapblocks(0)
+        , availblocks(0)
+        , totalAllocated(0)
     {}
 
     CodeAlloc::~CodeAlloc() {
@@ -62,14 +71,15 @@ namespace nanojit
             CodeList* next = b->next;
             void *mem = firstBlock(b);
             VMPI_setPageProtection(mem, bytesPerAlloc, false /* executable */, true /* writable */);
-            heap->Free(mem);
+            freeCodeChunk(mem, bytesPerAlloc);
+            totalAllocated -= bytesPerAlloc;
             b = next;
         }
     }
 
     CodeList* CodeAlloc::firstBlock(CodeList* term) {
-        // fragile but correct as long as we allocate one block at a time.
-        return (CodeList*) alignTo(term, bytesPerAlloc);
+        char* end = (char*)alignUp(term, bytesPerPage);
+        return (CodeList*) (end - bytesPerAlloc);
    }
 
     int round(size_t x) {
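Two details above deserve unpacking. First, heap->Free is gone: CodeAlloc now calls an embedder-supplied allocCodeChunk/freeCodeChunk pair. A hypothetical POSIX backing for that SPI (illustrative only; a real embedder may use VirtualAlloc, a GC heap, or anything else):

    // Hypothetical POSIX implementation of the CodeAlloc chunk SPI.
    #include <sys/mman.h>
    #include <cstddef>

    void* allocCodeChunk(size_t nbytes) {
        void* mem = mmap(0, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC,
                         MAP_PRIVATE | MAP_ANON, -1, 0);
        return mem == MAP_FAILED ? 0 : mem;    // callers assert non-null
    }

    void freeCodeChunk(void* mem, size_t nbytes) {
        munmap(mem, nbytes);
    }

Second, the new firstBlock() arithmetic: the terminator block sits inside the last page of its multi-page chunk, so rounding its address up to a page boundary lands on the chunk's end address, and stepping back bytesPerAlloc recovers the base. A sketch of the same computation, 16-page case shown (alignUp is assumed to be the usual round-up-to-power-of-two helper):

    // Illustrative restatement of firstBlock() above.
    #include <cstdint>

    static const uintptr_t kPage  = 4096;
    static const uintptr_t kChunk = 16 * kPage;        // 64KB, the non-ARM case

    static char* alignUp(const void* p, uintptr_t a) { // a must be a power of two
        return (char*) (((uintptr_t) p + a - 1) & ~(a - 1));
    }

    char* chunkBase(const void* term) {                // term is never page-aligned
        char* end = alignUp(term, kPage);              // the chunk's end address
        return end - kChunk;                           // back to the chunk base
    }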
@@ -96,23 +106,20 @@ namespace nanojit
     }
 
     void CodeAlloc::alloc(NIns* &start, NIns* &end) {
-        // search each heap block for a free block
-        for (CodeList* hb = heapblocks; hb != 0; hb = hb->next) {
-            // check each block to see if it's free and big enough
-            for (CodeList* b = hb->lower; b != 0; b = b->lower) {
-                if (b->isFree && b->size() >= minAllocSize) {
-                    // good block
-                    b->isFree = false;
-                    start = b->start();
-                    end = b->end;
-                    if (verbose)
-                        avmplus::AvmLog("alloc %p-%p %d\n", start, end, int(end-start));
-                    return;
-                }
-            }
+        // Reuse a block if possible.
+        if (availblocks) {
+            CodeList* b = removeBlock(availblocks);
+            b->isFree = false;
+            start = b->start();
+            end = b->end;
+            if (verbose)
+                avmplus::AvmLog("alloc %p-%p %d\n", start, end, int(end-start));
+            return;
         }
         // no suitable block found, get more memory
-        void *mem = heap->Alloc(pagesPerAlloc);  // allocations never fail
+        void *mem = allocCodeChunk(bytesPerAlloc); // allocations never fail
+        totalAllocated += bytesPerAlloc;
+        NanoAssert(mem != NULL); // see allocCodeChunk contract in CodeAlloc.h
         _nvprof("alloc page", uintptr_t(mem)>>12);
         VMPI_setPageProtection(mem, bytesPerAlloc, true/*executable*/, true/*writable*/);
         CodeList* b = addMem(mem, bytesPerAlloc);
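alloc() above is the payoff of the availblocks change: instead of scanning every block of every heap chunk, a ready-to-use block is popped off a singly-linked free list in O(1). The addBlock/removeBlock pair it relies on is plain LIFO list surgery; a minimal sketch (hypothetical struct, the real CodeList carries block bounds and neighbor links too):

    // Illustrative LIFO free list, as availblocks is used above.
    struct Block {
        Block* next;                 // threads the available-block list
        bool   isFree;
    };

    void addBlock(Block*& head, Block* b) {   // free(): push a reusable block
        b->next = head;
        head = b;
    }

    Block* removeBlock(Block*& head) {        // alloc(): pop in O(1)
        Block* b = head;                      // caller checks head != 0 first
        head = b->next;
        b->next = 0;
        return b;
    }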
| 
						 | 
					@ -124,48 +131,96 @@ namespace nanojit
 | 
				
			||||||
    }
 | 
					    }
 | 
				
			||||||
 | 
					
 | 
				
			||||||
    void CodeAlloc::free(NIns* start, NIns *end) {
 | 
					    void CodeAlloc::free(NIns* start, NIns *end) {
 | 
				
			||||||
 | 
					        NanoAssert(heapblocks);
 | 
				
			||||||
        CodeList *blk = getBlock(start, end);
 | 
					        CodeList *blk = getBlock(start, end);
 | 
				
			||||||
        if (verbose)
 | 
					        if (verbose)
 | 
				
			||||||
            avmplus::AvmLog("free %p-%p %d\n", start, end, (int)blk->size());
 | 
					            avmplus::AvmLog("free %p-%p %d\n", start, end, (int)blk->size());
 | 
				
			||||||
 | 
					
 | 
				
			||||||
        AvmAssert(!blk->isFree);
 | 
					        AvmAssert(!blk->isFree);
 | 
				
			||||||
 | 
					
 | 
				
			||||||
        // coalesce
 | 
					        // coalesce adjacent blocks.
 | 
				
			||||||
 | 
					        bool already_on_avail_list;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
        if (blk->lower && blk->lower->isFree) {
            // combine blk into blk->lower (destroy blk)
            CodeList* lower = blk->lower;
            CodeList* higher = blk->higher;
+            already_on_avail_list = lower->size() >= minAllocSize;
            lower->higher = higher;
            higher->lower = lower;
-            debug_only( sanity_check();)
            blk = lower;
        }
-        // the last block in each heapblock is never free, therefore blk->higher != null
+        else
+            already_on_avail_list = false;
+
+        // the last block in each heapblock is a terminator block,
+        // which is never free, therefore blk->higher != null
        if (blk->higher->isFree) {
-            // combine blk->higher into blk (destroy blk->higher)
            CodeList *higher = blk->higher->higher;
+            CodeList *coalescedBlock = blk->higher;
+
+            if ( coalescedBlock->size() >= minAllocSize ) {
+                // Unlink higher from the available block chain.
+                if ( availblocks == coalescedBlock ) {
+                    removeBlock(availblocks);
+                }
+                else {
+                    CodeList* free_block = availblocks;
+                    while ( free_block && free_block->next != coalescedBlock) {
+                        NanoAssert(free_block->size() >= minAllocSize);
+                        NanoAssert(free_block->isFree);
+                        NanoAssert(free_block->next);
+                        free_block = free_block->next;
+                    }
+                    NanoAssert(free_block && free_block->next == coalescedBlock);
+                    free_block->next = coalescedBlock->next;
+                }
+            }
+
+            // combine blk->higher into blk (destroy blk->higher)
            blk->higher = higher;
            higher->lower = blk;
-            debug_only(sanity_check();)
        }
        blk->isFree = true;
        NanoAssert(!blk->lower || !blk->lower->isFree);
        NanoAssert(blk->higher && !blk->higher->isFree);
        //memset(blk->start(), 0xCC, blk->size()); // INT 3 instruction
+        if ( !already_on_avail_list && blk->size() >= minAllocSize )
+            addBlock(availblocks, blk);
+
+        NanoAssert(heapblocks);
+        debug_only(sanity_check();)
    }
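The hunk above threads free-list bookkeeping (`already_on_avail_list`, the `availblocks` unlink) through what is otherwise classic boundary-tag coalescing. A minimal standalone sketch of that underlying idea, with illustrative names rather than nanojit's real types (CodeList carries more state than this):

    #include <cassert>

    // Illustrative block header: blocks live in address order, linked by
    // lower/higher, like CodeList's lower/higher fields in the hunk above.
    struct Block {
        Block* lower;   // adjacent block at the next lower address, or null
        Block* higher;  // adjacent block at the next higher address, or null
        bool   isFree;
    };

    // Merge 'blk' with any free neighbors and mark the survivor free,
    // mirroring the shape of CodeAlloc::free().  Unlike the real code,
    // blk->higher may be null here (nanojit guarantees a terminator block).
    Block* coalesce(Block* blk) {
        if (blk->lower && blk->lower->isFree) {
            Block* lower = blk->lower;      // blk merges down and disappears
            lower->higher = blk->higher;
            if (blk->higher)
                blk->higher->lower = lower;
            blk = lower;
        }
        if (blk->higher && blk->higher->isFree) {
            Block* higher = blk->higher->higher;  // blk absorbs blk->higher
            blk->higher = higher;
            if (higher)
                higher->lower = blk;
        }
        blk->isFree = true;
        assert(!blk->lower || !blk->lower->isFree); // never two adjacent free blocks
        return blk;
    }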
 
    void CodeAlloc::sweep() {
        debug_only(sanity_check();)
-        CodeList** prev = &heapblocks;
+
+        // Pass #1: remove fully-coalesced blocks from availblocks.
+        CodeList** prev = &availblocks;
+        for (CodeList* ab = availblocks; ab != 0; ab = *prev) {
+            NanoAssert(ab->higher != 0);
+            NanoAssert(ab->isFree);
+            if (!ab->higher->higher && !ab->lower) {
+                *prev = ab->next;
+                debug_only(ab->next = 0;)
+            } else {
+                prev = &ab->next;
+            }
+        }
+
+        // Pass #2: remove same blocks from heapblocks, and free them.
+        prev = &heapblocks;
        for (CodeList* hb = heapblocks; hb != 0; hb = *prev) {
            NanoAssert(hb->lower != 0);
            if (!hb->lower->lower && hb->lower->isFree) {
+                NanoAssert(!hb->lower->next);
                // whole page is unused
                void* mem = hb->lower;
                *prev = hb->next;
                _nvprof("free page",1);
                VMPI_setPageProtection(mem, bytesPerAlloc, false /* executable */, true /* writable */);
-                heap->Free(mem);
+                freeCodeChunk(mem, bytesPerAlloc);
+                totalAllocated -= bytesPerAlloc;
            } else {
                prev = &hb->next;
            }
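Both passes of sweep() rely on the same pointer-to-pointer idiom: `prev` always addresses the link that points at the current node, so unlinking the head of `availblocks` or `heapblocks` needs no special case. A reduced sketch of the idiom (hypothetical Node type, not nanojit's):

    #include <cstddef>

    struct Node { bool doomed; Node* next; };

    // Remove every doomed node from a singly linked list in one pass.
    // 'prev' points at the field that currently links to 'n', so deleting
    // the head and deleting an interior node are the same operation.
    void sweepList(Node** head) {
        Node** prev = head;
        for (Node* n = *head; n != NULL; n = *prev) {
            if (n->doomed) {
                *prev = n->next;    // unlink n; 'prev' stays put
                // (the real sweep() hands the memory to freeCodeChunk here)
            } else {
                prev = &n->next;    // keep n; advance to its link field
            }
        }
    }

Note how `ab = *prev` in the loop header above plays the same role: after an unlink, `*prev` is already the next candidate.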
@@ -185,13 +240,17 @@ extern "C" void __clear_cache(char *BEG, char *END);
 #endif
 
 #ifdef AVMPLUS_SPARC
-extern  "C"    void sync_instruction_memory(caddr_t v, u_int len);
+extern  "C" void sync_instruction_memory(caddr_t v, u_int len);
 #endif
 
 #if defined NANOJIT_IA32 || defined NANOJIT_X64
    // intel chips have dcache/icache interlock
-    void CodeAlloc::flushICache(CodeList* &)
-    {}
+    void CodeAlloc::flushICache(CodeList* &blocks) {
+        // Tell Valgrind that new code has been generated, and it must flush
+        // any translations it has for the memory range generated into.
+        for (CodeList *b = blocks; b != 0; b = b->next)
+            VALGRIND_DISCARD_TRANSLATIONS(b->start(), b->size());
+    }
 
 #elif defined NANOJIT_ARM && defined UNDER_CE
    // on arm/winmo, just flush the whole icache
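On IA32/X64 the new flushICache() body does no actual cache maintenance (those chips keep dcache and icache coherent, as the comment notes); its only job is the Valgrind notification. VALGRIND_DISCARD_TRANSLATIONS comes from Valgrind's valgrind.h and expands to a no-op when the program is not running under Valgrind. A sketch of how an embedder might keep the call compilable when that header is unavailable — the guard macro here is an assumption, not something this commit defines:

    // HAVE_VALGRIND_H is an illustrative build flag, not from this commit.
    #ifdef HAVE_VALGRIND_H
    # include <valgrind/valgrind.h>
    #else
    // Outside a Valgrind-aware build, discarding translations is a no-op.
    # define VALGRIND_DISCARD_TRANSLATIONS(addr, len) ((void)0)
    #endif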
@@ -275,6 +334,7 @@ extern  "C"    void sync_instruction_memory(caddr_t v, u_int len);
 
    CodeList* CodeAlloc::removeBlock(CodeList* &blocks) {
        CodeList* b = blocks;
+        NanoAssert(b);
        blocks = b->next;
        b->next = 0;
        return b;
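removeBlock() above pops the head of the singly linked `availblocks` list; its counterpart addBlock(), called from free() earlier but not shown in these hunks, presumably pushes on the front. A sketch of the assumed pair, with a minimal CodeList stub (the real struct also carries lower/higher/isFree and more):

    struct CodeList { CodeList* next; /* lower/higher/isFree/... elided */ };

    // Assumed shape of addBlock(): LIFO push onto the free list that
    // removeBlock() (shown verbatim above) pops from.
    static void addBlock(CodeList*& blocks, CodeList* b) {
        b->next = blocks;
        blocks = b;
    }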
@@ -350,6 +410,10 @@ extern  "C"    void sync_instruction_memory(caddr_t v, u_int len);
        return size;
    }
 
+    size_t CodeAlloc::size() {
+        return totalAllocated;
+    }
+
    bool CodeAlloc::contains(const CodeList* blocks, NIns* p) {
        for (const CodeList *b = blocks; b != 0; b = b->next) {
            _nvprof("block contains",1);
@@ -394,6 +458,33 @@ extern  "C"    void sync_instruction_memory(caddr_t v, u_int len);
                NanoAssert(b->higher->lower == b);
            }
        }
+        for (CodeList* avail = this->availblocks; avail; avail = avail->next) {
+            NanoAssert(avail->isFree && avail->size() >= minAllocSize);
+        }
+
+        #if CROSS_CHECK_FREE_LIST
+        for(CodeList* term = heapblocks; term; term = term->next) {
+            for(CodeList* hb = term->lower; hb; hb = hb->lower) {
+                if (hb->isFree && hb->size() >= minAllocSize) {
+                    bool found_on_avail = false;
+                    for (CodeList* avail = this->availblocks; !found_on_avail && avail; avail = avail->next) {
+                        found_on_avail = avail == hb;
+                    }
+
+                    NanoAssert(found_on_avail);
+                }
+            }
+        }
+        for (CodeList* avail = this->availblocks; avail; avail = avail->next) {
+            bool found_in_heapblocks = false;
+            for(CodeList* term = heapblocks; !found_in_heapblocks && term; term = term->next) {
+                for(CodeList* hb = term->lower; !found_in_heapblocks && hb; hb = hb->lower) {
+                    found_in_heapblocks = hb == avail;
+                }
+            }
+            NanoAssert(found_in_heapblocks);
+        }
+        #endif /* CROSS_CHECK_FREE_LIST */
    }
    #endif
 }
diff --git a/js/src/nanojit/CodeAlloc.h b/js/src/nanojit/CodeAlloc.h
@@ -42,6 +42,9 @@
 
 namespace nanojit
 {
+    // Temporary tracemonkey hack until namespaces are sorted out.
+    using namespace MMgc;
+
    /** return true if ptr is in the range [start, end) */
    inline bool containsPtr(const NIns* start, const NIns* end, const NIns* ptr) {
        return ptr >= start && ptr < end;
@@ -104,9 +107,15 @@ namespace nanojit
        static const size_t sizeofMinBlock = offsetof(CodeList, code);
        static const size_t minAllocSize = LARGEST_UNDERRUN_PROT;
 
-        GCHeap* heap;
+        /** Terminator blocks.  All active and free allocations
+            are reachable by traversing this chain and each
+            element's lower chain. */
        CodeList* heapblocks;
+
+        /** Reusable blocks. */
+        CodeList* availblocks;
+        size_t totalAllocated;
 
        /** remove one block from a list */
        static CodeList* removeBlock(CodeList* &list);
 
@@ -125,8 +134,22 @@ namespace nanojit
        /** find the beginning of the heapblock terminated by term */
        static CodeList* firstBlock(CodeList* term);
 
+        //
+        // CodeAlloc's SPI.  Implementations must be defined by nanojit embedder.
+        // allocation failures should cause an exception or longjmp; nanojit
+        // intentionally does not check for null.
+        //
+
+        /** allocate nbytes of memory to hold code.  Never return null! */
+        void* allocCodeChunk(size_t nbytes);
+
+        /** free a block previously allocated by allocCodeMem.  nbytes will
+         * match the previous allocCodeMem, but is provided here as well
+         * to mirror the mmap()/munmap() api. */
+        void freeCodeChunk(void* addr, size_t nbytes);
+
    public:
-        CodeAlloc(GCHeap*);
+        CodeAlloc();
        ~CodeAlloc();
 
        /** allocate some memory for code, return pointers to the region. */
@@ -157,6 +180,9 @@ namespace nanojit
        /** return the number of bytes in all the code blocks in "code", including block overhead */
        static size_t size(const CodeList* code);
 
+        /** return the total number of bytes held by this CodeAlloc. */
+        size_t size();
+
        /** print out stats about heap usage */
        void logStats();
 
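The SPI block above is the seam this commit creates between nanojit and its embedder: the allocator never checks for null, so the embedder must make allocation failure fatal (or throw/longjmp). One plausible POSIX implementation of the two hooks, offered as a sketch only — the actual TraceMonkey definitions live in the embedding, not in this header, and may differ:

    #include <sys/mman.h>
    #include <cstdlib>
    // Assumes the CodeAlloc declaration above is in scope.

    void* CodeAlloc::allocCodeChunk(size_t nbytes) {
        // Request writable+executable pages; sweep() above flips protection
        // with VMPI_setPageProtection before returning pages to the OS.
        void* mem = mmap(NULL, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC,
                         MAP_PRIVATE | MAP_ANON, -1, 0);
        if (mem == MAP_FAILED)
            abort();            // "Never return null!" -- the contract above
        return mem;
    }

    void CodeAlloc::freeCodeChunk(void* addr, size_t nbytes) {
        // nbytes matches the original allocation, mirroring mmap()/munmap().
        munmap(addr, nbytes);
    }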
diff --git a/js/src/nanojit/Fragmento.cpp b/js/src/nanojit/Fragmento.cpp
@@ -58,17 +58,16 @@
    /**
     * This is the main control center for creating and managing fragments.
     */
-    Fragmento::Fragmento(AvmCore* core, LogControl* logc, uint32_t cacheSizeLog2)
+    Fragmento::Fragmento(AvmCore* core, LogControl* logc, uint32_t cacheSizeLog2, CodeAlloc* codeAlloc)
        :
 #ifdef NJ_VERBOSE
          enterCounts(NULL),
          mergeCounts(NULL),
          labels(NULL),
 #endif
+          _core(core),
+          _codeAlloc(codeAlloc),
          _frags(core->GetGC()),
-          _freePages(core->GetGC(), 1024),
-          _allocList(core->GetGC()),
-          _gcHeap(NULL),
          _max_pages(1 << (calcSaneCacheSize(cacheSizeLog2) - NJ_LOG2_PAGE_SIZE)),
          _pagesGrowth(1)
    {
@@ -99,119 +98,21 @@ namespace nanojit
        _allocList.set_meminfo_name("Fragmento._allocList");
 #endif
        NanoAssert(_max_pages > _pagesGrowth); // shrink growth if needed
-        _core = core;
-        GC *gc = core->GetGC();
-        _assm = NJ_NEW(gc, nanojit::Assembler)(this, logc);
-        verbose_only( enterCounts = NJ_NEW(gc, BlockHist)(gc); )
-        verbose_only( mergeCounts = NJ_NEW(gc, BlockHist)(gc); )
+        verbose_only( enterCounts = NJ_NEW(core->gc, BlockHist)(core->gc); )
+        verbose_only( mergeCounts = NJ_NEW(core->gc, BlockHist)(core->gc); )
 
        memset(&_stats, 0, sizeof(_stats));
    }
 
    Fragmento::~Fragmento()
    {
-        AllocEntry *entry;
-
        clearFrags();
-        _frags.clear();
-        _freePages.clear();
-        while( _allocList.size() > 0 )
-        {
-            //nj_dprintf("dealloc %x\n", (intptr_t)_allocList.get(_allocList.size()-1));
-#ifdef MEMORY_INFO
-            ChangeSizeExplicit("NanoJitMem", -1, _gcHeap->Size(_allocList.last()));
-#endif
-            entry = _allocList.removeLast();
-            _gcHeap->Free( entry->page, entry->allocSize );
-            NJ_DELETE(entry);
-        }
-        NJ_DELETE(_assm);
 #if defined(NJ_VERBOSE)
        NJ_DELETE(enterCounts);
        NJ_DELETE(mergeCounts);
 #endif
    }
 
-    void Fragmento::trackPages()
-    {
-        const uint32_t pageUse = _stats.pages - _freePages.size();
-        if (_stats.maxPageUse < pageUse)
-            _stats.maxPageUse = pageUse;
-    }
-
-    Page* Fragmento::pageAlloc()
-    {
-        NanoAssert(sizeof(Page) == NJ_PAGE_SIZE);
-        if (!_freePages.size()) {
-            pagesGrow(_pagesGrowth);    // try to get more mem
-            if ((_pagesGrowth << 1) < _max_pages)
-                _pagesGrowth <<= 1;
-        }
-
-        trackPages();
-        Page* page = 0;
-        if (_freePages.size())
-            page = _freePages.removeLast();
-        return page;
-        }
-
-    void Fragmento::pagesRelease(PageList& l)
-        {
-        _freePages.add(l);
-        l.clear();
-        NanoAssert(_freePages.size() <= _stats.pages);
-    }
-
-    void Fragmento::pageFree(Page* page)
-    {
-        _freePages.add(page);
-        NanoAssert(_freePages.size() <= _stats.pages);
-    }
-
-    void Fragmento::pagesGrow(int32_t count)
-    {
-        NanoAssert(!_freePages.size());
-        MMGC_MEM_TYPE("NanojitFragmentoMem");
-        Page* memory = 0;
-        GC *gc = _core->GetGC();
-        if (_stats.pages < _max_pages)
-        {
-            AllocEntry *entry;
-
-            // make sure we don't grow beyond _max_pages
-            if (_stats.pages + count > _max_pages)
-                count = _max_pages - _stats.pages;
-            if (count < 0)
-                count = 0;
-            // @todo nastiness that needs a fix'n
-            _gcHeap = gc->GetGCHeap();
-            NanoAssert(int32_t(NJ_PAGE_SIZE)<=_gcHeap->kNativePageSize);
-
-            // convert _max_pages to gc page count
-            int32_t gcpages = (count*NJ_PAGE_SIZE) / _gcHeap->kNativePageSize;
-            MMGC_MEM_TYPE("NanoJitMem");
-            memory = (Page*)_gcHeap->Alloc(gcpages);
-#ifdef MEMORY_INFO
-            ChangeSizeExplicit("NanoJitMem", 1, _gcHeap->Size(memory));
-#endif
-            NanoAssert((uintptr_t)memory == pageTop(memory));
-            //nj_dprintf("head alloc of %d at %x of %d pages using nj page size of %d\n", gcpages, (intptr_t)memory, (intptr_t)_gcHeap->kNativePageSize, NJ_PAGE_SIZE);
-
-            entry = NJ_NEW(gc, AllocEntry);
-            entry->page = memory;
-            entry->allocSize = gcpages;
-            _allocList.add(entry);
-
-            _stats.pages += count;
-            Page* page = memory;
-            while(--count >= 0)
-            {
-                //nj_dprintf("Fragmento::pageGrow adding page %x ; %d\n", (unsigned)page, _freePages.size()+1);
-                _freePages.add(page++);
-            }
-            trackPages();
-        }
-    }
 
    // Clear the fragment. This *does not* remove the fragment from the
    // map--the caller must take care of this.
@@ -220,19 +121,16 @@ namespace nanojit
        Fragment *peer = f->peer;
        while (peer) {
            Fragment *next = peer->peer;
-            peer->releaseTreeMem(this);
+            peer->releaseTreeMem(_codeAlloc);
            NJ_DELETE(peer);
            peer = next;
        }
-        f->releaseTreeMem(this);
+        f->releaseTreeMem(_codeAlloc);
        NJ_DELETE(f);
    }
 
    void Fragmento::clearFrags()
    {
-        // reclaim any dangling native pages
-        _assm->pageReset();
-
        while (!_frags.isEmpty()) {
            clearFragment(_frags.removeLast());
        }
@@ -244,11 +142,6 @@ namespace nanojit
        //nj_dprintf("Fragmento.clearFrags %d free pages of %d\n", _stats.freePages, _stats.pages);
    }
 
-    Assembler* Fragmento::assm()
-    {
-        return _assm;
-    }
-
    AvmCore* Fragmento::core()
    {
        return _core;
@@ -299,195 +192,6 @@ namespace nanojit
        return f;
    }
 
-#ifdef NJ_VERBOSE
-    struct fragstats {
-        int size;
-        uint64_t traceDur;
-        uint64_t interpDur;
-        int lir, lirbytes;
-    };
-
-    void Fragmento::dumpFragStats(Fragment *f, int level, fragstats &stat)
-    {
-        char buf[50];
-        sprintf(buf, "%*c%s", 1+level, ' ', labels->format(f));
-
-        int called = f->hits();
-        if (called >= 0)
-            called += f->_called;
-        else
-            called = -(1<<f->blacklistLevel) - called - 1;
-
-        uint32_t main = f->_native - f->_exitNative;
-
-        char cause[200];
-        if (f->_token && strcmp(f->_token,"loop")==0)
-            sprintf(cause,"%s %d", f->_token, f->xjumpCount);
-        else if (f->_token) {
-            if (f->eot_target) {
-                sprintf(cause,"%s %s", f->_token, labels->format(f->eot_target));
-            } else {
-                strcpy(cause, f->_token);
-            }
-        }
-        else
-            cause[0] = 0;
-
-        _assm->outputf("%-10s %7d %6d %6d %6d %4d %9llu %9llu %-12s %s", buf,
-            called, f->guardCount, main, f->_native, f->compileNbr, f->traceTicks/1000, f->interpTicks/1000,
-            cause, labels->format(f->ip));
-
-        stat.size += main;
-        stat.traceDur += f->traceTicks;
-        stat.interpDur += f->interpTicks;
-        stat.lir += f->_lir;
-        stat.lirbytes += f->_lirbytes;
-
-        for (Fragment *x = f->branches; x != 0; x = x->nextbranch)
-            if (x->kind != MergeTrace)
-                dumpFragStats(x,level+1,stat);
-        for (Fragment *x = f->branches; x != 0; x = x->nextbranch)
-            if (x->kind == MergeTrace)
-                dumpFragStats(x,level+1,stat);
-
-        if (f->isAnchor() && f->branches != 0) {
-            _assm->output("");
-        }
-    }
-
-    class DurData { public:
-        DurData(): frag(0), traceDur(0), interpDur(0), size(0) {}
-        DurData(int): frag(0), traceDur(0), interpDur(0), size(0) {}
-        DurData(Fragment* f, uint64_t td, uint64_t id, int32_t s)
-            : frag(f), traceDur(td), interpDur(id), size(s) {}
-        Fragment* frag;
-        uint64_t traceDur;
-        uint64_t interpDur;
-        int32_t size;
-    };
-
-    void Fragmento::dumpRatio(const char *label, BlockHist *hist)
-    {
-        int total=0, unique=0;
-        for (int i = 0, n=hist->size(); i < n; i++) {
-            const void * id = hist->keyAt(i);
-            int c = hist->get(id);
-            if (c > 1) {
-                //_assm->outputf("%d %X", c, id);
-                unique += 1;
-            }
-            else if (c == 1) {
-                unique += 1;
-            }
-            total += c;
-        }
-        _assm->outputf("%s total %d unique %d ratio %.1f%", label, total, unique, double(total)/unique);
-    }
-
-    void Fragmento::dumpStats()
-    {
-        _assm->output("");
-        dumpRatio("inline", enterCounts);
-        dumpRatio("merges", mergeCounts);
-        _assm->outputf("abc %d il %d (%.1fx) abc+il %d (%.1fx)",
-            _stats.abcsize, _stats.ilsize, (double)_stats.ilsize/_stats.abcsize,
-            _stats.abcsize + _stats.ilsize,
-            double(_stats.abcsize+_stats.ilsize)/_stats.abcsize);
-
-        int32_t count = _frags.size();
-        int32_t pages =  _stats.pages;
-        int32_t maxPageUse =  _stats.maxPageUse;
-        int32_t free = _freePages.size();
-        int32_t flushes = _stats.flushes;
-        if (!count)
-        {
-            _assm->outputf("No fragments in cache, %d flushes", flushes);
-            return;
-        }
-
-        _assm->outputf("\nFragment statistics");
-        _assm->outputf("  loop trees:     %d", count);
-        _assm->outputf("  flushes:        %d", flushes);
-        _assm->outputf("  compiles:       %d / %d", _stats.compiles, _stats.totalCompiles);
-        _assm->outputf("  used:           %dk / %dk", (pages-free)<<(NJ_LOG2_PAGE_SIZE-10), pages<<(NJ_LOG2_PAGE_SIZE-10));
-        _assm->outputf("  maxPageUse:     %dk", (maxPageUse)<<(NJ_LOG2_PAGE_SIZE-10));
-        _assm->output("\ntrace         calls guards   main native  gen   T-trace  T-interp");
-
-        avmplus::SortedMap<uint64_t, DurData, avmplus::LIST_NonGCObjects> durs(_core->gc);
-        uint64_t totaldur=0;
-        fragstats totalstat = { 0,0,0,0,0 };
-        for (int32_t i=0; i<count; i++)
-        {
-            Fragment *f = _frags.at(i);
-            while (true) {
-                fragstats stat = { 0,0,0,0,0 };
-                dumpFragStats(f, 0, stat);
-                if (stat.lir) {
-                    totalstat.lir += stat.lir;
-                    totalstat.lirbytes += stat.lirbytes;
-                }
-                uint64_t bothDur = stat.traceDur + stat.interpDur;
-                if (bothDur) {
-                    totalstat.interpDur += stat.interpDur;
-                    totalstat.traceDur += stat.traceDur;
-                    totalstat.size += stat.size;
-                    totaldur += bothDur;
-                    while (durs.containsKey(bothDur)) bothDur++;
-                    DurData d(f, stat.traceDur, stat.interpDur, stat.size);
-                    durs.put(bothDur, d);
-                }
-                if (!f->peer)
-                    break;
-                f = f->peer;
-            }
-        }
-        uint64_t totaltrace = totalstat.traceDur;
-        int totalsize = totalstat.size;
-
-        _assm->outputf("");
-        _assm->outputf("lirbytes %d / lir %d = %.1f bytes/lir", totalstat.lirbytes,
-            totalstat.lir, double(totalstat.lirbytes)/totalstat.lir);
-        _assm->outputf("       trace         interp");
-        _assm->outputf("%9lld (%2d%%)  %9lld (%2d%%)",
-            totaltrace/1000, int(100.0*totaltrace/totaldur),
-            (totaldur-totaltrace)/1000, int(100.0*(totaldur-totaltrace)/totaldur));
-        _assm->outputf("");
-        _assm->outputf("trace      ticks            trace           interp           size");
-        for (int32_t i=durs.size()-1; i >= 0; i--) {
-            uint64_t bothDur = durs.keyAt(i);
-            DurData d = durs.get(bothDur);
-            int size = d.size;
-            _assm->outputf("%-4s %9lld (%2d%%)  %9lld (%2d%%)  %9lld (%2d%%)  %6d (%2d%%)  %s",
-                labels->format(d.frag),
-                bothDur/1000, int(100.0*bothDur/totaldur),
-                d.traceDur/1000, int(100.0*d.traceDur/totaldur),
-                d.interpDur/1000, int(100.0*d.interpDur/totaldur),
-                size, int(100.0*size/totalsize),
-                labels->format(d.frag->ip));
-        }
-
-    }
-
-    void Fragmento::countBlock(BlockHist *hist, const void* ip)
-    {
-        int c = hist->count(ip);
-        if (_assm->_logc->lcbits & LC_Assembly)
-            _assm->outputf("++ %s %d", labels->format(ip), c);
-    }
-
-    void Fragmento::countIL(uint32_t il, uint32_t abc)
-    {
-        _stats.ilsize += il;
-        _stats.abcsize += abc;
-    }
-
-#ifdef AVMPLUS_VERBOSE
-    void Fragmento::drawTrees(char *fileName) {
-        drawTraceTrees(this, this->_frags, this->_core, fileName);
-    }
-#endif
-#endif // NJ_VERBOSE
-
    //
    // Fragment
    //
@@ -526,16 +230,15 @@ namespace nanojit
          fragEntry(NULL),
          loopEntry(NULL),
          vmprivate(NULL),
+          codeList(0),
          _code(NULL),
-          _hits(0),
-          _pages(NULL)
+          _hits(0)
    {
    }
 
    Fragment::~Fragment()
    {
        onDestroy();
-        NanoAssert(_pages == 0);
    }
 
    void Fragment::blacklist()
@@ -578,28 +281,23 @@ namespace nanojit
        lastIns = 0;
    }
 
-    void Fragment::releaseCode(Fragmento* frago)
+    void Fragment::releaseCode(CodeAlloc *codeAlloc)
    {
        _code = 0;
-        while(_pages)
-        {
-            Page* next = _pages->next;
-            frago->pageFree(_pages);
-            _pages = next;
-        }
+        codeAlloc->freeAll(codeList);
    }
 
-    void Fragment::releaseTreeMem(Fragmento* frago)
+    void Fragment::releaseTreeMem(CodeAlloc *codeAlloc)
    {
        releaseLirBuffer();
-        releaseCode(frago);
+        releaseCode(codeAlloc);
 
        // now do it for all branches
        Fragment* branch = branches;
        while(branch)
        {
            Fragment* next = branch->nextbranch;
-            branch->releaseTreeMem(frago);  // @todo safer here to recurse in case we support nested trees
+            branch->releaseTreeMem(codeAlloc);  // @todo safer here to recurse in case we support nested trees
            NJ_DELETE(branch);
            branch = next;
        }
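Taken together, the Fragmento.cpp hunks change who owns code memory: the Fragmento no longer runs its own page pool and Assembler, it borrows a CodeAlloc and threads it down to each Fragment, whose releaseCode() now just hands its codeList back via freeAll(). A sketch of the resulting call flow, using only signatures visible in these hunks — illustrative glue, not TraceMonkey's code; the real embedding uses NJ_NEW and MMgc-managed lifetimes rather than plain new/delete:

    // Hypothetical embedder glue, given an AvmCore* and LogControl*.
    void setupAndTeardown(avmplus::AvmCore* core, nanojit::LogControl* logc) {
        nanojit::CodeAlloc* codeAlloc = new nanojit::CodeAlloc();  // new no-arg ctor
        nanojit::Fragmento* frago =
            new nanojit::Fragmento(core, logc, 24 /* cacheSizeLog2 */, codeAlloc);

        // ... record traces, compile fragments ...

        frago->clearFrags();  // each fragment: releaseTreeMem(_codeAlloc)
                              //   -> releaseCode() -> codeAlloc->freeAll(codeList)
        codeAlloc->sweep();   // hand fully-coalesced pages back to the OS
        delete frago;
        delete codeAlloc;
    }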
diff --git a/js/src/nanojit/Fragmento.h b/js/src/nanojit/Fragmento.h
@@ -52,27 +52,6 @@ namespace nanojit
    struct GuardRecord;
    class Assembler;
 
-    struct PageHeader
-    {
-        struct Page *next;
-    };
-    struct Page: public PageHeader
-    {
-        union {
-            // Conceptually, the lir array holds mostly LIns values (plus some
-            // skip payloads and call arguments).  But we use int8_t as the
-            // element type here so the array size can be expressed in bytes.
-            int8_t lir[NJ_PAGE_SIZE-sizeof(PageHeader)];
-            NIns code[(NJ_PAGE_SIZE-sizeof(PageHeader))/sizeof(NIns)];
-        };
-    };
-    struct AllocEntry : public avmplus::GCObject
-    {
-        Page *page;
-        uint32_t allocSize;
-    };
-    typedef avmplus::List<AllocEntry*,avmplus::LIST_NonGCObjects>    AllocList;
-
    typedef avmplus::GCSortedMap<const void*, uint32_t, avmplus::LIST_NonGCObjects> BlockSortedMap;
    class BlockHist: public BlockSortedMap
    {
@@ -95,15 +74,10 @@ namespace nanojit
    class Fragmento : public avmplus::GCFinalizedObject
    {
        public:
-            Fragmento(AvmCore* core, LogControl* logc, uint32_t cacheSizeLog2);
+            Fragmento(AvmCore* core, LogControl* logc, uint32_t cacheSizeLog2, CodeAlloc *codeAlloc);
            ~Fragmento();
 
-            void        addMemory(void* firstPage, uint32_t pageCount);  // gives memory to the Assembler
-            Assembler*    assm();
            AvmCore*    core();
-            Page*        pageAlloc();
-            void        pageFree(Page* page);
-            void        pagesRelease(PageList& list);
 
            Fragment*   getLoop(const void* ip);
            Fragment*   getAnchor(const void* ip);
@@ -117,18 +91,12 @@ namespace nanojit
            Fragment*   newBranch(Fragment *from, const void* ip);
 
            verbose_only ( uint32_t pageCount(); )
-            verbose_only ( void dumpStats(); )
-            verbose_only ( void dumpRatio(const char*, BlockHist*);)
-            verbose_only ( void dumpFragStats(Fragment*, int level, fragstats&); )
-            verbose_only ( void countBlock(BlockHist*, const void* pc); )
-            verbose_only ( void countIL(uint32_t il, uint32_t abc); )
            verbose_only( void addLabel(Fragment* f, const char *prefix, int id); )
 
            // stats
            struct
            {
                uint32_t    pages;                    // pages consumed
-                uint32_t    maxPageUse;                // highwater mark of (pages-freePages)
                uint32_t    flushes, ilsize, abcsize, compiles, totalCompiles;
            }
            _stats;
@@ -141,21 +109,11 @@ namespace nanojit
            void    drawTrees(char *fileName);
            #endif
 
-            uint32_t cacheUsed() const { return (_stats.pages-_freePages.size())<<NJ_LOG2_PAGE_SIZE; }
-            uint32_t cacheUsedMax() const { return (_stats.maxPageUse)<<NJ_LOG2_PAGE_SIZE; }
            void        clearFragment(Fragment *f);
        private:
-            void        pagesGrow(int32_t count);
-            void        trackPages();
-
-            AvmCore*            _core;
-            DWB(Assembler*)        _assm;
+            AvmCore*        _core;
+            DWB(CodeAlloc*) _codeAlloc;
            FragmentMap     _frags;        /* map from ip -> Fragment ptr  */
-            PageList        _freePages;
-
-            /* unmanaged mem */
-            AllocList    _allocList;
-            avmplus::GCHeap* _gcHeap;
 
            const uint32_t _max_pages;
            uint32_t _pagesGrowth;
@@ -181,14 +139,13 @@ namespace nanojit
            ~Fragment();
 
            NIns*            code()                            { return _code; }
-            Page*            pages()                            { return _pages; }
-            void            setCode(NIns* codee, Page* pages) { _code = codee; _pages = pages; }
-            int32_t&        hits()                            { return _hits; }
+            void            setCode(NIns* codee)               { _code = codee; }
+            int32_t&        hits()                             { return _hits; }
            void            blacklist();
            bool            isBlacklisted()        { return _hits < 0; }
            void            releaseLirBuffer();
-            void            releaseCode(Fragmento* frago);
-            void            releaseTreeMem(Fragmento* frago);
+            void            releaseCode(CodeAlloc *alloc);
+            void            releaseTreeMem(CodeAlloc *alloc);
            bool            isAnchor() { return anchor == this; }
            bool            isRoot() { return root == this; }
            void            onDestroy();
@@ -226,12 +183,11 @@ namespace nanojit
            NIns* fragEntry;
            NIns* loopEntry;
            void* vmprivate;
+            CodeList* codeList;
 
        private:
            NIns*            _code;        // ptr to start of code
-            GuardRecord*    _links;        // code which is linked (or pending to be) to this fragment
-            int32_t            _hits;
-            Page*            _pages;        // native code pages
+            int32_t          _hits;
    };
 }
 #endif // __nanojit_Fragmento__
diff --git a/js/src/nanojit/LIR.cpp b/js/src/nanojit/LIR.cpp
@@ -109,47 +109,41 @@ namespace nanojit
 #endif /* NJ_PROFILE */
 
    // LCompressedBuffer
-    LirBuffer::LirBuffer(Fragmento* frago)
-        : _frago(frago),
+    LirBuffer::LirBuffer(Allocator& alloc)
+        :
 #ifdef NJ_VERBOSE
          names(NULL),
 #endif
-          abi(ABI_FASTCALL),
-          state(NULL), param1(NULL), sp(NULL), rp(NULL),
-          _pages(frago->core()->GetGC())
+          abi(ABI_FASTCALL), state(NULL), param1(NULL), sp(NULL), rp(NULL),
+          _allocator(alloc), _bytesAllocated(0)
    {
-        rewind();
+        clear();
    }
 
    LirBuffer::~LirBuffer()
    {
        clear();
        verbose_only(if (names) NJ_DELETE(names);)
-        _frago = 0;
    }
 
    void LirBuffer::clear()
    {
-        // free all the memory and clear the stats
-        _frago->pagesRelease(_pages);
-        NanoAssert(!_pages.size());
+        // clear the stats, etc
        _unused = 0;
+        _limit = 0;
+        _bytesAllocated = 0;
        _stats.lir = 0;
-        _noMem = 0;
-        _nextPage = 0;
        for (int i = 0; i < NumSavedRegs; ++i)
            savedRegs[i] = NULL;
        explicitSavedRegs = false;
+        chunkAlloc();
    }
 
-    void LirBuffer::rewind()
+    void LirBuffer::chunkAlloc()
    {
-        clear();
-        // pre-allocate the current and the next page we will be using
-        Page* start = pageAlloc();
-        _unused = start ? uintptr_t(&start->lir[0]) : 0;
-        _nextPage = pageAlloc();
-        NanoAssert((_unused && _nextPage) || _noMem);
+        _unused = (uintptr_t) _allocator.alloc(CHUNK_SZB);
+        NanoAssert(_unused != 0); // Allocator.alloc() never returns null. See Allocator.h
+        _limit = _unused + CHUNK_SZB;
    }
 
    int32_t LirBuffer::insCount()
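The LirBuffer rewrite above swaps the Fragmento-owned page list for fixed-size chunks carved out of the new Allocator with a bump pointer. A reduced sketch of the scheme — malloc stands in for the Allocator (which, per the assert above, never returns null), the CHUNK_SZB value is illustrative, and the skip-instruction back-link written by moveToNewChunk() is elided:

    #include <cstdint>
    #include <cstdlib>

    struct BumpBuffer {
        static const size_t CHUNK_SZB = 8000;   // illustrative chunk size
        uintptr_t unused;                       // next free byte
        uintptr_t limit;                        // one past the chunk's end

        BumpBuffer() { chunkAlloc(); }

        void chunkAlloc() {
            unused = (uintptr_t) malloc(CHUNK_SZB);
            limit  = unused + CHUNK_SZB;
        }

        // Same shape as LirBuffer::makeRoom(): bump, or start a new chunk.
        void* makeRoom(size_t szB) {
            if (unused + szB > limit)
                chunkAlloc();   // the real moveToNewChunk() also writes a
                                // skip link back to the previous chunk here
            void* room = (void*) unused;
            unused += szB;
            return room;
        }
    };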
@@ -161,75 +155,56 @@ namespace nanojit
 
    size_t LirBuffer::byteCount()
    {
-        return ((_pages.size() ? _pages.size()-1 : 0) * sizeof(Page)) +
-            (_unused - pageTop(_unused));
-    }
-
-    Page* LirBuffer::pageAlloc()
-    {
-        Page* page = _frago->pageAlloc();
-        if (page)
-            _pages.add(page);
-        else
-            _noMem = 1;
-        return page;
+        return _bytesAllocated - (_limit - _unused);
    }
 
    // Allocate a new page, and write the first instruction to it -- a skip
    // linking to last instruction of the previous page.
-    void LirBuffer::moveToNewPage(uintptr_t addrOfLastLInsOnCurrentPage)
+    void LirBuffer::moveToNewChunk(uintptr_t addrOfLastLInsOnCurrentChunk)
    {
-        // We don't want this to fail, so we always have a page in reserve.
-        NanoAssert(_nextPage);
-        _unused = uintptr_t(&_nextPage->lir[0]);
-        _nextPage = pageAlloc();
-        NanoAssert(_nextPage || _noMem);
-
+        chunkAlloc();
        // Link LIR stream back to prior instruction.
        // Unlike all the ins*() functions, we don't call makeRoom() here
        // because we know we have enough space, having just started a new
        // page.
        LInsSk* insSk = (LInsSk*)_unused;
        LIns*   ins   = insSk->getLIns();
-        ins->initLInsSk((LInsp)addrOfLastLInsOnCurrentPage);
+        ins->initLInsSk((LInsp)addrOfLastLInsOnCurrentChunk);
        _unused += sizeof(LInsSk);
-        _stats.lir++;
+        verbose_only(_stats.lir++);
    }
 
    // Make room for a single instruction.
    uintptr_t LirBuffer::makeRoom(size_t szB)
    {
-        // Make sure the size is ok, and that we're not pointing to the
-        // PageHeader.
+        // Make sure the size is ok
        NanoAssert(0 == szB % sizeof(void*));
-        NanoAssert(sizeof(LIns) <= szB && szB <= NJ_MAX_LINS_SZB);
-        NanoAssert(_unused >= pageDataStart(_unused));
+        NanoAssert(sizeof(LIns) <= szB && szB <= MAX_LINS_SZB);
+        NanoAssert(_unused < _limit);
 
-        // If the instruction won't fit on the current page, move to the next
-        // page.
-        if (_unused + szB - 1 > pageBottom(_unused)) {
-            uintptr_t addrOfLastLInsOnPage = _unused - sizeof(LIns);
-            moveToNewPage(addrOfLastLInsOnPage);
+        // If the instruction won't fit on the current chunk, get a new chunk
+        if (_unused + szB > _limit) {
+            uintptr_t addrOfLastLInsOnChunk = _unused - sizeof(LIns);
+            moveToNewChunk(addrOfLastLInsOnChunk);
        }
 
-        // We now know that we are on a page that has the requested amount of
+        // We now know that we are on a chunk that has the requested amount of
 | 
				
			||||||
        // room: record the starting address of the requested space and bump
 | 
					        // room: record the starting address of the requested space and bump
 | 
				
			||||||
        // the pointer.
 | 
					        // the pointer.
 | 
				
			||||||
        uintptr_t startOfRoom = _unused;
 | 
					        uintptr_t startOfRoom = _unused;
 | 
				
			||||||
        _unused += szB;
 | 
					        _unused += szB;
 | 
				
			||||||
        _stats.lir++;             // count the instruction
 | 
					        verbose_only(_stats.lir++);             // count the instruction
 | 
				
			||||||
 | 
					
 | 
				
			||||||
        // If there's no more space on this page, move to the next page.
 | 
					        // If there's no more space on this chunk, move to a new one.
 | 
				
			||||||
        // (This will only occur if the asked-for size filled up exactly to
 | 
					        // (This will only occur if the asked-for size filled up exactly to
 | 
				
			||||||
        // the end of the page.)  This ensures that next time we enter this
 | 
					        // the end of the chunk.)  This ensures that next time we enter this
 | 
				
			||||||
        // function, _unused won't be pointing one byte past the end of
 | 
					        // function, _unused won't be pointing one byte past the end of
 | 
				
			||||||
        // the page, which would break everything.
 | 
					        // the chunk, which would break everything.
 | 
				
			||||||
        if (_unused > pageBottom(startOfRoom)) {
 | 
					        if (_unused >= _limit) {
 | 
				
			||||||
            // Check we only spilled over by one byte.
 | 
					            // Check we used exactly the remaining space
 | 
				
			||||||
            NanoAssert(_unused == pageTop(_unused));
 | 
					            NanoAssert(_unused == _limit);
 | 
				
			||||||
            NanoAssert(_unused == pageBottom(startOfRoom) + 1);
 | 
					            uintptr_t addrOfLastLInsOnChunk = _unused - sizeof(LIns);
 | 
				
			||||||
            uintptr_t addrOfLastLInsOnPage = _unused - sizeof(LIns);
 | 
					            moveToNewChunk(addrOfLastLInsOnChunk);
 | 
				
			||||||
            moveToNewPage(addrOfLastLInsOnPage);
 | 
					 | 
				
			||||||
        }
 | 
					        }
 | 
				
			||||||
 | 
					
 | 
				
			||||||
        // Make sure it's word-aligned.
 | 
					        // Make sure it's word-aligned.
 | 
				
			||||||
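
The hunk above replaces the page list and reserve page with a plain bump pointer over allocator-owned chunks. A minimal standalone sketch of the same pattern follows; it uses malloc in place of nanojit's arena Allocator, leaks its chunks (the real Allocator frees them in bulk), and omits the skip instruction that links chunks, so the names and structure are illustrative only:

    #include <cassert>
    #include <cstdint>
    #include <cstdlib>

    // Sketch of the bump-pointer scheme makeRoom() now uses: carve fixed-size
    // chunks out of a backing allocator and advance a cursor within the chunk.
    class BumpBuffer {
        static const size_t CHUNK = 8000;   // cf. LirBuffer::CHUNK_SZB
        uintptr_t unused, limit;            // cursor and one-past-end, cf. _unused/_limit
    public:
        BumpBuffer() { grow(); }
        void grow() {                       // cf. LirBuffer::chunkAlloc()
            unused = (uintptr_t) malloc(CHUNK);   // chunks intentionally leak here
            assert(unused && "backing allocator must not return null");
            limit = unused + CHUNK;
        }
        uintptr_t makeRoom(size_t szB) {
            if (unused + szB > limit)       // won't fit: start a fresh chunk
                grow();
            uintptr_t start = unused;
            unused += szB;
            if (unused >= limit) {          // filled the chunk exactly: don't leave
                assert(unused == limit);    // the cursor dangling past the end
                grow();
            }
            return start;
        }
    };
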
@@ -342,15 +317,16 @@ namespace nanojit
 
         // NJ_MAX_SKIP_PAYLOAD_SZB, NJ_MAX_SKIP_PAYLOAD_SZB must also be a
         // multiple of the word size, which we check.
         payload_szB = alignUp(payload_szB, sizeof(void*));
-        NanoAssert(0 == NJ_MAX_SKIP_PAYLOAD_SZB % sizeof(void*));
-        NanoAssert(sizeof(void*) <= payload_szB && payload_szB <= NJ_MAX_SKIP_PAYLOAD_SZB);
+        NanoAssert(0 == LirBuffer::MAX_SKIP_PAYLOAD_SZB % sizeof(void*));
+        NanoAssert(sizeof(void*) <= payload_szB && payload_szB <= LirBuffer::MAX_SKIP_PAYLOAD_SZB);
 
         uintptr_t payload = _buf->makeRoom(payload_szB + sizeof(LInsSk));
         uintptr_t prevLInsAddr = payload - sizeof(LIns);
         LInsSk* insSk = (LInsSk*)(payload + payload_szB);
         LIns*   ins   = insSk->getLIns();
-        NanoAssert(prevLInsAddr >= pageDataStart(prevLInsAddr));
-        NanoAssert(samepage(prevLInsAddr, insSk));
+        // FIXME: restate these in a useful way.
+        // NanoAssert(prevLInsAddr >= pageDataStart(prevLInsAddr));
+        // NanoAssert(samepage(prevLInsAddr, insSk));
         ins->initLInsSk((LInsp)prevLInsAddr);
         return ins;
     }
@@ -395,7 +371,6 @@ namespace nanojit
 
                     int argc = ((LInsp)i)->argc();
                     i -= sizeof(LInsC);         // step over the instruction
                     i -= argc*sizeof(LInsp);    // step over the arguments
-                    NanoAssert( samepage(i, _i) );
                     break;
                 }
 
@@ -2052,10 +2027,12 @@ namespace nanojit
 
         return i->arg(i->argc()-n-1);
     }
 
-    void compile(Assembler* assm, Fragment* triggerFrag)
+    void compile(Fragmento* frago, Assembler* assm, Fragment* triggerFrag)
     {
-        Fragmento *frago = triggerFrag->lirbuf->_frago;
         AvmCore *core = frago->core();
+#ifdef NJ_VERBOSE
+        LabelMap* labels = frago->labels;
+#endif
         GC *gc = core->gc;
 
         verbose_only(
@@ -2113,7 +2090,6 @@ namespace nanojit
 
             root = triggerFrag->root;
             root->fragEntry = 0;
             root->loopEntry = 0;
-            root->releaseCode(frago);
 
             // do the tree branches
             verbose_only( if (anyVerb) {
@@ -2127,14 +2103,16 @@ namespace nanojit
 
                 {
                     verbose_only( if (anyVerb) {
                         logc->printf("=== -- Compiling branch %s ip %s\n",
-                                     frago->labels->format(frag),
-                                     frago->labels->format(frag->ip));
+                                     labels->format(frag),
+                                     labels->format(frag->ip));
                     })
-                    assm->assemble(frag, loopJumps);
+                    if (!assm->error()) {
+                        assm->assemble(frag, loopJumps);
+                        verbose_only(frago->_stats.compiles++);
+                        verbose_only(frago->_stats.totalCompiles++);
+                    }
                     verbose_only(if (asmVerb)
-                        assm->outputf("## compiling branch %s ip %s",
-                                      frago->labels->format(frag),
-                                      frago->labels->format(frag->ip)); )
+                        assm->outputf("## compiling branch %s ip %s", labels->format(frag), labels->format(frag->ip)); )
 
                     NanoAssert(frag->kind == BranchTrace);
                     RegAlloc* regs = NJ_NEW(gc, RegAlloc)();
@@ -2153,18 +2131,18 @@ namespace nanojit
 
         // now the the main trunk
         verbose_only( if (anyVerb) {
             logc->printf("=== -- Compile trunk %s: begin\n",
-                         frago->labels->format(root));
+                         labels->format(root));
         })
         assm->assemble(root, loopJumps);
         verbose_only( if (anyVerb) {
             logc->printf("=== -- Compile trunk %s: end\n",
-                         frago->labels->format(root));
+                         labels->format(root));
         })
 
         verbose_only(
             if (asmVerb)
                 assm->outputf("## compiling trunk %s",
-                              frago->labels->format(root));
+                              labels->format(root));
         )
         NanoAssert(!frago->core()->config.tree_opt
                    || root == root->anchor || root->kind == MergeTrace);
@@ -2194,6 +2172,10 @@ namespace nanojit
 
             root->fragEntry = 0;
             root->loopEntry = 0;
         }
+        else
+        {
+            CodeAlloc::moveAll(root->codeList, assm->codeList);
+        }
 
         /* BEGIN decorative postamble */
         verbose_only( if (anyVerb) {
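
CodeAlloc::moveAll comes over from Tamarin with this merge; judging by its use here, it transfers ownership of the freshly assembled code blocks from the assembler's working list onto the fragment, so the fragment keeps its code when compilation succeeds. A rough sketch of that kind of list splice, with hypothetical types (not the actual CodeAlloc implementation):

    // Hypothetical singly-linked block list, spliced in O(length of 'from').
    struct CodeList { CodeList* next; };

    // Move every block on 'from' onto 'to', leaving 'from' empty --
    // approximately what CodeAlloc::moveAll(root->codeList, assm->codeList)
    // appears to do with the assembler's freshly emitted code.
    static void moveAll(CodeList*& to, CodeList*& from) {
        while (from) {
            CodeList* b = from;
            from = b->next;
            b->next = to;
            to = b;
        }
    }
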
@@ -2216,6 +2198,7 @@ namespace nanojit
 
             if (found)
                 return found;
             return exprs.add(out->insLoad(v,base,disp), k);
+
         }
         return out->insLoad(v, base, disp);
     }
@@ -2249,8 +2232,8 @@ namespace nanojit
 
     #endif /* FEATURE_NANOJIT */
 
 #if defined(NJ_VERBOSE)
-    LabelMap::LabelMap(AvmCore *core)
-        : names(core->gc), addrs(core->config.verbose_addrs), end(buf), core(core)
+    LabelMap::LabelMap(AvmCore *core, nanojit::Allocator& a)
+        : allocator(a), names(core->gc), addrs(core->config.verbose_addrs), end(buf), core(core)
     {}
 
     LabelMap::~LabelMap()
@@ -95,7 +95,6 @@ namespace nanojit
 
     struct GuardRecord;
     struct SideExit;
-    struct Page;
 
     enum AbiKind {
         ABI_FASTCALL,
@@ -494,7 +493,13 @@ namespace nanojit
 
     private:
         // Last word: fields shared by all LIns kinds.  The reservation fields
        // are read/written during assembly.
+        union {
         Reservation lastWord;
+            // force sizeof(LIns)==8 and 8-byte alignment on 64-bit machines.
+            // this is necessary because sizeof(Reservation)==4 and we want all
+            // instances of LIns to be pointer-aligned.
+            void* dummy;
+        };
 
         // LIns-to-LInsXYZ converters.
         LInsOp0* toLInsOp0() const { return (LInsOp0*)( uintptr_t(this+1) - sizeof(LInsOp0) ); }
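
The anonymous union above is a size/alignment pad: overlaying a pointer on the 4-byte Reservation forces every LIns to occupy a pointer-sized, pointer-aligned slot on 64-bit targets. The effect can be checked in isolation with a stand-in type (a sketch, not code from the patch):

    #include <cstdint>

    struct Reservation32 { uint32_t bits; };    // stand-in, 4 bytes like nanojit's

    struct Padded {
        union {
            Reservation32 lastWord;             // real payload
            void* dummy;                        // pads to pointer size/alignment
        };
    };

    // On an LP64 target sizeof(Padded) == 8 and alignof(Padded) == 8,
    // whereas without the union they would both be 4.
    static_assert(sizeof(Padded) == sizeof(void*), "union pads to pointer size");
    static_assert(alignof(Padded) == alignof(void*), "union aligns to pointer");
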
@@ -660,7 +665,6 @@ namespace nanojit
 
         double         imm64f()    const;
         Reservation*   resv()            { return &lastWord; }
         void*          payload()   const;
-        inline Page*   page()            { return (Page*) alignTo(this,NJ_PAGE_SIZE); }
         inline int32_t size()      const {
             NanoAssert(isop(LIR_ialloc));
             return toLInsI()->imm32 << 2;
@@ -847,20 +851,6 @@ namespace nanojit
 
     };
 
 
-    // Each page has a header;  the rest of it holds code.
-    #define NJ_PAGE_CODE_AREA_SZB       (NJ_PAGE_SIZE - sizeof(PageHeader))
-
-    // The first instruction on a page is always a start instruction, or a
-    // payload-less skip instruction linking to the previous page.  The
-    // biggest possible instruction would take up the entire rest of the page.
-    #define NJ_MAX_LINS_SZB             (NJ_PAGE_CODE_AREA_SZB - sizeof(LInsSk))
-
-    // The maximum skip payload size is determined by the maximum instruction
-    // size.  We require that a skip's payload be adjacent to the skip LIns
-    // itself.
-    #define NJ_MAX_SKIP_PAYLOAD_SZB     (NJ_MAX_LINS_SZB - sizeof(LInsSk))
-
-
 #ifdef NJ_VERBOSE
     extern const char* lirNames[];
 
@@ -869,6 +859,7 @@ namespace nanojit
 
      */
     class LabelMap MMGC_SUBCLASS_DECL
     {
+        Allocator& allocator;
         class Entry MMGC_SUBCLASS_DECL
         {
         public:
@@ -884,7 +875,7 @@ namespace nanojit
 
         void formatAddr(const void *p, char *buf);
     public:
         avmplus::AvmCore *core;
-        LabelMap(avmplus::AvmCore *);
+        LabelMap(avmplus::AvmCore *, Allocator& allocator);
         ~LabelMap();
         void add(const void *p, size_t size, size_t align, const char *name);
         void add(const void *p, size_t size, size_t align, avmplus::String*);
@@ -895,6 +886,8 @@ namespace nanojit
 
     class LirNameMap MMGC_SUBCLASS_DECL
     {
+        Allocator& allocator;
+
         template <class Key>
         class CountMap: public avmplus::SortedMap<Key, int, avmplus::LIST_NonGCObjects> {
         public:
@@ -924,8 +917,9 @@ namespace nanojit
 
         void formatImm(int32_t c, char *buf);
     public:
 
-        LirNameMap(GC *gc, LabelMap *r)
-            : lircounts(gc),
+        LirNameMap(GC *gc, Allocator& allocator, LabelMap *r)
+            : allocator(allocator),
+            lircounts(gc),
             funccounts(gc),
             names(gc),
             labels(r)
@@ -1095,13 +1089,10 @@ namespace nanojit
 
     class LirBuffer : public GCFinalizedObject
     {
         public:
-            DWB(Fragmento*)        _frago;
-            LirBuffer(Fragmento* frago);
-            virtual ~LirBuffer();
+            LirBuffer(Allocator&);
+            ~LirBuffer();
             void        clear();
-            void        rewind();
             uintptr_t   makeRoom(size_t szB);   // make room for an instruction
-            bool        outOMem() { return _noMem != 0; }
 
             debug_only (void validate() const;)
             verbose_only(DWB(LirNameMap*) names;)
@@ -1121,14 +1112,32 @@ namespace nanojit
 
             LInsp savedRegs[NumSavedRegs];
             bool explicitSavedRegs;
 
-        protected:
-            Page*       pageAlloc();
-            void        moveToNewPage(uintptr_t addrOfLastLInsOnCurrentPage);
-
-            PageList    _pages;
-            Page*       _nextPage; // allocated in preperation of a needing to growing the buffer
-            uintptr_t   _unused;    // next unused instruction slot
-            int         _noMem;        // set if ran out of memory when writing to buffer
+            /** each chunk is just a raw area of LIns instances, with no header
+                and no more than 8-byte alignment.  The chunk size is somewhat arbitrary
+                as long as it's well larger than 2*sizeof(LInsSk) */
+            static const size_t CHUNK_SZB = 8000;
+
+            /** the first instruction on a chunk is always a start instruction, or a
+             *  payload-less skip instruction linking to the previous chunk.  The biggest
+             *  possible instruction would take up the entire rest of the chunk. */
+            static const size_t MAX_LINS_SZB = CHUNK_SZB - sizeof(LInsSk);
+
+            /** the maximum skip payload size is determined by the maximum instruction
+             *  size.  We require that a skip's payload be adjacent to the skip LIns
+             *  itself. */
+            static const size_t MAX_SKIP_PAYLOAD_SZB = MAX_LINS_SZB - sizeof(LInsSk);
+
+        protected:
+            friend class LirBufWriter;
+
+            /** get CHUNK_SZB more memory for LIR instructions */
+            void        chunkAlloc();
+            void        moveToNewChunk(uintptr_t addrOfLastLInsOnCurrentChunk);
+
+            Allocator&  _allocator;
+            uintptr_t   _unused;   // next unused instruction slot in the current LIR chunk
+            uintptr_t   _limit;    // one past the last usable byte of the current LIR chunk
+            size_t      _bytesAllocated;
     };
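
These class constants replace the old NJ_* page macros, and their relationships can be spelled out as compile-time checks. A sketch only, with an illustrative stand-in for LInsSk rather than the real LIns layout:

    #include <cstddef>

    // Stand-in for the nanojit type; in LIR.h its size is fixed by LIns.
    struct LInsSk { void* prev; void* ins; };   // illustrative only

    static const size_t CHUNK_SZB            = 8000;
    static const size_t MAX_LINS_SZB         = CHUNK_SZB - sizeof(LInsSk);
    static const size_t MAX_SKIP_PAYLOAD_SZB = MAX_LINS_SZB - sizeof(LInsSk);

    // A chunk must hold at least a linking skip plus one maximal instruction,
    // which is what "well larger than 2*sizeof(LInsSk)" guards against.
    static_assert(CHUNK_SZB > 2 * sizeof(LInsSk), "chunk too small to link");
    static_assert(MAX_SKIP_PAYLOAD_SZB < MAX_LINS_SZB, "payload fits in an ins");
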
 
     class LirBufWriter : public LirWriter
@@ -1193,7 +1202,7 @@ namespace nanojit
 
     class Assembler;
 
-    void compile(Assembler *assm, Fragment *frag);
+    void compile(Fragmento *frago, Assembler *assm, Fragment *frag);
     verbose_only(void live(GC *gc, LirBuffer *lirbuf);)
 
     class StackFilter: public LirFilter
@@ -536,7 +536,7 @@ Assembler::nFragExit(LInsp guard)
 
     }
 
 #ifdef NJ_VERBOSE
-    if (_frago->core()->config.show_stats) {
+    if (config.show_stats) {
         // load R1 with Fragment *fromFrag, target fragment
         // will make use of this when calling fragenter().
         int fromfrag = int((Fragment*)_thisfrag);
@@ -813,32 +813,6 @@ Assembler::asm_call(LInsp ins)
 
     }
 }
 
-void
-Assembler::nMarkExecute(Page* page, int flags)
-{
-    NanoAssert(sizeof(Page) == NJ_PAGE_SIZE);
-#ifdef UNDER_CE
-    static const DWORD kProtFlags[4] = {
-        PAGE_READONLY,          // 0
-        PAGE_READWRITE,         // PAGE_WRITE
-        PAGE_EXECUTE_READ,      // PAGE_EXEC
-        PAGE_EXECUTE_READWRITE  // PAGE_EXEC|PAGE_WRITE
-    };
-    DWORD prot = kProtFlags[flags & (PAGE_WRITE|PAGE_EXEC)];
-    DWORD dwOld;
-    BOOL res = VirtualProtect(page, NJ_PAGE_SIZE, prot, &dwOld);
-    if (!res)
-    {
-        // todo: we can't abort or assert here, we have to fail gracefully.
-        NanoAssertMsg(false, "FATAL ERROR: VirtualProtect() failed\n");
-    }
-#endif
-#ifdef AVMPLUS_PORTING_API
-    NanoJIT_PortAPI_MarkExecutable(page, (void*)((char*)page+NJ_PAGE_SIZE), flags);
-    // todo, must add error-handling to the portapi
-#endif
-}
-
 Register
 Assembler::nRegisterAllocFromSet(int set)
 {
@@ -1334,21 +1308,17 @@ Assembler::nativePageReset()
 
 void
 Assembler::nativePageSetup()
 {
-    if (!_nIns)      _nIns     = pageAlloc();
-    if (!_nExitIns)  _nExitIns = pageAlloc(true);
-    //nj_dprintf("assemble onto %x exits into %x\n", (int)_nIns, (int)_nExitIns);
+    if (!_nIns)
+        codeAlloc(codeStart, codeEnd, _nIns);
+    if (!_nExitIns)
+        codeAlloc(exitStart, exitEnd, _nExitIns);
 
+    // constpool starts at top of page and goes down,
+    // code starts at bottom of page and moves up
     if (!_nSlot)
-    {
-        // This needs to be done or the samepage macro gets confused; pageAlloc
-        // gives us a pointer to just past the end of the page.
-        _nIns--;
-        _nExitIns--;
-
-        // constpool starts at top of page and goes down,
-        // code starts at bottom of page and moves up
-        _nSlot = (int*)pageDataStart(_nIns);
-    }
+        _nSlot = codeStart;
+    if (!_nExitSlot)
+        _nExitSlot = exitStart;
 }
 
 // Record the starting value of _nIns. On ARM, it is also necessary to record
@@ -1371,42 +1341,28 @@ Assembler::resetInstructionPointer()
 
     NanoAssert(samepage(_nIns,_nSlot));
 }
 
-// Note: underrunProtect should not touch any registers, even IP; it
-// might need to allocate a new page in the middle of an IP-using
-// sequence.
 void
 Assembler::underrunProtect(int bytes)
 {
     NanoAssertMsg(bytes<=LARGEST_UNDERRUN_PROT, "constant LARGEST_UNDERRUN_PROT is too small");
-    intptr_t u = bytes + sizeof(PageHeader)/sizeof(NIns) + 8;
-    if ( (samepage(_nIns,_nSlot) && (((intptr_t)_nIns-u) <= intptr_t(_nSlot+1))) ||
-         (!samepage((intptr_t)_nIns-u,_nIns)) )
+    NanoAssert(_nSlot != 0 && int(_nIns)-int(_nSlot) <= 4096);
+    uintptr_t top = uintptr_t(_nSlot);
+    uintptr_t pc = uintptr_t(_nIns);
+    if (pc - bytes < top)
     {
+        verbose_only(verbose_outputf("        %p:", _nIns);)
         NIns* target = _nIns;
-
-        _nIns = pageAlloc(_inExit);
-
-        // XXX _nIns at this point points to one past the end of
-        // the page, intended to be written into using *(--_nIns).
-        // However, (guess) something seems to be storing the value
-        // of _nIns as is, and then later generating a jump to a bogus
-        // address.  So pre-decrement to ensure that it's always
-        // valid; we end up skipping using the last instruction this
-        // way.
-        _nIns--;
-
-        // Update slot, either to _nIns (if decremented above), or
-        // _nIns-1 once the above bug is fixed/found.
-        _nSlot = (int*)pageDataStart(_nIns);
-
-        // If samepage() is used on _nIns and _nSlot, it'll fail, since _nIns
-        // points to one past the end of the page right now.  Assume that
-        // JMP_nochk won't ever try to write to _nSlot, and so won't ever
-        // check samepage().  See B_cond_chk macro.
-        JMP_nochk(target);
-    } else if (!_nSlot) {
-        // make sure that there's always a slot pointer
-        _nSlot = (int*)pageDataStart(_nIns);
+        if (_inExit)
+            codeAlloc(exitStart, exitEnd, _nIns);
+        else
+            codeAlloc(codeStart, codeEnd, _nIns);
 
+        _nSlot = _inExit ? exitStart : codeStart;
+
+        // _nSlot points to the first empty position in the new code block
+        // _nIns points just past the last empty position.
+        // Assume B_nochk won't ever try to write to _nSlot. See B_cond_chk macro.
+        B_nochk(target);
     }
 }
 
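
In this ARM backend the code cursor _nIns moves down from the end of a block while the constant pool _nSlot fills up from the start, so "running out of room" means the two cursors meeting; that is what the new pc - bytes < top test catches. A compact model of the invariant, with illustrative names and no real instruction encoding:

    #include <cassert>
    #include <cstdint>

    // Model of one ARM code block: literals grow up from 'slot',
    // instructions grow down from 'pc'; they must never cross.
    struct CodeBlock {
        uint32_t* start;   // block base, cf. codeStart
        uint32_t* slot;    // next free literal word, grows upward, cf. _nSlot
        uint32_t* pc;      // next instruction goes at *(--pc), cf. _nIns

        bool roomFor(int bytes) const {
            return uintptr_t(pc) - uintptr_t(bytes) >= uintptr_t(slot);
        }
        void emit(uint32_t ins)          { assert(roomFor(4)); *(--pc) = ins; }
        uint32_t* addLiteral(uint32_t v) { assert(roomFor(8)); *slot = v; return slot++; }
    };
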
@@ -1613,19 +1569,22 @@ Assembler::asm_ld_imm(Register d, int32_t imm, bool chk /* = true */)
 
         underrunProtect(LD32_size);
     }
 
-    int offset = PC_OFFSET_FROM(_nSlot+1, _nIns-1);
+    int offset = PC_OFFSET_FROM(_nSlot, _nIns-1);
     // If the offset is out of range, waste literal space until it is in range.
     while (offset <= -4096) {
         ++_nSlot;
         offset += sizeof(_nSlot);
     }
-    NanoAssert(isS12(offset) && (offset < -8));
+    NanoAssert(isS12(offset) && (offset < 0));
 
     // Write the literal.
-    *(++_nSlot) = imm;
+    *(_nSlot++) = imm;
+    asm_output("## imm= 0x%x", imm);
 
     // Load the literal.
     LDR_nochk(d,PC,offset);
+    NanoAssert(uintptr_t(_nIns) + 8 + offset == uintptr_t(_nSlot-1));
+    NanoAssert(*((int32_t*)_nSlot-1) == imm);
 }
 
 // Branch to target address _t with condition _c, doing underrun
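
The new assertions encode the ARM pipeline rule that a PC-relative load sees PC as the instruction's own address plus 8. A worked instance with made-up addresses:

    #include <cstdint>

    // Made-up addresses illustrating the post-condition asserted above:
    // the LDR sits at 0x1000 and reads PC as 0x1000 + 8; the literal word
    // was just written at 0x0F40 (i.e. _nSlot - 1 after the increment).
    int32_t offset = 0x0F40 - (0x1000 + 8);   // == -200, in range for isS12()
    // So LDR d, [PC, #-200] loads exactly the word written at _nSlot-1.
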
@@ -676,6 +676,9 @@ enum {
 
 #define B_cond(_c,_t)                           \
     B_cond_chk(_c,_t,1)
 
+#define B_nochk(_t)                             \
+    B_cond_chk(AL,_t,0)
+
 // NB: don't use COND_AL here, we shift the condition into place!
 #define JMP(_t)                                 \
     B_cond_chk(AL,_t,1)
@@ -123,7 +123,7 @@ namespace nanojit
 
     void Assembler::nFragExit(LInsp guard)
     {
         SideExit *exit = guard->record()->exit;
-        bool trees = _frago->core()->config.tree_opt;
+        bool trees = config.tree_opt;
         Fragment *frag = exit->target;
         GuardRecord *lr = 0;
         bool destKnown = (frag && frag->fragEntry);
@@ -250,52 +250,6 @@ namespace nanojit
 
             SUBi(SP, extra);
     }
 
-    void Assembler::nMarkExecute(Page* page, int flags)
-    {
-        NanoAssert(sizeof(Page) == NJ_PAGE_SIZE);
-        #if defined WIN32 || defined WIN64
-            DWORD dwIgnore;
-            static const DWORD kProtFlags[4] =
-            {
-                PAGE_READONLY,            // 0
-                PAGE_READWRITE,            // PAGE_WRITE
-                PAGE_EXECUTE_READ,        // PAGE_EXEC
-                PAGE_EXECUTE_READWRITE    // PAGE_EXEC|PAGE_WRITE
-            };
-            DWORD prot = kProtFlags[flags & (PAGE_WRITE|PAGE_EXEC)];
-            BOOL res = VirtualProtect(page, NJ_PAGE_SIZE, prot, &dwIgnore);
-            if (!res)
-            {
-                // todo: we can't abort or assert here, we have to fail gracefully.
-                NanoAssertMsg(false, "FATAL ERROR: VirtualProtect() failed\n");
-            }
-        #elif defined AVMPLUS_UNIX || defined AVMPLUS_MAC
-            static const int kProtFlags[4] =
-            {
-                PROT_READ,                        // 0
-                PROT_READ|PROT_WRITE,            // PAGE_WRITE
-                PROT_READ|PROT_EXEC,            // PAGE_EXEC
-                PROT_READ|PROT_WRITE|PROT_EXEC    // PAGE_EXEC|PAGE_WRITE
-            };
-            int prot = kProtFlags[flags & (PAGE_WRITE|PAGE_EXEC)];
-            intptr_t addr = (intptr_t)page;
-            addr &= ~((uintptr_t)NJ_PAGE_SIZE - 1);
-            NanoAssert(addr == (intptr_t)page);
-            #if defined SOLARIS
-            if (mprotect((char *)addr, NJ_PAGE_SIZE, prot) == -1)
-            #else
-            if (mprotect((void *)addr, NJ_PAGE_SIZE, prot) == -1)
-            #endif
-            {
-                // todo: we can't abort or assert here, we have to fail gracefully.
-                NanoAssertMsg(false, "FATAL ERROR: mprotect(PROT_EXEC) failed\n");
-                abort();
-            }
-        #else
-            (void)page;
-        #endif
-    }
-
     Register Assembler::nRegisterAllocFromSet(int set)
     {
         Register r;
@@ -1723,8 +1677,8 @@ namespace nanojit
 
     void Assembler::nativePageSetup()
     {
-        if (!_nIns)         _nIns       = pageAlloc();
-        if (!_nExitIns)  _nExitIns = pageAlloc(true);
+        if (!_nIns) codeAlloc(codeStart, codeEnd, _nIns);
+        if (!_nExitIns) codeAlloc(exitStart, exitEnd, _nExitIns);
     }
 
     // Reset the _nIns pointer to the starting value. This can be used to roll
@@ -1744,15 +1698,15 @@ namespace nanojit
 
     // enough room for n bytes
     void Assembler::underrunProtect(int n)
     {
+        NIns *eip = _nIns;
         NanoAssertMsg(n<=LARGEST_UNDERRUN_PROT, "constant LARGEST_UNDERRUN_PROT is too small");
-        NIns *eip = this->_nIns;
-        Page *p = (Page*)pageTop(eip-1);
-        NIns *top = (NIns*) &p->code[0];
-        if (eip - n < top) {
+        if (eip - n < (_inExit ? exitStart : codeStart)) {
             // We are done with the current page.  Tell Valgrind that new code
             // has been generated.
-            VALGRIND_DISCARD_TRANSLATIONS(pageTop(p), NJ_PAGE_SIZE);
-            _nIns = pageAlloc(_inExit);
+            if (_inExit)
+                codeAlloc(exitStart, exitEnd, _nIns);
+            else
+                codeAlloc(codeStart, codeEnd, _nIns);
             JMP(eip);
         }
     }
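
Because code now comes from CodeAlloc in discontiguous chunks, the x86 backend stitches blocks together the same way the ARM one does: emission runs backwards, so when a block fills up it grabs a fresh one and plants a jump at its end targeting the instructions already emitted, which is the JMP(eip) above. A self-contained sketch of that final jump, assuming nothing beyond the standard x86 rel32 encoding:

    #include <cstdint>
    #include <cstring>

    // Plant a 5-byte JMP rel32 ending at 'nIns' (backwards emission) that
    // transfers control to 'target', the oldest instruction emitted so far.
    static uint8_t* emitJmpRel32(uint8_t* nIns, uint8_t* target) {
        nIns -= 5;                                   // JMP rel32 is 5 bytes
        int32_t rel = int32_t(target - (nIns + 5));  // displacement from next ins
        nIns[0] = 0xE9;
        memcpy(nIns + 1, &rel, 4);
        return nIns;
    }
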
@@ -32,7 +32,18 @@
 
  *
  ***** END LICENSE BLOCK ***** */
 
-#include "avmplus.h"
+#include "nanojit.h"
+
+#ifdef SOLARIS
+	#include <ucontext.h>
+	#include <dlfcn.h>
+	#include <procfs.h>
+	#include <sys/stat.h>
+    extern "C" caddr_t _getfp(void);
+    typedef caddr_t maddr_ptr;
+#else
+    typedef void *maddr_ptr;
+#endif
 
 using namespace avmplus;
 
@@ -42,6 +53,10 @@ GC* AvmCore::gc = &_gc;
 
 GCHeap GC::heap;
 String* AvmCore::k_str[] = { (String*)"" };
 
+void
+avmplus::AvmLog(char const *msg, ...) {
+}
+
 #ifdef _DEBUG
 // NanoAssertFail matches JS_Assert in jsutil.cpp.
 void NanoAssertFail()
@@ -56,3 +71,115 @@ void NanoAssertFail()
 
     abort();
 }
 #endif
+
+#ifdef WIN32
+void
+VMPI_setPageProtection(void *address,
+                       size_t size,
+                       bool executableFlag,
+                       bool writeableFlag)
+{
+    DWORD oldProtectFlags = 0;
+    DWORD newProtectFlags = 0;
+    if ( executableFlag && writeableFlag ) {
+        newProtectFlags = PAGE_EXECUTE_READWRITE;
+    } else if ( executableFlag ) {
+        newProtectFlags = PAGE_EXECUTE_READ;
+    } else if ( writeableFlag ) {
+        newProtectFlags = PAGE_READWRITE;
+    } else {
+        newProtectFlags = PAGE_READONLY;
+    }
+
+    BOOL retval;
+    MEMORY_BASIC_INFORMATION mbi;
+    do {
+        VirtualQuery(address, &mbi, sizeof(MEMORY_BASIC_INFORMATION));
+        size_t markSize = size > mbi.RegionSize ? mbi.RegionSize : size;
+
+        retval = VirtualProtect(address, markSize, newProtectFlags, &oldProtectFlags);
+        NanoAssert(retval);
+
+        address = (char*) address + markSize;
+        size -= markSize;
+    } while(size > 0 && retval);
+
+    // We should not be clobbering PAGE_GUARD protections
+    NanoAssert((oldProtectFlags & PAGE_GUARD) == 0);
+}
+
+#else // !WIN32
+
+void VMPI_setPageProtection(void *address,
+                            size_t size,
+                            bool executableFlag,
+                            bool writeableFlag)
+{
+  int bitmask = sysconf(_SC_PAGESIZE) - 1;
+  // mprotect requires that the addresses be aligned on page boundaries
+  void *endAddress = (void*) ((char*)address + size);
+  void *beginPage = (void*) ((size_t)address & ~bitmask);
+  void *endPage   = (void*) (((size_t)endAddress + bitmask) & ~bitmask);
+  size_t sizePaged = (size_t)endPage - (size_t)beginPage;
+
+  int flags = PROT_READ;
+  if (executableFlag) {
+    flags |= PROT_EXEC;
+  }
+  if (writeableFlag) {
+    flags |= PROT_WRITE;
+  }
+  int retval = mprotect((maddr_ptr)beginPage, (unsigned int)sizePaged, flags);
+  AvmAssert(retval == 0);
+  (void)retval;
+}
+
+#endif // WIN32
+
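
VMPI_setPageProtection is the Tamarin portability shim that takes over from the per-backend nMarkExecute deleted earlier in this commit. A hedged usage sketch: the chunk pointer and size here are placeholders, and note that this commit actually maps code chunks RWX up front, so a stricter write-then-execute discipline like the one below is optional, not what the patch does.

    #include <cstddef>

    // 'code' is a chunk the JIT has just finished writing into.
    // Assemble with writeable=true, then flip it to executable:
    void finishChunk(void* code, size_t nbytes) {
        VMPI_setPageProtection(code, nbytes, /*executableFlag*/ true,
                               /*writeableFlag*/ false);
    }
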
+#ifdef WIN32
+void*
+nanojit::CodeAlloc::allocCodeChunk(size_t nbytes) {
+    return VirtualAlloc(NULL,
+                        nbytes,
+                        MEM_COMMIT | MEM_RESERVE,
+                        PAGE_EXECUTE_READWRITE);
+}
+
+void
+nanojit::CodeAlloc::freeCodeChunk(void *p, size_t nbytes) {
+    VirtualFree(p, 0, MEM_RELEASE);
+}
+
+#elif defined(AVMPLUS_UNIX)
+
+void*
+nanojit::CodeAlloc::allocCodeChunk(size_t nbytes) {
+    return mmap(NULL,
+                nbytes,
+                PROT_READ | PROT_WRITE | PROT_EXEC,
+                MAP_PRIVATE | MAP_ANON,
+                -1,
+                0);
+}
+
+void
+nanojit::CodeAlloc::freeCodeChunk(void *p, size_t nbytes) {
+    munmap(p, nbytes);
+}
+
+#else // !WIN32 && !AVMPLUS_UNIX
+
+void*
+nanojit::CodeAlloc::allocCodeChunk(size_t nbytes) {
+    return valloc(nbytes);
+}
+
+void
+nanojit::CodeAlloc::freeCodeChunk(void *p, size_t nbytes) {
+    free(p);
+}
+
+#endif // WIN32
+
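
Worth noting: none of the three allocCodeChunk backends checks for allocation failure, and on the UNIX path mmap reports failure as MAP_FAILED rather than NULL, which would be handed straight back to the caller. A defensive variant, as a sketch only and not part of the patch:

    #include <sys/mman.h>
    #include <cstddef>

    // Checked version of the AVMPLUS_UNIX backend: normalize mmap's
    // MAP_FAILED ((void*)-1) to NULL so callers can test the result.
    static void* allocCodeChunkChecked(size_t nbytes) {
        void* p = mmap(NULL, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC,
                       MAP_PRIVATE | MAP_ANON, -1, 0);
        return p == MAP_FAILED ? NULL : p;
    }
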
@@ -288,12 +288,19 @@ namespace MMgc {
 
 #define MMGC_MEM_TYPE(x)
 
+extern void VMPI_setPageProtection(void *address,
+                                   size_t size,
+                                   bool executableFlag,
+                                   bool writeableFlag);
+
 namespace avmplus {
 
     using namespace MMgc;
 
     typedef int FunctionID;
 
+    extern void AvmLog(char const *msg, ...);
+
     class String
     {
     };
| 
						 | 
					@ -120,14 +120,12 @@ namespace nanojit
 | 
				
			||||||
    class LIns;
 | 
					    class LIns;
 | 
				
			||||||
    struct SideExit;
 | 
					    struct SideExit;
 | 
				
			||||||
    class RegAlloc;
 | 
					    class RegAlloc;
 | 
				
			||||||
    struct Page;
 | 
					 | 
				
			||||||
    typedef avmplus::AvmCore AvmCore;
 | 
					    typedef avmplus::AvmCore AvmCore;
 | 
				
			||||||
    typedef avmplus::OSDep OSDep;
 | 
					    typedef avmplus::OSDep OSDep;
 | 
				
			||||||
    typedef avmplus::GCSortedMap<const void*,Fragment*,avmplus::LIST_GCObjects> FragmentMap;
 | 
					    typedef avmplus::GCSortedMap<const void*,Fragment*,avmplus::LIST_GCObjects> FragmentMap;
 | 
				
			||||||
    typedef avmplus::SortedMap<SideExit*,RegAlloc*,avmplus::LIST_GCObjects> RegAllocMap;
 | 
					    typedef avmplus::SortedMap<SideExit*,RegAlloc*,avmplus::LIST_GCObjects> RegAllocMap;
 | 
				
			||||||
    typedef avmplus::List<LIns*,avmplus::LIST_NonGCObjects>    InsList;
 | 
					    typedef avmplus::List<LIns*,avmplus::LIST_NonGCObjects>    InsList;
 | 
				
			||||||
    typedef avmplus::List<char*, avmplus::LIST_GCObjects> StringList;
 | 
					    typedef avmplus::List<char*, avmplus::LIST_GCObjects> StringList;
 | 
				
			||||||
    typedef avmplus::List<Page*,avmplus::LIST_NonGCObjects>    PageList;
 | 
					 | 
				
			||||||
 | 
					
 | 
				
			||||||
    const uint32_t MAXARGS = 8;
 | 
					    const uint32_t MAXARGS = 8;
 | 
				
			||||||
 | 
					
 | 
				
			||||||
@@ -295,8 +293,9 @@ namespace nanojit {
 // -------------------------------------------------------------------
 
 
+#include "Allocator.h"
 #include "Native.h"
+#include "CodeAlloc.h"
 #include "LIR.h"
 #include "RegAlloc.h"
 #include "Fragmento.h"
@@ -86,16 +86,14 @@
 #define THREAD_SAFE 0
 
 #ifdef _MSC_VER
-typedef unsigned char      uint8_t;
-typedef unsigned short     uint16_t;
-typedef signed char        int8_t;
-typedef short              int16_t;
-typedef unsigned int       uint32_t;
-typedef signed int         int32_t;
+typedef __int8             int8_t;
+typedef __int16            int16_t;
+typedef __int32            int32_t;
 typedef __int64            int64_t;
+typedef unsigned __int8    uint8_t;
+typedef unsigned __int16   uint16_t;
+typedef unsigned __int32   uint32_t;
 typedef unsigned __int64   uint64_t;
-typedef long long          int64_t;
-typedef unsigned long long uint64_t;
 #else
 #include <inttypes.h>
 #endif
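Beyond switching to MSVC's __intN forms, this hunk removes a redundancy: the old block typedef'd int64_t and uint64_t twice, once via __int64 and once via long long. A quick compile-time width check for the new typedefs (illustrative only; C++11 static_assert is shown for brevity, where code of this era would use an array-size trick instead):

// Illustrative width check for the MSVC typedefs above (C++11 syntax).
static_assert(sizeof(int8_t)   == 1, "int8_t must be 1 byte");
static_assert(sizeof(int16_t)  == 2, "int16_t must be 2 bytes");
static_assert(sizeof(int32_t)  == 4, "int32_t must be 4 bytes");
static_assert(sizeof(int64_t)  == 8, "int64_t must be 8 bytes");
static_assert(sizeof(uint64_t) == 8, "uint64_t must be 8 bytes");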
@@ -120,11 +118,11 @@ int _histEntryValue (void* id, int64_t value);
 }
 #endif
 
-#define DOPROF
-
 #ifndef DOPROF
 #define _vprof(v)
+#define _nvprof(n,v)
 #define _hprof(h)
+#define _nhprof(n,h)
 #else
 
 #define _vprof(v,...) \
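The two added macros give the value and histogram probes named variants. Assuming vprof's usual convention, where the explicit name tags the sample stream rather than a name derived from file and line, usage might look like the sketch below; the probe name and wrapper function are hypothetical:

// Hypothetical usage of the new named value probe. When DOPROF is not
// defined the macro expands to nothing, so this compiles away entirely.
#include "vprof.h"

static void profileTrip(int trips)
{
    _nvprof("loop-trips", trips);   // named value probe (name is made up)
}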