Merge tracemonkey to mozilla-central.

Robert Sayre 2010-03-26 15:53:14 -07:00
commit 66f3f0f7a3
115 changed files with 6707 additions and 2657 deletions

View file

@ -589,7 +589,7 @@ nsScriptSecurityManager::CheckObjectAccess(JSContext *cx, JSObject *obj,
// Do the same-origin check -- this sets a JS exception if the check fails.
// Pass the parent object's class name, as we have no class-info for it.
nsresult rv =
ssm->CheckPropertyAccess(cx, target, STOBJ_GET_CLASS(obj)->name, id,
ssm->CheckPropertyAccess(cx, target, obj->getClass()->name, id,
(mode & JSACC_WRITE) ?
(PRInt32)nsIXPCSecurityManager::ACCESS_SET_PROPERTY :
(PRInt32)nsIXPCSecurityManager::ACCESS_GET_PROPERTY);
@ -2388,7 +2388,7 @@ nsScriptSecurityManager::doGetObjectPrincipal(JSObject *aObj
JSObject* origObj = aObj;
#endif
const JSClass *jsClass = STOBJ_GET_CLASS(aObj);
const JSClass *jsClass = aObj->getClass();
// A common case seen in this code is that we enter this function
// with aObj being a Function object, whose parent is a Call
@ -2465,12 +2465,12 @@ nsScriptSecurityManager::doGetObjectPrincipal(JSObject *aObj
}
}
aObj = STOBJ_GET_PARENT(aObj);
aObj = aObj->getParent();
if (!aObj)
break;
jsClass = STOBJ_GET_CLASS(aObj);
jsClass = aObj->getClass();
} while (1);
NS_ASSERTION(!aAllowShortCircuit ||
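A note on the pattern in the hunks above, repeated throughout this merge: the C-style STOBJ_GET_CLASS / STOBJ_GET_PARENT / STOBJ_GET_PROTO accessor macros are retired in favor of inline member functions on JSObject. A minimal sketch of the shape of the new interface (illustrative only; the field names here are hypothetical and the real JSObject carries far more state):

    struct JSClassSketch { const char *name; };

    struct JSObjectSketch {
        JSObjectSketch *proto;    // hypothetical field names
        JSObjectSketch *parent;
        JSClassSketch  *clasp;

        JSObjectSketch *getProto() const  { return proto; }
        JSObjectSketch *getParent() const { return parent; }
        JSClassSketch  *getClass() const  { return clasp; }
    };

Call sites convert mechanically: STOBJ_GET_CLASS(obj)->name becomes obj->getClass()->name, and so on for getParent() and getProto().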

View file

@ -42,8 +42,11 @@
#ifndef nsContentUtils_h___
#define nsContentUtils_h___
#include "jsprvtd.h"
#include "jsnum.h"
#include <math.h>
#if defined(XP_WIN) || defined(XP_OS2)
#include <float.h>
#endif
#include "nsAString.h"
#include "nsIStatefulFrame.h"
#include "nsINodeInfo.h"
@ -61,6 +64,8 @@
#include "nsReadableUtils.h"
#include "nsIPrefBranch2.h"
#include "jsapi.h"
struct nsNativeKeyEvent; // Don't include nsINativeKeyBindings.h here: it will force a strange compilation error!
class nsIDOMScriptObjectFactory;
@ -1771,20 +1776,14 @@ private:
/*
* Check whether a floating point number is finite (not +/-infinity and not a
* NaN value). We wrap JSDOUBLE_IS_FINITE in a function because it expects to
* take the address of its argument, and because the argument must be of type
* jsdouble to have the right size and layout of bits.
*
* Note: we could try to exploit the fact that |infinity - infinity == NaN|
* instead of using JSDOUBLE_IS_FINITE. This would produce more compact code
* and perform better by avoiding type conversions and bit twiddling.
* Unfortunately, some architectures don't guarantee that |f == f| evaluates
* to true (where f is any *finite* floating point number). See
* https://bugzilla.mozilla.org/show_bug.cgi?id=369418#c63 . To play it safe
* for gecko 1.9, we just reuse JSDOUBLE_IS_FINITE.
* NaN value).
*/
inline NS_HIDDEN_(PRBool) NS_FloatIsFinite(jsdouble f) {
return JSDOUBLE_IS_FINITE(f);
#ifdef WIN32
return _finite(f);
#else
return finite(f);
#endif
}
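The deleted comment above records why the old code avoided the NaN-based shortcut: for finite f, f - f == 0.0 exactly, while inf - inf and NaN - NaN both yield NaN (which compares unequal to everything), but some architectures could not be trusted to evaluate f == f as true for every finite f. A sketch of that shortcut, assuming strict IEEE-754 semantics:

    // Finite iff f - f is exactly zero; inf - inf and NaN - NaN are NaN,
    // and NaN != 0.0. Not relied on here, for the portability reason the
    // removed comment describes.
    static inline bool SubtractionIsFinite(double f) {
        return f - f == 0.0;
    }

The replacement instead dispatches to the platform primitives _finite() (float.h, on Windows/OS2) and finite() (math.h elsewhere), as the new #ifdef shows.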
/*

View file

@ -936,7 +936,7 @@ nsICanvasRenderingContextWebGL_Uniform1iv_tn(JSContext *cx, JSObject *obj, uint3
}
JS_DEFINE_TRCINFO_1(nsICanvasRenderingContextWebGL_Uniform1iv,
(4, (static, JSVAL_FAIL, nsICanvasRenderingContextWebGL_Uniform1iv_tn, CONTEXT, THIS, UINT32, OBJECT, 0, 0)))
(4, (static, JSVAL_FAIL, nsICanvasRenderingContextWebGL_Uniform1iv_tn, CONTEXT, THIS, UINT32, OBJECT, 0, nanojit::ACC_STORE_ANY)))
static jsval FASTCALL
nsICanvasRenderingContextWebGL_Uniform2iv_tn(JSContext *cx, JSObject *obj, uint32 location, JSObject *arg)
@ -945,7 +945,7 @@ nsICanvasRenderingContextWebGL_Uniform2iv_tn(JSContext *cx, JSObject *obj, uint3
}
JS_DEFINE_TRCINFO_1(nsICanvasRenderingContextWebGL_Uniform2iv,
(4, (static, JSVAL_FAIL, nsICanvasRenderingContextWebGL_Uniform2iv_tn, CONTEXT, THIS, UINT32, OBJECT, 0, 0)))
(4, (static, JSVAL_FAIL, nsICanvasRenderingContextWebGL_Uniform2iv_tn, CONTEXT, THIS, UINT32, OBJECT, 0, nanojit::ACC_STORE_ANY)))
static jsval FASTCALL
nsICanvasRenderingContextWebGL_Uniform3iv_tn(JSContext *cx, JSObject *obj, uint32 location, JSObject *arg)
@ -954,7 +954,7 @@ nsICanvasRenderingContextWebGL_Uniform3iv_tn(JSContext *cx, JSObject *obj, uint3
}
JS_DEFINE_TRCINFO_1(nsICanvasRenderingContextWebGL_Uniform3iv,
(4, (static, JSVAL_FAIL, nsICanvasRenderingContextWebGL_Uniform3iv_tn, CONTEXT, THIS, UINT32, OBJECT, 0, 0)))
(4, (static, JSVAL_FAIL, nsICanvasRenderingContextWebGL_Uniform3iv_tn, CONTEXT, THIS, UINT32, OBJECT, 0, nanojit::ACC_STORE_ANY)))
static jsval FASTCALL
nsICanvasRenderingContextWebGL_Uniform4iv_tn(JSContext *cx, JSObject *obj, uint32 location, JSObject *arg)
@ -963,7 +963,7 @@ nsICanvasRenderingContextWebGL_Uniform4iv_tn(JSContext *cx, JSObject *obj, uint3
}
JS_DEFINE_TRCINFO_1(nsICanvasRenderingContextWebGL_Uniform4iv,
(4, (static, JSVAL_FAIL, nsICanvasRenderingContextWebGL_Uniform4iv_tn, CONTEXT, THIS, UINT32, OBJECT, 0, 0)))
(4, (static, JSVAL_FAIL, nsICanvasRenderingContextWebGL_Uniform4iv_tn, CONTEXT, THIS, UINT32, OBJECT, 0, nanojit::ACC_STORE_ANY)))
static jsval FASTCALL
nsICanvasRenderingContextWebGL_Uniform1fv_tn(JSContext *cx, JSObject *obj, uint32 location, JSObject *arg)
@ -972,7 +972,7 @@ nsICanvasRenderingContextWebGL_Uniform1fv_tn(JSContext *cx, JSObject *obj, uint3
}
JS_DEFINE_TRCINFO_1(nsICanvasRenderingContextWebGL_Uniform1fv,
(4, (static, JSVAL_FAIL, nsICanvasRenderingContextWebGL_Uniform1fv_tn, CONTEXT, THIS, UINT32, OBJECT, 0, 0)))
(4, (static, JSVAL_FAIL, nsICanvasRenderingContextWebGL_Uniform1fv_tn, CONTEXT, THIS, UINT32, OBJECT, 0, nanojit::ACC_STORE_ANY)))
static jsval FASTCALL
nsICanvasRenderingContextWebGL_Uniform2fv_tn(JSContext *cx, JSObject *obj, uint32 location, JSObject *arg)
@ -981,7 +981,7 @@ nsICanvasRenderingContextWebGL_Uniform2fv_tn(JSContext *cx, JSObject *obj, uint3
}
JS_DEFINE_TRCINFO_1(nsICanvasRenderingContextWebGL_Uniform2fv,
(4, (static, JSVAL_FAIL, nsICanvasRenderingContextWebGL_Uniform2fv_tn, CONTEXT, THIS, UINT32, OBJECT, 0, 0)))
(4, (static, JSVAL_FAIL, nsICanvasRenderingContextWebGL_Uniform2fv_tn, CONTEXT, THIS, UINT32, OBJECT, 0, nanojit::ACC_STORE_ANY)))
static jsval FASTCALL
nsICanvasRenderingContextWebGL_Uniform3fv_tn(JSContext *cx, JSObject *obj, uint32 location, JSObject *arg)
@ -990,7 +990,7 @@ nsICanvasRenderingContextWebGL_Uniform3fv_tn(JSContext *cx, JSObject *obj, uint3
}
JS_DEFINE_TRCINFO_1(nsICanvasRenderingContextWebGL_Uniform3fv,
(4, (static, JSVAL_FAIL, nsICanvasRenderingContextWebGL_Uniform3fv_tn, CONTEXT, THIS, UINT32, OBJECT, 0, 0)))
(4, (static, JSVAL_FAIL, nsICanvasRenderingContextWebGL_Uniform3fv_tn, CONTEXT, THIS, UINT32, OBJECT, 0, nanojit::ACC_STORE_ANY)))
static jsval FASTCALL
nsICanvasRenderingContextWebGL_Uniform4fv_tn(JSContext *cx, JSObject *obj, uint32 location, JSObject *arg)
@ -999,7 +999,7 @@ nsICanvasRenderingContextWebGL_Uniform4fv_tn(JSContext *cx, JSObject *obj, uint3
}
JS_DEFINE_TRCINFO_1(nsICanvasRenderingContextWebGL_Uniform4fv,
(4, (static, JSVAL_FAIL, nsICanvasRenderingContextWebGL_Uniform4fv_tn, CONTEXT, THIS, UINT32, OBJECT, 0, 0)))
(4, (static, JSVAL_FAIL, nsICanvasRenderingContextWebGL_Uniform4fv_tn, CONTEXT, THIS, UINT32, OBJECT, 0, nanojit::ACC_STORE_ANY)))
static jsval FASTCALL
nsICanvasRenderingContextWebGL_UniformMatrix2fv_tn(JSContext *cx, JSObject *obj, uint32 loc, JSBool transpose, JSObject *arg)
@ -1008,7 +1008,7 @@ nsICanvasRenderingContextWebGL_UniformMatrix2fv_tn(JSContext *cx, JSObject *obj,
}
JS_DEFINE_TRCINFO_1(nsICanvasRenderingContextWebGL_UniformMatrix2fv,
(5, (static, JSVAL_FAIL, nsICanvasRenderingContextWebGL_UniformMatrix2fv_tn, CONTEXT, THIS, UINT32, BOOL, OBJECT, 0, 0)))
(5, (static, JSVAL_FAIL, nsICanvasRenderingContextWebGL_UniformMatrix2fv_tn, CONTEXT, THIS, UINT32, BOOL, OBJECT, 0, nanojit::ACC_STORE_ANY)))
static jsval FASTCALL
nsICanvasRenderingContextWebGL_UniformMatrix3fv_tn(JSContext *cx, JSObject *obj, uint32 loc, JSBool transpose, JSObject *arg)
@ -1017,7 +1017,7 @@ nsICanvasRenderingContextWebGL_UniformMatrix3fv_tn(JSContext *cx, JSObject *obj,
}
JS_DEFINE_TRCINFO_1(nsICanvasRenderingContextWebGL_UniformMatrix3fv,
(5, (static, JSVAL_FAIL, nsICanvasRenderingContextWebGL_UniformMatrix3fv_tn, CONTEXT, THIS, UINT32, BOOL, OBJECT, 0, 0)))
(5, (static, JSVAL_FAIL, nsICanvasRenderingContextWebGL_UniformMatrix3fv_tn, CONTEXT, THIS, UINT32, BOOL, OBJECT, 0, nanojit::ACC_STORE_ANY)))
static jsval FASTCALL
nsICanvasRenderingContextWebGL_UniformMatrix4fv_tn(JSContext *cx, JSObject *obj, uint32 loc, JSBool transpose, JSObject *arg)
@ -1026,6 +1026,6 @@ nsICanvasRenderingContextWebGL_UniformMatrix4fv_tn(JSContext *cx, JSObject *obj,
}
JS_DEFINE_TRCINFO_1(nsICanvasRenderingContextWebGL_UniformMatrix4fv,
(5, (static, JSVAL_FAIL, nsICanvasRenderingContextWebGL_UniformMatrix4fv_tn, CONTEXT, THIS, UINT32, BOOL, OBJECT, 0, 0)))
(5, (static, JSVAL_FAIL, nsICanvasRenderingContextWebGL_UniformMatrix4fv_tn, CONTEXT, THIS, UINT32, BOOL, OBJECT, 0, nanojit::ACC_STORE_ANY)))
#endif /* JS_TRACER */
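The tail of every JS_DEFINE_TRCINFO_1 entry above changes from the old cse/fold pair to the new isPure/storeAccSet pair; the semantics are documented in the jsbuiltins.h hunk near the end of this commit. These WebGL quickstubs have side effects, so they are marked not pure and, conservatively, as possibly storing anywhere:

    // old trailing pair:  ..., OBJECT, 0, 0)
    // new trailing pair:  ..., OBJECT, 0, nanojit::ACC_STORE_ANY)

Only genuinely pure helpers (result determined solely by the arguments, no side effects) get the 1, nanojit::ACC_NONE pairing.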

View file

@ -41,6 +41,9 @@
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#if defined(XP_WIN) || defined(XP_OS2)
#include <float.h>
#endif
#include "prmem.h"
@ -87,7 +90,6 @@
#include "nsIDocShellTreeNode.h"
#include "nsIXPConnect.h"
#include "jsapi.h"
#include "jsnum.h"
#include "nsTArray.h"
@ -116,7 +118,17 @@ using namespace mozilla;
/* Float validation stuff */
#define VALIDATE(_f) if (!JSDOUBLE_IS_FINITE(_f)) return PR_FALSE
static inline bool
DoubleIsFinite(double d)
{
#ifdef WIN32
return _finite(d);
#else
return finite(d);
#endif
}
#define VALIDATE(_f) if (!DoubleIsFinite(_f)) return PR_FALSE
/* These must take doubles as args, because JSDOUBLE_IS_FINITE expects
* to take the address of its argument; we can't cast/convert in the

View file

@ -42,7 +42,6 @@
#include "jsprvtd.h" // we are using private JS typedefs...
#include "jscntxt.h"
#include "jsdbgapi.h"
#include "jsnum.h"
#include "nscore.h"
#include "nsDOMClassInfo.h"
@ -1483,10 +1482,10 @@ FindObjectClass(JSObject* aGlobalObject)
JSObject *obj, *proto = aGlobalObject;
do {
obj = proto;
proto = STOBJ_GET_PROTO(obj);
proto = obj->getProto();
} while (proto);
sObjectClass = STOBJ_GET_CLASS(obj);
sObjectClass = obj->getClass();
}
static void
@ -1567,6 +1566,13 @@ GetInternedJSVal(JSContext *cx, const char *str)
}
// static
nsISupports *
nsDOMClassInfo::GetNative(nsIXPConnectWrappedNative *wrapper, JSObject *obj)
{
return wrapper ? wrapper->Native() : static_cast<nsISupports*>(obj->getPrivate());
}
nsresult
nsDOMClassInfo::DefineStaticJSVals(JSContext *cx)
{
@ -3824,7 +3830,7 @@ nsDOMClassInfo::GetArrayIndexFromId(JSContext *cx, jsval id, PRBool *aIsNumber)
jsint i = -1;
if (!JSDOUBLE_IS_INT(array_index, i)) {
if (!::JS_DoubleIsInt32(array_index, &i)) {
return -1;
}
@ -6412,7 +6418,7 @@ nsWindowSH::NewResolve(nsIXPConnectWrappedNative *wrapper, JSContext *cx,
#ifdef DEBUG
if (!win->IsChromeWindow()) {
NS_ASSERTION(JSVAL_IS_OBJECT(v) &&
!strcmp(STOBJ_GET_CLASS(JSVAL_TO_OBJECT(v))->name,
!strcmp(JSVAL_TO_OBJECT(v)->getClass()->name,
"XPCCrossOriginWrapper"),
"Didn't wrap a window!");
}
@ -6525,7 +6531,7 @@ nsWindowSH::NewResolve(nsIXPConnectWrappedNative *wrapper, JSContext *cx,
#ifdef DEBUG
if (!win->IsChromeWindow()) {
NS_ASSERTION(JSVAL_IS_OBJECT(v) &&
!strcmp(STOBJ_GET_CLASS(JSVAL_TO_OBJECT(v))->name,
!strcmp(JSVAL_TO_OBJECT(v)->getClass()->name,
"XPCCrossOriginWrapper"),
"Didn't wrap a location object!");
}
@ -6733,7 +6739,7 @@ nsWindowSH::NewResolve(nsIXPConnectWrappedNative *wrapper, JSContext *cx,
wrapper->GetJSObject(&realObj);
if (obj == realObj) {
JSObject *proto = STOBJ_GET_PROTO(obj);
JSObject *proto = obj->getProto();
if (proto) {
jsid interned_id;
JSObject *pobj = NULL;
@ -8476,8 +8482,8 @@ nsHTMLDocumentSH::DocumentAllGetProperty(JSContext *cx, JSObject *obj,
return JS_TRUE;
}
while (STOBJ_GET_CLASS(obj) != &sHTMLDocumentAllClass) {
obj = STOBJ_GET_PROTO(obj);
while (obj->getClass() != &sHTMLDocumentAllClass) {
obj = obj->getProto();
if (!obj) {
NS_ERROR("The JS engine lies!");

View file

@ -43,7 +43,6 @@
#include "nsIDOMClassInfo.h"
#include "nsIXPCScriptable.h"
#include "jsapi.h"
#include "jsobj.h"
#include "nsIScriptSecurityManager.h"
#include "nsIScriptContext.h"
#include "nsDOMJSUtils.h" // for GetScriptContextFromJSContext
@ -206,12 +205,7 @@ public:
static void PreserveNodeWrapper(nsIXPConnectWrappedNative *aWrapper);
static inline nsISupports *GetNative(nsIXPConnectWrappedNative *wrapper,
JSObject *obj)
{
return wrapper ? wrapper->Native() :
static_cast<nsISupports*>(obj->getPrivate());
}
static nsISupports *GetNative(nsIXPConnectWrappedNative *wrapper, JSObject *obj);
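GetNative moves from an inline body in this header to an out-of-line definition in nsDOMClassInfo.cpp (see the hunk above adding nsDOMClassInfo::GetNative). The likely motivation: the inline version calls obj->getPrivate(), which needs the full JSObject definition, and this header no longer includes jsobj.h. The shape of the split:

    // nsDOMClassInfo.h -- declaration only; JSObject can stay incomplete:
    static nsISupports *GetNative(nsIXPConnectWrappedNative *wrapper,
                                  JSObject *obj);

    // nsDOMClassInfo.cpp -- definition where the JS headers are available:
    nsISupports *
    nsDOMClassInfo::GetNative(nsIXPConnectWrappedNative *wrapper, JSObject *obj)
    {
        return wrapper ? wrapper->Native()
                       : static_cast<nsISupports*>(obj->getPrivate());
    }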
static nsIXPConnect *XPConnect()
{

View file

@ -40,7 +40,6 @@
#include "jsapi.h"
#include "jsdtoa.h"
#include "jsprvtd.h"
#include "jsnum.h"
#include "jsbool.h"
#include "jsarena.h"
#include "jscntxt.h"

View file

@ -39,7 +39,6 @@
#ifndef nsJSON_h__
#define nsJSON_h__
#include "jsprvtd.h"
#include "jsapi.h"
#include "nsIJSON.h"
#include "nsString.h"

View file

@ -44,7 +44,12 @@
#include "nsCRTGlue.h"
#include "prlog.h"
#include "prdtoa.h"
#include "jsnum.h"
#include <math.h>
#if defined(XP_WIN) || defined(XP_OS2)
#include <float.h>
#endif
#ifdef HAVE_SSIZE_T
#include <sys/types.h>
#endif
@ -254,6 +259,14 @@ static JSFunctionSpec sUInt64Functions[] = {
JS_FS_END
};
static inline bool FloatIsFinite(jsdouble f) {
#ifdef WIN32
return _finite(f);
#else
return finite(f);
#endif
}
JS_ALWAYS_INLINE void
ASSERT_OK(JSBool ok)
{
@ -1041,7 +1054,7 @@ jsvalToIntegerExplicit(JSContext* cx, jsval val, IntegerType* result)
if (JSVAL_IS_DOUBLE(val)) {
// Convert -Inf, Inf, and NaN to 0; otherwise, convert by C-style cast.
jsdouble d = *JSVAL_TO_DOUBLE(val);
*result = JSDOUBLE_IS_FINITE(d) ? IntegerType(d) : 0;
*result = FloatIsFinite(d) ? IntegerType(d) : 0;
return true;
}
if (!JSVAL_IS_PRIMITIVE(val)) {

View file

@ -197,7 +197,6 @@ INSTALLED_HEADERS = \
jslock.h \
jslong.h \
jsmath.h \
jsnum.h \
jsobj.h \
jsobjinlines.h \
json.h \
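Removing jsnum.h from INSTALLED_HEADERS takes it out of the installed SpiderMonkey API, and that one change explains several hunks elsewhere in this commit: nsContentUtils.h, nsCanvasRenderingContext2D.cpp, and js/ctypes CTypes.cpp each stop including jsnum.h and open-code a small finiteness helper, while JSDOUBLE_IS_INT callers switch to the new public JS_DoubleIsInt32. The helper pattern each consumer now carries (names vary per file):

    #include <math.h>
    #if defined(XP_WIN) || defined(XP_OS2)
    #include <float.h>
    #endif

    static inline bool LocalDoubleIsFinite(double d) {
    #ifdef WIN32
        return _finite(d);   // float.h
    #else
        return finite(d);    // math.h (POSIX)
    #endif
    }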

View file

@ -95,10 +95,6 @@ ifdef JS_NO_THIN_LOCKS
DEFINES += -DJS_USE_ONLY_NSPR_LOCKS
endif
ifdef JS_HAS_FILE_OBJECT
DEFINES += -DJS_HAS_FILE_OBJECT
endif
ifdef JS_GC_ZEAL
DEFINES += -DJS_GC_ZEAL
endif
@ -318,11 +314,6 @@ ifndef BUILD_OPT
# $(NULL)
endif
ifdef JS_HAS_FILE_OBJECT
JS_CPPFILES += jsfile.cpp
JS_HFILES += jsfile.h
endif
LIB_CPPFILES = $(JS_CPPFILES)
LIB_ASFILES := $(wildcard *_$(OS_ARCH).s)
PROG_CPPFILES = js.cpp

View file

@ -117,11 +117,11 @@ typedef struct _HISTORY {
/*
** Globals.
*/
int rl_eof;
int rl_erase;
int rl_intr;
int rl_kill;
int rl_quit;
unsigned rl_eof;
unsigned rl_erase;
unsigned rl_intr;
unsigned rl_kill;
unsigned rl_quit;
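The rl_* key-code globals switch from int to unsigned, which is what forces the (int) casts sprinkled through the later hunks in this file: TTYget() reports end-of-input as EOF (-1), and comparing that against an unsigned variable is a signed/unsigned comparison. The usual arithmetic conversions make it well-defined, but compilers warn, so the code now casts back to int at each EOF test. A compiling sketch of the fixed pattern (TTYgetSketch stands in for this file's TTYget):

    #include <cstdio>

    static int TTYgetSketch() { return std::getchar(); }

    static bool ReadOne(unsigned int &c) {
        // (c = TTYgetSketch()) == EOF would draw -Wsign-compare;
        // the merge adds an explicit cast instead:
        return (int)(c = TTYgetSketch()) != EOF;
    }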
STATIC CHAR NIL[] = "";
STATIC CONST CHAR *Input = NIL;
@ -841,7 +841,7 @@ meta()
unsigned int c;
KEYMAP *kp;
if ((c = TTYget()) == EOF)
if ((int)(c = TTYget()) == EOF)
return CSeof;
#if defined(ANSI_ARROWS)
/* Also include VT-100 arrows. */
@ -857,7 +857,7 @@ meta()
#endif /* defined(ANSI_ARROWS) */
if (isdigit(c)) {
for (Repeat = c - '0'; (c = TTYget()) != EOF && isdigit(c); )
for (Repeat = c - '0'; (int)(c = TTYget()) != EOF && isdigit(c); )
Repeat = Repeat * 10 + c - '0';
Pushed = 1;
PushBack = c;
@ -902,7 +902,7 @@ TTYspecial(c)
if (ISMETA(c))
return CSdispatch;
if (c == rl_erase || c == DEL)
if (c == rl_erase || (int)c == DEL)
return bk_del_char();
if (c == rl_kill) {
if (Point != 0) {
@ -936,7 +936,7 @@ editinput()
Line[0] = '\0';
Signal = -1;
while ((c = TTYget()) != EOF)
while ((int)(c = TTYget()) != EOF)
switch (TTYspecial(c)) {
case CSdone:
return Line;
@ -966,7 +966,7 @@ editinput()
case CSstay:
break;
}
if (strlen(Line))
if (strlen((char *)Line))
return Line;
free(Line);
return NULL;
@ -1050,7 +1050,7 @@ add_history(p)
return;
#if defined(UNIQUE_HISTORY)
if (H.Size && strcmp(p, H.Lines[H.Size - 1]) == 0)
if (H.Size && strcmp(p, (char *)H.Lines[H.Size - 1]) == 0)
return;
#endif /* defined(UNIQUE_HISTORY) */
hist_add((CHAR *)p);
@ -1113,7 +1113,7 @@ quote()
{
unsigned int c;
return (c = TTYget()) == EOF ? CSeof : insert_char((int)c);
return (int)(c = TTYget()) == EOF ? CSeof : insert_char((int)c);
}
STATIC STATUS
@ -1147,9 +1147,9 @@ exchange()
unsigned int c;
if ((c = TTYget()) != CTL('X'))
return c == EOF ? CSeof : ring_bell();
return (int)c == EOF ? CSeof : ring_bell();
if ((c = Mark) <= End) {
if ((int)(c = Mark) <= End) {
Mark = Point;
Point = c;
return CSmove;
@ -1186,7 +1186,7 @@ move_to_char()
int i;
CHAR *p;
if ((c = TTYget()) == EOF)
if ((int)(c = TTYget()) == EOF)
return CSeof;
for (i = Point + 1, p = &Line[i]; i < End; i++, p++)
if (*p == c) {

View file

@ -108,11 +108,11 @@ typedef unsigned char CHAR;
/*
** Variables and routines internal to this package.
*/
extern int rl_eof;
extern int rl_erase;
extern int rl_intr;
extern int rl_kill;
extern int rl_quit;
extern unsigned rl_eof;
extern unsigned rl_erase;
extern unsigned rl_intr;
extern unsigned rl_kill;
extern unsigned rl_quit;
extern char *rl_complete();
extern int rl_list_possib();
extern void rl_ttyset();

View file

@ -182,7 +182,7 @@ const line_regexp_parts = [
"^(?:(\\w+):)?", // optional label at start of line
"\\s*(\\.?\\w+)", // optional spaces, (pseudo-)opcode
"(?:\\s+(\\w+|\\([^)]*\\)))?", // optional first immediate operand
"(?:\\s+([\\w-]+|\\([^)]*\\)))?", // optional second immediate operand
"(?:\\s+([\\w-,]+|\\([^)]*\\)))?", // optional second immediate operand
"(?:\\s*(?:#.*))?$" // optional spaces and comment
];
@ -216,14 +216,29 @@ function assemble(filename) {
throw new Error("missing .igroup name");
if (igroup)
throw new Error("nested .igroup " + imm1);
let oprange = imm2.match(/^(\w+)(?:-(\w+))?$/);
if (!oprange)
if (!imm2.match(/^\w+(?:-\w+)?(?:,\w+(?:-\w+)?)*$/))
throw new Error("invalid igroup operator range " + imm2);
let firstop = jsop2opcode[oprange[1]];
let ops = imm2.split(",").reduce(function(initial, current) {
let split = current.match(/^(\w+)(?:-(\w+))?$/);
let opcode = jsop2opcode[split[1]];
if (opcode in initial)
throw new Error("repeated opcode " + split[1]);
initial[opcode] = true;
if (split[2] !== undefined) {
let lastopcode = jsop2opcode[split[2]];
if (opcode >= lastopcode)
throw new Error("invalid opcode range: " + current);
while (++opcode <= lastopcode) {
if (opcode in initial)
throw new Error("repeated opcode " + split[2]);
initial[opcode] = true;
}
}
return initial;
}, {});
igroup = {
name: imm1,
firstop: firstop,
lastop: oprange[2] ? jsop2opcode[oprange[2]] : firstop,
ops: ops,
imacros: []
};
break;
@ -286,11 +301,8 @@ function assemble(filename) {
print("};");
let opcode = igroup.firstop;
let oplast = igroup.lastop;
do {
for (let opcode in igroup.ops)
opcode2extra[opcode] = maxdepth;
} while (opcode++ != oplast);
igroups.push(igroup);
igroup = null;
} else {

View file

@ -86,7 +86,8 @@
.end equality
.igroup binary JSOP_BITOR-JSOP_MOD
# A single range, split up like so to test groups over multiple ranges of ops
.igroup binary JSOP_BITOR-JSOP_BITAND,JSOP_EQ-JSOP_DIV,JSOP_MOD
.imacro any_obj # any obj
dup # any obj obj

View file

@ -443,6 +443,12 @@ JS_ValueToNumber(JSContext *cx, jsval v, jsdouble *dp)
return !JSVAL_IS_NULL(tvr.value());
}
JS_PUBLIC_API(JSBool)
JS_DoubleIsInt32(jsdouble d, jsint *ip)
{
return JSDOUBLE_IS_INT(d, *ip);
}
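JS_DoubleIsInt32 exposes the engine-internal JSDOUBLE_IS_INT test as public API; it is what lets nsDOMClassInfo::GetArrayIndexFromId (earlier in this commit) drop jsnum.h. A caller-side sketch, assuming the post-merge jsapi.h:

    #include "jsapi.h"

    static jsint GetIndexSketch(jsdouble d) {
        jsint i = -1;
        return JS_DoubleIsInt32(d, &i) ? i : -1;  // -1: not an int32 value
    }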
JS_PUBLIC_API(JSBool)
JS_ValueToECMAInt32(JSContext *cx, jsval v, int32 *ip)
{
@ -502,7 +508,7 @@ JS_TypeOfValue(JSContext *cx, jsval v)
obj = JSVAL_TO_OBJECT(v);
if (obj)
return obj->map->ops->typeOf(cx, obj);
return JSTYPE_OBJECT;
type = JSTYPE_OBJECT;
} else if (JSVAL_IS_NUMBER(v)) {
type = JSTYPE_NUMBER;
} else if (JSVAL_IS_STRING(v)) {
@ -725,6 +731,12 @@ JS_DestroyRuntime(JSRuntime *rt)
js_free(rt);
}
#ifdef JS_REPRMETER
namespace reprmeter {
extern void js_DumpReprMeter();
}
#endif
JS_PUBLIC_API(void)
JS_ShutDown(void)
{
@ -734,10 +746,13 @@ JS_ShutDown(void)
#ifdef JS_OPMETER
extern void js_DumpOpMeters();
js_DumpOpMeters();
#endif
#ifdef JS_REPRMETER
reprmeter::js_DumpReprMeter();
#endif
js_FinishDtoa();
#ifdef JS_THREADSAFE
js_CleanupLocks();
@ -1139,9 +1154,9 @@ js_InitFunctionAndObjectClasses(JSContext *cx, JSObject *obj)
}
/* Function.prototype and the global object delegate to Object.prototype. */
OBJ_SET_PROTO(cx, fun_proto, obj_proto);
if (!OBJ_GET_PROTO(cx, obj))
OBJ_SET_PROTO(cx, obj, obj_proto);
fun_proto->setProto(obj_proto);
if (!obj->getProto())
obj->setProto(obj_proto);
out:
/* If resolving, remove the other entry (Object or Function) from table. */
@ -1188,9 +1203,6 @@ JS_InitStandardClasses(JSContext *cx, JSObject *obj)
js_InitStringClass(cx, obj) &&
js_InitEval(cx, obj) &&
js_InitTypedArrayClasses(cx, obj) &&
#if JS_HAS_SCRIPT_OBJECT
js_InitScriptClass(cx, obj) &&
#endif
#if JS_HAS_XML_SUPPORT
js_InitXMLClasses(cx, obj) &&
#endif
@ -1249,9 +1261,6 @@ static JSStdName standard_class_atoms[] = {
{js_InitStringClass, EAGER_ATOM_AND_CLASP(String)},
{js_InitExceptionClasses, EAGER_ATOM_AND_CLASP(Error)},
{js_InitRegExpClass, EAGER_ATOM_AND_CLASP(RegExp)},
#if JS_HAS_SCRIPT_OBJECT
{js_InitScriptClass, EAGER_ATOM_AND_CLASP(Script)},
#endif
#if JS_HAS_XML_SUPPORT
{js_InitXMLClass, EAGER_ATOM_AND_CLASP(XML)},
{js_InitNamespaceClass, EAGER_ATOM_AND_XCLASP(Namespace)},
@ -1409,7 +1418,7 @@ JS_ResolveStandardClass(JSContext *cx, JSObject *obj, jsval id,
}
}
if (!stdnm && !OBJ_GET_PROTO(cx, obj)) {
if (!stdnm && !obj->getProto()) {
/*
* Try even less frequently used names delegated from the global
* object to Object.prototype, but only if the Object class hasn't
@ -1474,7 +1483,8 @@ JS_EnumerateStandardClasses(JSContext *cx, JSObject *obj)
atom = rt->atomState.typeAtoms[JSTYPE_VOID];
if (!AlreadyHasOwnProperty(cx, obj, atom) &&
!obj->defineProperty(cx, ATOM_TO_JSID(atom), JSVAL_VOID,
JS_PropertyStub, JS_PropertyStub, JSPROP_PERMANENT)) {
JS_PropertyStub, JS_PropertyStub,
JSPROP_PERMANENT | JSPROP_READONLY)) {
return JS_FALSE;
}
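Note the added JSPROP_READONLY: the global undefined property becomes permanent and read-only, matching ES5, under which assignments to undefined are silently ignored (or throw in strict mode). An equivalent definition through the public API, sketched (assuming the JS_DefinePropertyById entry point available in this tree):

    #include "jsapi.h"

    static JSBool
    DefineUndefinedSketch(JSContext *cx, JSObject *global, jsid id)
    {
        return JS_DefinePropertyById(cx, global, id, JSVAL_VOID,
                                     JS_PropertyStub, JS_PropertyStub,
                                     JSPROP_PERMANENT | JSPROP_READONLY);
    }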
@ -1656,9 +1666,7 @@ JS_GetScopeChain(JSContext *cx)
JS_PUBLIC_API(JSObject *)
JS_GetGlobalForObject(JSContext *cx, JSObject *obj)
{
JSObject *parent;
while ((parent = OBJ_GET_PARENT(cx, obj)) != NULL)
while (JSObject *parent = obj->getParent())
obj = parent;
return obj;
}
@ -2658,7 +2666,7 @@ JS_GetPrototype(JSContext *cx, JSObject *obj)
JSObject *proto;
CHECK_REQUEST(cx);
proto = OBJ_GET_PROTO(cx, obj);
proto = obj->getProto();
/* Beware ref to dead object (we may be called from obj's finalizer). */
return proto && proto->map ? proto : NULL;
@ -2674,9 +2682,7 @@ JS_SetPrototype(JSContext *cx, JSObject *obj, JSObject *proto)
JS_PUBLIC_API(JSObject *)
JS_GetParent(JSContext *cx, JSObject *obj)
{
JSObject *parent;
parent = OBJ_GET_PARENT(cx, obj);
JSObject *parent = obj->getParent();
/* Beware ref to dead object (we may be called from obj's finalizer). */
return parent && parent->map ? parent : NULL;
@ -2744,7 +2750,7 @@ JS_SealObject(JSContext *cx, JSObject *obj, JSBool deep)
uint32 nslots, i;
jsval v;
if (OBJ_IS_DENSE_ARRAY(cx, obj) && !js_MakeArraySlow(cx, obj))
if (obj->isDenseArray() && !js_MakeArraySlow(cx, obj))
return JS_FALSE;
if (!OBJ_IS_NATIVE(obj)) {
@ -3029,7 +3035,7 @@ JS_AliasProperty(JSContext *cx, JSObject *obj, const char *name,
} else {
sprop = (JSScopeProperty *)prop;
ok = (js_AddNativeProperty(cx, obj, ATOM_TO_JSID(atom),
sprop->getter, sprop->setter, sprop->slot,
sprop->getter(), sprop->setter(), sprop->slot,
sprop->attrs, sprop->getFlags() | JSScopeProperty::ALIAS,
sprop->shortid)
!= NULL);
@ -3063,7 +3069,7 @@ LookupResult(JSContext *cx, JSObject *obj, JSObject *obj2, JSProperty *prop,
*vp = SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(obj2))
? LOCKED_OBJ_GET_SLOT(obj2, sprop->slot)
: JSVAL_TRUE;
} else if (OBJ_IS_DENSE_ARRAY(cx, obj2)) {
} else if (obj2->isDenseArray()) {
ok = js_GetDenseArrayElementValue(cx, obj2, prop, vp);
} else {
/* XXX bad API: no way to return "defined but value unknown" */
@ -3102,8 +3108,8 @@ GetPropertyAttributesById(JSContext *cx, JSObject *obj, jsid id, uintN flags,
if (OBJ_IS_NATIVE(obj2)) {
JSScopeProperty *sprop = (JSScopeProperty *) prop;
desc->getter = sprop->getter;
desc->setter = sprop->setter;
desc->getter = sprop->getter();
desc->setter = sprop->setter();
desc->value = SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(obj2))
? LOCKED_OBJ_GET_SLOT(obj2, sprop->slot)
: JSVAL_VOID;
@ -3643,7 +3649,7 @@ JS_NewArrayObject(JSContext *cx, jsint length, jsval *vector)
JS_PUBLIC_API(JSBool)
JS_IsArrayObject(JSContext *cx, JSObject *obj)
{
return OBJ_IS_ARRAY(cx, js_GetWrappedObject(cx, obj));
return js_GetWrappedObject(cx, obj)->isArray();
}
JS_PUBLIC_API(JSBool)
@ -3702,7 +3708,7 @@ JS_AliasElement(JSContext *cx, JSObject *obj, const char *name, jsint alias)
}
sprop = (JSScopeProperty *)prop;
ok = (js_AddNativeProperty(cx, obj, INT_TO_JSID(alias),
sprop->getter, sprop->setter, sprop->slot,
sprop->getter(), sprop->setter(), sprop->slot,
sprop->attrs, sprop->getFlags() | JSScopeProperty::ALIAS,
sprop->shortid)
!= NULL);
@ -3966,7 +3972,7 @@ JS_NextProperty(JSContext *cx, JSObject *iterobj, jsid *idp)
i = JSVAL_TO_INT(iterobj->fslots[JSSLOT_ITER_INDEX]);
if (i < 0) {
/* Native case: private data is a property tree node pointer. */
obj = OBJ_GET_PARENT(cx, iterobj);
obj = iterobj->getParent();
JS_ASSERT(OBJ_IS_NATIVE(obj));
scope = OBJ_SCOPE(obj);
sprop = (JSScopeProperty *) iterobj->getPrivate();
@ -4154,7 +4160,7 @@ JS_CloneFunctionObject(JSContext *cx, JSObject *funobj, JSObject *parent)
JSMSG_BAD_CLONE_FUNOBJ_SCOPE);
goto break2;
}
obj = OBJ_GET_PARENT(cx, obj);
obj = obj->getParent();
}
JSAtom *atom = JS_LOCAL_NAME_TO_ATOM(names[i]);
@ -4735,7 +4741,7 @@ JS_CompileUCFunctionForPrincipals(JSContext *cx, JSObject *obj,
JSObject *pobj = obj;
uintN depth = 1;
while ((pobj = OBJ_GET_PARENT(cx, pobj)) != NULL)
while ((pobj = pobj->getParent()) != NULL)
++depth;
JS_BASIC_STATS_ACCUM(&cx->runtime->hostenvScopeDepthStats, depth);
}
@ -5582,7 +5588,8 @@ JS_ClearPendingException(JSContext *cx)
JS_PUBLIC_API(JSBool)
JS_ReportPendingException(JSContext *cx)
{
JSBool save, ok;
JSBool ok;
JSPackedBool save;
CHECK_REQUEST(cx);

View file

@ -467,6 +467,9 @@ JS_ValueToSource(JSContext *cx, jsval v);
extern JS_PUBLIC_API(JSBool)
JS_ValueToNumber(JSContext *cx, jsval v, jsdouble *dp);
extern JS_PUBLIC_API(JSBool)
JS_DoubleIsInt32(jsdouble d, jsint *ip);
/*
* Convert a value to a number, then to an int32, according to the ECMA rules
* for ToInt32.

View file

@ -226,7 +226,7 @@ ValueIsLength(JSContext *cx, jsval* vp)
JSBool
js_GetLengthProperty(JSContext *cx, JSObject *obj, jsuint *lengthp)
{
if (OBJ_IS_ARRAY(cx, obj)) {
if (obj->isArray()) {
*lengthp = obj->fslots[JSSLOT_ARRAY_LENGTH];
return JS_TRUE;
}
@ -444,7 +444,7 @@ GetArrayElement(JSContext *cx, JSObject *obj, jsdouble index, JSBool *hole,
jsval *vp)
{
JS_ASSERT(index >= 0);
if (OBJ_IS_DENSE_ARRAY(cx, obj) && index < js_DenseArrayCapacity(obj) &&
if (obj->isDenseArray() && index < js_DenseArrayCapacity(obj) &&
(*vp = obj->dslots[jsuint(index)]) != JSVAL_HOLE) {
*hole = JS_FALSE;
return JS_TRUE;
@ -484,7 +484,7 @@ SetArrayElement(JSContext *cx, JSObject *obj, jsdouble index, jsval v)
{
JS_ASSERT(index >= 0);
if (OBJ_IS_DENSE_ARRAY(cx, obj)) {
if (obj->isDenseArray()) {
/* Predicted/prefetched code should favor the remains-dense case. */
if (index <= jsuint(-1)) {
jsuint idx = jsuint(index);
@ -518,7 +518,7 @@ static JSBool
DeleteArrayElement(JSContext *cx, JSObject *obj, jsdouble index)
{
JS_ASSERT(index >= 0);
if (OBJ_IS_DENSE_ARRAY(cx, obj)) {
if (obj->isDenseArray()) {
if (index <= jsuint(-1)) {
jsuint idx = jsuint(index);
if (!INDEX_TOO_SPARSE(obj, idx) && idx < js_DenseArrayCapacity(obj)) {
@ -615,9 +615,9 @@ static JSBool
array_length_getter(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
{
do {
if (OBJ_IS_ARRAY(cx, obj))
if (obj->isArray())
return IndexToValue(cx, obj->fslots[JSSLOT_ARRAY_LENGTH], vp);
} while ((obj = OBJ_GET_PROTO(cx, obj)) != NULL);
} while ((obj = obj->getProto()) != NULL);
return JS_TRUE;
}
@ -630,7 +630,7 @@ array_length_setter(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
JSTempValueRooter tvr;
JSBool ok;
if (!OBJ_IS_ARRAY(cx, obj)) {
if (!obj->isArray()) {
jsid lengthId = ATOM_TO_JSID(cx->runtime->atomState.lengthAtom);
return obj->defineProperty(cx, lengthId, *vp, NULL, NULL, JSPROP_ENUMERATE);
@ -652,7 +652,7 @@ array_length_setter(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
return JS_TRUE;
}
if (OBJ_IS_DENSE_ARRAY(cx, obj)) {
if (obj->isDenseArray()) {
/* Don't reallocate if we're not actually shrinking our slots. */
jsuint capacity = js_DenseArrayCapacity(obj);
if (capacity > newlen && !ResizeSlots(cx, obj, capacity, newlen))
@ -709,7 +709,7 @@ array_length_setter(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
static inline bool
IsDenseArrayId(JSContext *cx, JSObject *obj, jsid id)
{
JS_ASSERT(OBJ_IS_DENSE_ARRAY(cx, obj));
JS_ASSERT(obj->isDenseArray());
uint32 i;
return id == ATOM_TO_JSID(cx->runtime->atomState.lengthAtom) ||
@ -723,7 +723,7 @@ static JSBool
array_lookupProperty(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
JSProperty **propp)
{
if (!OBJ_IS_DENSE_ARRAY(cx, obj))
if (!obj->isDenseArray())
return js_LookupProperty(cx, obj, id, objp, propp);
if (IsDenseArrayId(cx, obj, id)) {
@ -732,7 +732,7 @@ array_lookupProperty(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
return JS_TRUE;
}
JSObject *proto = STOBJ_GET_PROTO(obj);
JSObject *proto = obj->getProto();
if (!proto) {
*objp = NULL;
*propp = NULL;
@ -776,7 +776,7 @@ array_getProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
return JS_TRUE;
}
if (!OBJ_IS_DENSE_ARRAY(cx, obj))
if (!obj->isDenseArray())
return js_GetProperty(cx, obj, id, vp);
if (!js_IdIsIndex(ID_TO_VALUE(id), &i) || i >= js_DenseArrayCapacity(obj) ||
@ -785,7 +785,7 @@ array_getProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
JSProperty *prop;
JSScopeProperty *sprop;
JSObject *proto = STOBJ_GET_PROTO(obj);
JSObject *proto = obj->getProto();
if (!proto) {
*vp = JSVAL_VOID;
return JS_TRUE;
@ -862,7 +862,7 @@ array_setProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
if (id == ATOM_TO_JSID(cx->runtime->atomState.lengthAtom))
return array_length_setter(cx, obj, id, vp);
if (!OBJ_IS_DENSE_ARRAY(cx, obj))
if (!obj->isDenseArray())
return js_SetProperty(cx, obj, id, vp);
if (!js_IdIsIndex(id, &i) || INDEX_TOO_SPARSE(obj, i)) {
@ -947,15 +947,16 @@ dense_grow(JSContext* cx, JSObject* obj, jsint i, jsval v)
JSBool FASTCALL
js_Array_dense_setelem(JSContext* cx, JSObject* obj, jsint i, jsval v)
{
JS_ASSERT(OBJ_IS_DENSE_ARRAY(cx, obj));
JS_ASSERT(obj->isDenseArray());
return dense_grow(cx, obj, i, v);
}
JS_DEFINE_CALLINFO_4(extern, BOOL, js_Array_dense_setelem, CONTEXT, OBJECT, INT32, JSVAL, 0, 0)
JS_DEFINE_CALLINFO_4(extern, BOOL, js_Array_dense_setelem, CONTEXT, OBJECT, INT32, JSVAL, 0,
nanojit::ACC_STORE_ANY)
JSBool FASTCALL
js_Array_dense_setelem_int(JSContext* cx, JSObject* obj, jsint i, int32 j)
{
JS_ASSERT(OBJ_IS_DENSE_ARRAY(cx, obj));
JS_ASSERT(obj->isDenseArray());
jsval v;
if (JS_LIKELY(INT_FITS_IN_JSVAL(j))) {
@ -968,12 +969,13 @@ js_Array_dense_setelem_int(JSContext* cx, JSObject* obj, jsint i, int32 j)
return dense_grow(cx, obj, i, v);
}
JS_DEFINE_CALLINFO_4(extern, BOOL, js_Array_dense_setelem_int, CONTEXT, OBJECT, INT32, INT32, 0, 0)
JS_DEFINE_CALLINFO_4(extern, BOOL, js_Array_dense_setelem_int, CONTEXT, OBJECT, INT32, INT32, 0,
nanojit::ACC_STORE_ANY)
JSBool FASTCALL
js_Array_dense_setelem_double(JSContext* cx, JSObject* obj, jsint i, jsdouble d)
{
JS_ASSERT(OBJ_IS_DENSE_ARRAY(cx, obj));
JS_ASSERT(obj->isDenseArray());
jsval v;
jsint j;
@ -987,7 +989,8 @@ js_Array_dense_setelem_double(JSContext* cx, JSObject* obj, jsint i, jsdouble d)
return dense_grow(cx, obj, i, v);
}
JS_DEFINE_CALLINFO_4(extern, BOOL, js_Array_dense_setelem_double, CONTEXT, OBJECT, INT32, DOUBLE, 0, 0)
JS_DEFINE_CALLINFO_4(extern, BOOL, js_Array_dense_setelem_double, CONTEXT, OBJECT, INT32, DOUBLE,
0, nanojit::ACC_STORE_ANY)
#endif
static JSBool
@ -1001,7 +1004,7 @@ array_defineProperty(JSContext *cx, JSObject *obj, jsid id, jsval value,
return JS_TRUE;
isIndex = js_IdIsIndex(ID_TO_VALUE(id), &i);
if (!isIndex || attrs != JSPROP_ENUMERATE || !OBJ_IS_DENSE_ARRAY(cx, obj) || INDEX_TOO_SPARSE(obj, i)) {
if (!isIndex || attrs != JSPROP_ENUMERATE || !obj->isDenseArray() || INDEX_TOO_SPARSE(obj, i)) {
if (!ENSURE_SLOW_ARRAY(cx, obj))
return JS_FALSE;
return js_DefineProperty(cx, obj, id, value, getter, setter, attrs);
@ -1033,7 +1036,7 @@ array_deleteProperty(JSContext *cx, JSObject *obj, jsval id, jsval *rval)
{
uint32 i;
if (!OBJ_IS_DENSE_ARRAY(cx, obj))
if (!obj->isDenseArray())
return js_DeleteProperty(cx, obj, id, rval);
if (id == ATOM_TO_JSID(cx->runtime->atomState.lengthAtom)) {
@ -1121,7 +1124,7 @@ array_enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
switch (enum_op) {
case JSENUMERATE_INIT:
JS_ASSERT(OBJ_IS_DENSE_ARRAY(cx, obj));
JS_ASSERT(obj->isDenseArray());
capacity = js_DenseArrayCapacity(obj);
if (idp)
*idp = INT_TO_JSVAL(obj->fslots[JSSLOT_ARRAY_COUNT]);
@ -1643,7 +1646,7 @@ InitArrayElements(JSContext *cx, JSObject *obj, jsuint start, jsuint count, jsva
* Optimize for dense arrays so long as adding the given set of elements
* wouldn't otherwise make the array slow.
*/
if (OBJ_IS_DENSE_ARRAY(cx, obj) && !js_PrototypeHasIndexedProperties(cx, obj) &&
if (obj->isDenseArray() && !js_PrototypeHasIndexedProperties(cx, obj) &&
start <= MAXINDEX - count && !INDEX_TOO_BIG(start + count)) {
#ifdef DEBUG_jwalden
@ -1710,7 +1713,7 @@ InitArrayElements(JSContext *cx, JSObject *obj, jsuint start, jsuint count, jsva
return JS_TRUE;
/* Finish out any remaining elements past the max array index. */
if (OBJ_IS_DENSE_ARRAY(cx, obj) && !ENSURE_SLOW_ARRAY(cx, obj))
if (obj->isDenseArray() && !ENSURE_SLOW_ARRAY(cx, obj))
return JS_FALSE;
JS_ASSERT(start == MAXINDEX);
@ -1737,7 +1740,7 @@ static JSBool
InitArrayObject(JSContext *cx, JSObject *obj, jsuint length, jsval *vector,
JSBool holey = JS_FALSE)
{
JS_ASSERT(OBJ_IS_ARRAY(cx, obj));
JS_ASSERT(obj->isArray());
obj->fslots[JSSLOT_ARRAY_LENGTH] = length;
@ -1816,7 +1819,7 @@ array_reverse(JSContext *cx, uintN argc, jsval *vp)
return JS_FALSE;
*vp = OBJECT_TO_JSVAL(obj);
if (OBJ_IS_DENSE_ARRAY(cx, obj) && !js_PrototypeHasIndexedProperties(cx, obj)) {
if (obj->isDenseArray() && !js_PrototypeHasIndexedProperties(cx, obj)) {
/* An empty array or an array with no elements is already reversed. */
if (len == 0 || !obj->dslots)
return JS_TRUE;
@ -2407,7 +2410,7 @@ array_push1_dense(JSContext* cx, JSObject* obj, jsval v, jsval *rval)
JSBool JS_FASTCALL
js_ArrayCompPush(JSContext *cx, JSObject *obj, jsval v)
{
JS_ASSERT(OBJ_IS_DENSE_ARRAY(cx, obj));
JS_ASSERT(obj->isDenseArray());
uint32_t length = (uint32_t) obj->fslots[JSSLOT_ARRAY_LENGTH];
JS_ASSERT(length <= js_DenseArrayCapacity(obj));
@ -2426,14 +2429,15 @@ js_ArrayCompPush(JSContext *cx, JSObject *obj, jsval v)
obj->dslots[length] = v;
return JS_TRUE;
}
JS_DEFINE_CALLINFO_3(extern, BOOL, js_ArrayCompPush, CONTEXT, OBJECT, JSVAL, 0, 0)
JS_DEFINE_CALLINFO_3(extern, BOOL, js_ArrayCompPush, CONTEXT, OBJECT, JSVAL, 0,
nanojit::ACC_STORE_ANY)
#ifdef JS_TRACER
static jsval FASTCALL
Array_p_push1(JSContext* cx, JSObject* obj, jsval v)
{
JSAutoTempValueRooter tvr(cx, v);
if (OBJ_IS_DENSE_ARRAY(cx, obj)
if (obj->isDenseArray()
? array_push1_dense(cx, obj, v, tvr.addr())
: array_push_slowly(cx, obj, 1, tvr.addr(), tvr.addr())) {
return tvr.value();
@ -2452,7 +2456,7 @@ array_push(JSContext *cx, uintN argc, jsval *vp)
obj = JS_THIS_OBJECT(cx, vp);
if (!obj)
return JS_FALSE;
if (argc != 1 || !OBJ_IS_DENSE_ARRAY(cx, obj))
if (argc != 1 || !obj->isDenseArray())
return array_push_slowly(cx, obj, argc, vp + 2, vp);
return array_push1_dense(cx, obj, vp[2], vp);
@ -2505,7 +2509,7 @@ static jsval FASTCALL
Array_p_pop(JSContext* cx, JSObject* obj)
{
JSAutoTempValueRooter tvr(cx);
if (OBJ_IS_DENSE_ARRAY(cx, obj)
if (obj->isDenseArray()
? array_pop_dense(cx, obj, tvr.addr())
: array_pop_slowly(cx, obj, tvr.addr())) {
return tvr.value();
@ -2523,7 +2527,7 @@ array_pop(JSContext *cx, uintN argc, jsval *vp)
obj = JS_THIS_OBJECT(cx, vp);
if (!obj)
return JS_FALSE;
if (OBJ_IS_DENSE_ARRAY(cx, obj))
if (obj->isDenseArray())
return array_pop_dense(cx, obj, vp);
return array_pop_slowly(cx, obj, vp);
}
@ -2543,7 +2547,7 @@ array_shift(JSContext *cx, uintN argc, jsval *vp)
} else {
length--;
if (OBJ_IS_DENSE_ARRAY(cx, obj) && !js_PrototypeHasIndexedProperties(cx, obj) &&
if (obj->isDenseArray() && !js_PrototypeHasIndexedProperties(cx, obj) &&
length < js_DenseArrayCapacity(obj)) {
if (JS_LIKELY(obj->dslots != NULL)) {
*vp = obj->dslots[0];
@ -2602,7 +2606,7 @@ array_unshift(JSContext *cx, uintN argc, jsval *vp)
/* Slide up the array to make room for argc at the bottom. */
argv = JS_ARGV(cx, vp);
if (length > 0) {
if (OBJ_IS_DENSE_ARRAY(cx, obj) && !js_PrototypeHasIndexedProperties(cx, obj) &&
if (obj->isDenseArray() && !js_PrototypeHasIndexedProperties(cx, obj) &&
!INDEX_TOO_SPARSE(obj, unsigned(newlen + argc))) {
JS_ASSERT(newlen + argc == length + argc);
if (!EnsureCapacity(cx, obj, length + argc))
@ -2707,7 +2711,7 @@ array_splice(JSContext *cx, uintN argc, jsval *vp)
/* If there are elements to remove, put them into the return value. */
if (count > 0) {
if (OBJ_IS_DENSE_ARRAY(cx, obj) && !js_PrototypeHasIndexedProperties(cx, obj) &&
if (obj->isDenseArray() && !js_PrototypeHasIndexedProperties(cx, obj) &&
!js_PrototypeHasIndexedProperties(cx, obj2) &&
end <= js_DenseArrayCapacity(obj)) {
if (!InitArrayObject(cx, obj2, count, obj->dslots + begin,
@ -2736,7 +2740,7 @@ array_splice(JSContext *cx, uintN argc, jsval *vp)
if (argc > count) {
delta = (jsuint)argc - count;
last = length;
if (OBJ_IS_DENSE_ARRAY(cx, obj) && !js_PrototypeHasIndexedProperties(cx, obj) &&
if (obj->isDenseArray() && !js_PrototypeHasIndexedProperties(cx, obj) &&
length <= js_DenseArrayCapacity(obj) &&
(length == 0 || obj->dslots[length - 1] != JSVAL_HOLE)) {
if (!EnsureCapacity(cx, obj, length + delta))
@ -2763,7 +2767,7 @@ array_splice(JSContext *cx, uintN argc, jsval *vp)
length += delta;
} else if (argc < count) {
delta = count - (jsuint)argc;
if (OBJ_IS_DENSE_ARRAY(cx, obj) && !js_PrototypeHasIndexedProperties(cx, obj) &&
if (obj->isDenseArray() && !js_PrototypeHasIndexedProperties(cx, obj) &&
length <= js_DenseArrayCapacity(obj)) {
/* (uint) end could be 0, so we can't use a vanilla >= test. */
for (last = end; last < length; last++) {
@ -2812,7 +2816,7 @@ array_concat(JSContext *cx, uintN argc, jsval *vp)
/* Create a new Array object and root it using *vp. */
aobj = JS_THIS_OBJECT(cx, vp);
if (OBJ_IS_DENSE_ARRAY(cx, aobj)) {
if (aobj->isDenseArray()) {
/*
* Clone aobj but pass the minimum of its length and capacity, to
* handle a = [1,2,3]; a.length = 10000 "dense" cases efficiently. In
@ -2854,7 +2858,7 @@ array_concat(JSContext *cx, uintN argc, jsval *vp)
aobj = JSVAL_TO_OBJECT(v);
wobj = js_GetWrappedObject(cx, aobj);
if (OBJ_IS_ARRAY(cx, wobj)) {
if (wobj->isArray()) {
jsid id = ATOM_TO_JSID(cx->runtime->atomState.lengthAtom);
if (!aobj->getProperty(cx, id, tvr.addr()))
return false;
@ -2939,7 +2943,7 @@ array_slice(JSContext *cx, uintN argc, jsval *vp)
if (begin > end)
begin = end;
if (OBJ_IS_DENSE_ARRAY(cx, obj) && end <= js_DenseArrayCapacity(obj) &&
if (obj->isDenseArray() && end <= js_DenseArrayCapacity(obj) &&
!js_PrototypeHasIndexedProperties(cx, obj)) {
nobj = js_NewArrayObject(cx, end - begin, obj->dslots + begin,
obj->fslots[JSSLOT_ARRAY_COUNT] !=
@ -3298,7 +3302,7 @@ array_isArray(JSContext *cx, uintN argc, jsval *vp)
{
*vp = BOOLEAN_TO_JSVAL(argc > 0 &&
!JSVAL_IS_PRIMITIVE(vp[2]) &&
OBJ_IS_ARRAY(cx, js_GetWrappedObject(cx, JSVAL_TO_OBJECT(vp[2]))));
js_GetWrappedObject(cx, JSVAL_TO_OBJECT(vp[2]))->isArray());
return JS_TRUE;
}
@ -3309,13 +3313,13 @@ static JSPropertySpec array_props[] = {
};
JS_DEFINE_TRCINFO_1(array_toString,
(2, (static, STRING_FAIL, Array_p_toString, CONTEXT, THIS, 0, 0)))
(2, (static, STRING_FAIL, Array_p_toString, CONTEXT, THIS, 0, nanojit::ACC_STORE_ANY)))
JS_DEFINE_TRCINFO_1(array_join,
(3, (static, STRING_FAIL, Array_p_join, CONTEXT, THIS, STRING, 0, 0)))
(3, (static, STRING_FAIL, Array_p_join, CONTEXT, THIS, STRING, 0, nanojit::ACC_STORE_ANY)))
JS_DEFINE_TRCINFO_1(array_push,
(3, (static, JSVAL_FAIL, Array_p_push1, CONTEXT, THIS, JSVAL, 0, 0)))
(3, (static, JSVAL_FAIL, Array_p_push1, CONTEXT, THIS, JSVAL, 0, nanojit::ACC_STORE_ANY)))
JS_DEFINE_TRCINFO_1(array_pop,
(2, (static, JSVAL_FAIL, Array_p_pop, CONTEXT, THIS, 0, 0)))
(2, (static, JSVAL_FAIL, Array_p_pop, CONTEXT, THIS, 0, nanojit::ACC_STORE_ANY)))
static JSFunctionSpec array_methods[] = {
#if JS_HAS_TOSOURCE
@ -3396,7 +3400,7 @@ JS_STATIC_ASSERT(JSSLOT_ARRAY_LENGTH + 1 == JSSLOT_ARRAY_COUNT);
JSObject* JS_FASTCALL
js_NewEmptyArray(JSContext* cx, JSObject* proto)
{
JS_ASSERT(OBJ_IS_ARRAY(cx, proto));
JS_ASSERT(proto->isArray());
JSObject* obj = js_NewGCObject(cx);
if (!obj)
@ -3416,7 +3420,7 @@ js_NewEmptyArray(JSContext* cx, JSObject* proto)
return obj;
}
#ifdef JS_TRACER
JS_DEFINE_CALLINFO_2(extern, OBJECT, js_NewEmptyArray, CONTEXT, OBJECT, 0, 0)
JS_DEFINE_CALLINFO_2(extern, OBJECT, js_NewEmptyArray, CONTEXT, OBJECT, 0, nanojit::ACC_STORE_ANY)
#endif
JSObject* JS_FASTCALL
@ -3431,7 +3435,8 @@ js_NewEmptyArrayWithLength(JSContext* cx, JSObject* proto, int32 len)
return obj;
}
#ifdef JS_TRACER
JS_DEFINE_CALLINFO_3(extern, OBJECT, js_NewEmptyArrayWithLength, CONTEXT, OBJECT, INT32, 0, 0)
JS_DEFINE_CALLINFO_3(extern, OBJECT, js_NewEmptyArrayWithLength, CONTEXT, OBJECT, INT32, 0,
nanojit::ACC_STORE_ANY)
#endif
JSObject* JS_FASTCALL
@ -3446,7 +3451,8 @@ js_NewArrayWithSlots(JSContext* cx, JSObject* proto, uint32 len)
return obj;
}
#ifdef JS_TRACER
JS_DEFINE_CALLINFO_3(extern, OBJECT, js_NewArrayWithSlots, CONTEXT, OBJECT, UINT32, 0, 0)
JS_DEFINE_CALLINFO_3(extern, OBJECT, js_NewArrayWithSlots, CONTEXT, OBJECT, UINT32, 0,
nanojit::ACC_STORE_ANY)
#endif
JSObject *
@ -3511,15 +3517,15 @@ js_ArrayInfo(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
if (!bytes)
return JS_FALSE;
if (JSVAL_IS_PRIMITIVE(argv[i]) ||
!OBJ_IS_ARRAY(cx, (array = JSVAL_TO_OBJECT(argv[i])))) {
!(array = JSVAL_TO_OBJECT(argv[i]))->isArray()) {
fprintf(stderr, "%s: not array\n", bytes);
cx->free(bytes);
continue;
}
fprintf(stderr, "%s: %s (len %lu", bytes,
OBJ_IS_DENSE_ARRAY(cx, array) ? "dense" : "sparse",
array->isDenseArray() ? "dense" : "sparse",
array->fslots[JSSLOT_ARRAY_LENGTH]);
if (OBJ_IS_DENSE_ARRAY(cx, array)) {
if (array->isDenseArray()) {
fprintf(stderr, ", count %lu, capacity %lu",
array->fslots[JSSLOT_ARRAY_COUNT],
js_DenseArrayCapacity(array));

View file

@ -67,9 +67,6 @@ JSObject::isArray() const
return isDenseArray() || getClass() == &js_SlowArrayClass;
}
#define OBJ_IS_DENSE_ARRAY(cx,obj) (obj)->isDenseArray()
#define OBJ_IS_ARRAY(cx,obj) (obj)->isArray()
/*
* Dense arrays are not native (OBJ_IS_NATIVE(cx, aobj) for a dense array aobj
* results in false, meaning aobj->map does not point to a JSScope).
@ -90,9 +87,9 @@ JSObject::isArray() const
* (obj) for the |this| value of a getter, setter, or method call (bug 476447).
*/
static JS_INLINE JSObject *
js_GetProtoIfDenseArray(JSContext *cx, JSObject *obj)
js_GetProtoIfDenseArray(JSObject *obj)
{
return OBJ_IS_DENSE_ARRAY(cx, obj) ? OBJ_GET_PROTO(cx, obj) : obj;
return obj->isDenseArray() ? obj->getProto() : obj;
}
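With isDenseArray() and getProto() now living on JSObject, js_GetProtoIfDenseArray no longer needs a JSContext, so the parameter is dropped, and the OBJ_IS_DENSE_ARRAY / OBJ_IS_ARRAY macros deleted above go with it. Call sites update mechanically; sketched:

    // Before: JSObject *pobj = js_GetProtoIfDenseArray(cx, obj);
    // After:  JSObject *pobj = js_GetProtoIfDenseArray(obj);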
extern JSObject *

View file

@ -90,7 +90,7 @@ js_dmod(jsdouble a, jsdouble b)
}
return js_fmod(a, b);
}
JS_DEFINE_CALLINFO_2(extern, DOUBLE, js_dmod, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(extern, DOUBLE, js_dmod, DOUBLE, DOUBLE, 1, ACC_NONE)
int32 FASTCALL
js_imod(int32 a, int32 b)
@ -100,7 +100,7 @@ js_imod(int32 a, int32 b)
int r = a % b;
return r;
}
JS_DEFINE_CALLINFO_2(extern, INT32, js_imod, INT32, INT32, 1, 1)
JS_DEFINE_CALLINFO_2(extern, INT32, js_imod, INT32, INT32, 1, ACC_NONE)
/* The following boxing/unboxing primitives we can't emit inline because
they either interact with the GC and depend on Spidermonkey's 32-bit
@ -118,7 +118,7 @@ js_BoxDouble(JSContext* cx, jsdouble d)
return JSVAL_ERROR_COOKIE;
return v;
}
JS_DEFINE_CALLINFO_2(extern, JSVAL, js_BoxDouble, CONTEXT, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(extern, JSVAL, js_BoxDouble, CONTEXT, DOUBLE, 1, ACC_NONE)
jsval FASTCALL
js_BoxInt32(JSContext* cx, int32 i)
@ -132,7 +132,7 @@ js_BoxInt32(JSContext* cx, int32 i)
return JSVAL_ERROR_COOKIE;
return v;
}
JS_DEFINE_CALLINFO_2(extern, JSVAL, js_BoxInt32, CONTEXT, INT32, 1, 1)
JS_DEFINE_CALLINFO_2(extern, JSVAL, js_BoxInt32, CONTEXT, INT32, 1, ACC_NONE)
jsdouble FASTCALL
js_UnboxDouble(jsval v)
@ -141,7 +141,7 @@ js_UnboxDouble(jsval v)
return (jsdouble)JSVAL_TO_INT(v);
return *JSVAL_TO_DOUBLE(v);
}
JS_DEFINE_CALLINFO_1(extern, DOUBLE, js_UnboxDouble, JSVAL, 1, 1)
JS_DEFINE_CALLINFO_1(extern, DOUBLE, js_UnboxDouble, JSVAL, 1, ACC_NONE)
int32 FASTCALL
js_UnboxInt32(jsval v)
@ -150,7 +150,7 @@ js_UnboxInt32(jsval v)
return JSVAL_TO_INT(v);
return js_DoubleToECMAInt32(*JSVAL_TO_DOUBLE(v));
}
JS_DEFINE_CALLINFO_1(extern, INT32, js_UnboxInt32, JSVAL, 1, 1)
JS_DEFINE_CALLINFO_1(extern, INT32, js_UnboxInt32, JSVAL, 1, ACC_NONE)
JSBool FASTCALL
js_TryUnboxInt32(jsval v, int32* i32p)
@ -168,66 +168,35 @@ js_TryUnboxInt32(jsval v, int32* i32p)
*i32p = i;
return JS_TRUE;
}
JS_DEFINE_CALLINFO_2(extern, BOOL, js_TryUnboxInt32, JSVAL, INT32PTR, 1, 1)
JS_DEFINE_CALLINFO_2(extern, BOOL, js_TryUnboxInt32, JSVAL, INT32PTR, 1, ACC_NONE)
int32 FASTCALL
js_DoubleToInt32(jsdouble d)
{
return js_DoubleToECMAInt32(d);
}
JS_DEFINE_CALLINFO_1(extern, INT32, js_DoubleToInt32, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_1(extern, INT32, js_DoubleToInt32, DOUBLE, 1, ACC_NONE)
uint32 FASTCALL
js_DoubleToUint32(jsdouble d)
{
return js_DoubleToECMAUint32(d);
}
JS_DEFINE_CALLINFO_1(extern, UINT32, js_DoubleToUint32, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_1(extern, UINT32, js_DoubleToUint32, DOUBLE, 1, ACC_NONE)
jsdouble FASTCALL
js_StringToNumber(JSContext* cx, JSString* str)
{
const jschar* bp;
const jschar* end;
const jschar* ep;
jsdouble d;
str->getCharsAndEnd(bp, end);
if ((!js_strtod(cx, bp, end, &ep, &d) ||
js_SkipWhiteSpace(ep, end) != end) &&
(!js_strtointeger(cx, bp, end, &ep, 0, &d) ||
js_SkipWhiteSpace(ep, end) != end)) {
return js_NaN;
}
return d;
return StringToNumberType<jsdouble>(cx, str);
}
JS_DEFINE_CALLINFO_2(extern, DOUBLE, js_StringToNumber, CONTEXT, STRING, 1, 1)
JS_DEFINE_CALLINFO_2(extern, DOUBLE, js_StringToNumber, CONTEXT, STRING, 1, ACC_NONE)
int32 FASTCALL
js_StringToInt32(JSContext* cx, JSString* str)
{
const jschar* bp;
const jschar* end;
const jschar* ep;
jsdouble d;
if (str->length() == 1) {
jschar c = str->chars()[0];
if ('0' <= c && c <= '9')
return c - '0';
return 0;
}
str->getCharsAndEnd(bp, end);
if ((!js_strtod(cx, bp, end, &ep, &d) ||
js_SkipWhiteSpace(ep, end) != end) &&
(!js_strtointeger(cx, bp, end, &ep, 0, &d) ||
js_SkipWhiteSpace(ep, end) != end)) {
return 0;
}
return js_DoubleToECMAInt32(d);
return StringToNumberType<int32>(cx, str);
}
JS_DEFINE_CALLINFO_2(extern, INT32, js_StringToInt32, CONTEXT, STRING, 1, 1)
JS_DEFINE_CALLINFO_2(extern, INT32, js_StringToInt32, CONTEXT, STRING, 1, ACC_NONE)
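js_StringToNumber and js_StringToInt32 drop their duplicated js_strtod/js_strtointeger loops (including js_StringToInt32's single-digit fast path) in favor of a shared StringToNumberType<T> template instantiated at jsdouble and int32. A simplified sketch of the consolidation idea, with hypothetical names; the real template lives in the engine and uses js_DoubleToECMAInt32 for the integer case rather than a plain cast:

    #include <cstdlib>
    #include <limits>

    template <typename T> T NumberFailValue();
    template <> double NumberFailValue<double>() {
        return std::numeric_limits<double>::quiet_NaN();  // js_NaN analogue
    }
    template <> int NumberFailValue<int>() { return 0; }

    template <typename T>
    T StringToNumberTypeSketch(const char *s) {
        char *end;
        double d = std::strtod(s, &end);
        if (end == s || *end != '\0')
            return NumberFailValue<T>();
        return static_cast<T>(d);  // simplified; see note above for int32
    }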
JSBool FASTCALL
js_AddProperty(JSContext* cx, JSObject* obj, JSScopeProperty* sprop)
@ -265,8 +234,9 @@ js_AddProperty(JSContext* cx, JSObject* obj, JSScopeProperty* sprop)
scope->extend(cx, sprop);
} else {
JSScopeProperty *sprop2 =
scope->addProperty(cx, sprop->id, sprop->getter, sprop->setter, SPROP_INVALID_SLOT,
sprop->attrs, sprop->getFlags(), sprop->shortid);
scope->addProperty(cx, sprop->id, sprop->getter(), sprop->setter(),
SPROP_INVALID_SLOT, sprop->attrs, sprop->getFlags(),
sprop->shortid);
if (sprop2 != sprop)
goto exit_trace;
}
@ -281,13 +251,13 @@ js_AddProperty(JSContext* cx, JSObject* obj, JSScopeProperty* sprop)
JS_UNLOCK_SCOPE(cx, scope);
return JS_FALSE;
}
JS_DEFINE_CALLINFO_3(extern, BOOL, js_AddProperty, CONTEXT, OBJECT, SCOPEPROP, 0, 0)
JS_DEFINE_CALLINFO_3(extern, BOOL, js_AddProperty, CONTEXT, OBJECT, SCOPEPROP, 0, ACC_STORE_ANY)
static JSBool
HasProperty(JSContext* cx, JSObject* obj, jsid id)
{
// Check that we know how the lookup op will behave.
for (JSObject* pobj = obj; pobj; pobj = OBJ_GET_PROTO(cx, pobj)) {
for (JSObject* pobj = obj; pobj; pobj = pobj->getProto()) {
if (pobj->map->ops->lookupProperty != js_LookupProperty)
return JSVAL_TO_SPECIAL(JSVAL_VOID);
JSClass* clasp = OBJ_GET_CLASS(cx, pobj);
@ -313,7 +283,7 @@ js_HasNamedProperty(JSContext* cx, JSObject* obj, JSString* idstr)
return HasProperty(cx, obj, id);
}
JS_DEFINE_CALLINFO_3(extern, BOOL, js_HasNamedProperty, CONTEXT, OBJECT, STRING, 0, 0)
JS_DEFINE_CALLINFO_3(extern, BOOL, js_HasNamedProperty, CONTEXT, OBJECT, STRING, 0, ACC_STORE_ANY)
JSBool FASTCALL
js_HasNamedPropertyInt32(JSContext* cx, JSObject* obj, int32 index)
@ -324,7 +294,8 @@ js_HasNamedPropertyInt32(JSContext* cx, JSObject* obj, int32 index)
return HasProperty(cx, obj, id);
}
JS_DEFINE_CALLINFO_3(extern, BOOL, js_HasNamedPropertyInt32, CONTEXT, OBJECT, INT32, 0, 0)
JS_DEFINE_CALLINFO_3(extern, BOOL, js_HasNamedPropertyInt32, CONTEXT, OBJECT, INT32, 0,
ACC_STORE_ANY)
JSString* FASTCALL
js_TypeOfObject(JSContext* cx, JSObject* obj)
@ -333,7 +304,7 @@ js_TypeOfObject(JSContext* cx, JSObject* obj)
return ATOM_TO_STRING(cx->runtime->atomState.typeAtoms[JSTYPE_OBJECT]);
return ATOM_TO_STRING(cx->runtime->atomState.typeAtoms[obj->typeOf(cx)]);
}
JS_DEFINE_CALLINFO_2(extern, STRING, js_TypeOfObject, CONTEXT, OBJECT, 1, 1)
JS_DEFINE_CALLINFO_2(extern, STRING, js_TypeOfObject, CONTEXT, OBJECT, 1, ACC_NONE)
JSString* FASTCALL
js_TypeOfBoolean(JSContext* cx, int32 unboxed)
@ -344,7 +315,7 @@ js_TypeOfBoolean(JSContext* cx, int32 unboxed)
JSType type = JS_TypeOfValue(cx, boxed);
return ATOM_TO_STRING(cx->runtime->atomState.typeAtoms[type]);
}
JS_DEFINE_CALLINFO_2(extern, STRING, js_TypeOfBoolean, CONTEXT, INT32, 1, 1)
JS_DEFINE_CALLINFO_2(extern, STRING, js_TypeOfBoolean, CONTEXT, INT32, 1, ACC_NONE)
jsdouble FASTCALL
js_BooleanOrUndefinedToNumber(JSContext* cx, int32 unboxed)
@ -354,7 +325,7 @@ js_BooleanOrUndefinedToNumber(JSContext* cx, int32 unboxed)
JS_ASSERT(unboxed == JS_TRUE || unboxed == JS_FALSE);
return unboxed;
}
JS_DEFINE_CALLINFO_2(extern, DOUBLE, js_BooleanOrUndefinedToNumber, CONTEXT, INT32, 1, 1)
JS_DEFINE_CALLINFO_2(extern, DOUBLE, js_BooleanOrUndefinedToNumber, CONTEXT, INT32, 1, ACC_NONE)
JSString* FASTCALL
js_BooleanOrUndefinedToString(JSContext *cx, int32 unboxed)
@ -362,7 +333,7 @@ js_BooleanOrUndefinedToString(JSContext *cx, int32 unboxed)
JS_ASSERT(uint32(unboxed) <= 2);
return ATOM_TO_STRING(cx->runtime->atomState.booleanAtoms[unboxed]);
}
JS_DEFINE_CALLINFO_2(extern, STRING, js_BooleanOrUndefinedToString, CONTEXT, INT32, 1, 1)
JS_DEFINE_CALLINFO_2(extern, STRING, js_BooleanOrUndefinedToString, CONTEXT, INT32, 1, ACC_NONE)
JSObject* FASTCALL
js_NewNullClosure(JSContext* cx, JSObject* funobj, JSObject* proto, JSObject* parent)
@ -382,7 +353,8 @@ js_NewNullClosure(JSContext* cx, JSObject* funobj, JSObject* proto, JSObject* pa
reinterpret_cast<jsval>(fun));
return closure;
}
JS_DEFINE_CALLINFO_4(extern, OBJECT, js_NewNullClosure, CONTEXT, OBJECT, OBJECT, OBJECT, 0, 0)
JS_DEFINE_CALLINFO_4(extern, OBJECT, js_NewNullClosure, CONTEXT, OBJECT, OBJECT, OBJECT, 0,
ACC_STORE_ANY)
JS_REQUIRES_STACK JSBool FASTCALL
js_PopInterpFrame(JSContext* cx, InterpState* state)
@ -423,7 +395,7 @@ js_PopInterpFrame(JSContext* cx, InterpState* state)
*state->inlineCallCountp = *state->inlineCallCountp - 1;
return JS_TRUE;
}
JS_DEFINE_CALLINFO_2(extern, BOOL, js_PopInterpFrame, CONTEXT, INTERPSTATE, 0, 0)
JS_DEFINE_CALLINFO_2(extern, BOOL, js_PopInterpFrame, CONTEXT, INTERPSTATE, 0, ACC_STORE_ANY)
JSString* FASTCALL
js_ConcatN(JSContext *cx, JSString **strArray, uint32 size)
@ -462,4 +434,4 @@ js_ConcatN(JSContext *cx, JSString **strArray, uint32 size)
cx->free(buf);
return str;
}
JS_DEFINE_CALLINFO_3(extern, STRING, js_ConcatN, CONTEXT, STRINGPTR, UINT32, 0, 0)
JS_DEFINE_CALLINFO_3(extern, STRING, js_ConcatN, CONTEXT, STRINGPTR, UINT32, 0, ACC_STORE_ANY)

View file

@ -256,15 +256,26 @@ struct ClosureVarInfo;
#define _JS_CALLINFO(name) name##_ci
#if defined(JS_NO_FASTCALL) && defined(NANOJIT_IA32)
#define _JS_DEFINE_CALLINFO(linkage, name, crtype, cargtypes, argtypes, cse, fold) \
#define _JS_DEFINE_CALLINFO(linkage, name, crtype, cargtypes, argtypes, isPure, storeAccSet) \
_JS_TN_LINKAGE(linkage, crtype) name cargtypes; \
_JS_CI_LINKAGE(linkage) const nanojit::CallInfo _JS_CALLINFO(name) = \
{ (intptr_t) &name, argtypes, cse, fold, nanojit::ABI_CDECL _JS_CI_NAME(name) };
{ (intptr_t) &name, argtypes, nanojit::ABI_CDECL, isPure, storeAccSet _JS_CI_NAME(name) };\
/* XXX: a temporary assertion to check all cse/fold pairs are correctly */ \
/* converted to isPure/storeAccSet pairs for bug 545274. Will be removed */ \
/* when bug 517910 starts doing more precise storeAccSet markings. */ \
JS_STATIC_ASSERT_IF(!isPure, storeAccSet == nanojit::ACC_STORE_ANY); /* temporary */ \
JS_STATIC_ASSERT_IF(isPure, storeAccSet == nanojit::ACC_NONE);
#else
#define _JS_DEFINE_CALLINFO(linkage, name, crtype, cargtypes, argtypes, cse, fold) \
#define _JS_DEFINE_CALLINFO(linkage, name, crtype, cargtypes, argtypes, isPure, storeAccSet) \
_JS_TN_LINKAGE(linkage, crtype) FASTCALL name cargtypes; \
_JS_CI_LINKAGE(linkage) const nanojit::CallInfo _JS_CALLINFO(name) = \
{ (intptr_t) &name, argtypes, cse, fold, nanojit::ABI_FASTCALL _JS_CI_NAME(name) };
{ (intptr_t) &name, argtypes, nanojit::ABI_FASTCALL, isPure, storeAccSet _JS_CI_NAME(name) }; \
/* XXX: a temporary assertion to check all cse/fold pairs are correctly */ \
/* converted to isPure/storeAccSet pairs for bug 545274. Will be removed */ \
/* when bug 517910 starts doing more precise storeAccSet markings. */ \
JS_STATIC_ASSERT_IF(!isPure, storeAccSet == nanojit::ACC_STORE_ANY); /* temporary */ \
JS_STATIC_ASSERT_IF(isPure, storeAccSet == nanojit::ACC_NONE);
#endif
/*
@ -289,33 +300,40 @@ struct ClosureVarInfo;
*
* - The parameter types.
*
* - The cse flag. 1 if the builtin call can be optimized away by common
* subexpression elimination; otherwise 0. This should be 1 only if the
* function is idempotent and the return value is determined solely by the
* arguments.
* - The isPure flag. Set to 1 if:
* (a) the function's return value is determined solely by its arguments
* (ie. no hidden state, no implicit inputs used such as global
* variables or the result of an I/O operation); and
* (b) the function causes no observable side-effects (ie. no writes to
* global variables, no I/O output).
* Multiple calls to a pure function can be merged during CSE.
*
* - The fold flag. Reserved. The same as cse for now.
* - The storeAccSet. This indicates which memory access regions the function
* accesses. It must be ACC_NONE if the function is pure; use
* ACC_STORE_ANY if you're not sure. Used to determine if each call site of
* the function aliases any loads.
*/
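Two definitions from the jsbuiltins.cpp hunk earlier in this commit illustrate the pairing the comment above describes:

    JS_DEFINE_CALLINFO_2(extern, DOUBLE, js_dmod, DOUBLE, DOUBLE, 1, ACC_NONE)
    JS_DEFINE_CALLINFO_3(extern, BOOL, js_AddProperty, CONTEXT, OBJECT, SCOPEPROP,
                         0, ACC_STORE_ANY)

js_dmod is pure (its result depends only on its two operands), so isPure is 1 and the store set must be ACC_NONE; js_AddProperty mutates scopes, so it is not pure and is conservatively marked ACC_STORE_ANY until the more precise markings planned in bug 517910 land.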
#define JS_DEFINE_CALLINFO_1(linkage, rt, op, at0, cse, fold) \
#define JS_DEFINE_CALLINFO_1(linkage, rt, op, at0, isPure, storeAccSet) \
_JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), (_JS_CTYPE_TYPE(at0)), \
(_JS_CTYPE_ARGSIZE(at0) << (1*nanojit::ARGSIZE_SHIFT)) | \
_JS_CTYPE_RETSIZE(rt), cse, fold)
#define JS_DEFINE_CALLINFO_2(linkage, rt, op, at0, at1, cse, fold) \
_JS_CTYPE_RETSIZE(rt), \
isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_2(linkage, rt, op, at0, at1, isPure, storeAccSet) \
_JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \
(_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1)), \
(_JS_CTYPE_ARGSIZE(at0) << (2*nanojit::ARGSIZE_SHIFT)) | \
(_JS_CTYPE_ARGSIZE(at1) << (1*nanojit::ARGSIZE_SHIFT)) | \
_JS_CTYPE_RETSIZE(rt), \
cse, fold)
#define JS_DEFINE_CALLINFO_3(linkage, rt, op, at0, at1, at2, cse, fold) \
isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_3(linkage, rt, op, at0, at1, at2, isPure, storeAccSet) \
_JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \
(_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1), _JS_CTYPE_TYPE(at2)), \
(_JS_CTYPE_ARGSIZE(at0) << (3*nanojit::ARGSIZE_SHIFT)) | \
(_JS_CTYPE_ARGSIZE(at1) << (2*nanojit::ARGSIZE_SHIFT)) | \
(_JS_CTYPE_ARGSIZE(at2) << (1*nanojit::ARGSIZE_SHIFT)) | \
_JS_CTYPE_RETSIZE(rt), \
cse, fold)
#define JS_DEFINE_CALLINFO_4(linkage, rt, op, at0, at1, at2, at3, cse, fold) \
isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_4(linkage, rt, op, at0, at1, at2, at3, isPure, storeAccSet) \
_JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \
(_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1), _JS_CTYPE_TYPE(at2), \
_JS_CTYPE_TYPE(at3)), \
@ -324,8 +342,8 @@ struct ClosureVarInfo;
(_JS_CTYPE_ARGSIZE(at2) << (2*nanojit::ARGSIZE_SHIFT)) | \
(_JS_CTYPE_ARGSIZE(at3) << (1*nanojit::ARGSIZE_SHIFT)) | \
_JS_CTYPE_RETSIZE(rt), \
cse, fold)
#define JS_DEFINE_CALLINFO_5(linkage, rt, op, at0, at1, at2, at3, at4, cse, fold) \
isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_5(linkage, rt, op, at0, at1, at2, at3, at4, isPure, storeAccSet) \
_JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \
(_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1), _JS_CTYPE_TYPE(at2), \
_JS_CTYPE_TYPE(at3), _JS_CTYPE_TYPE(at4)), \
@ -335,8 +353,8 @@ struct ClosureVarInfo;
(_JS_CTYPE_ARGSIZE(at3) << (2*nanojit::ARGSIZE_SHIFT)) | \
(_JS_CTYPE_ARGSIZE(at4) << (1*nanojit::ARGSIZE_SHIFT)) | \
_JS_CTYPE_RETSIZE(rt), \
cse, fold)
#define JS_DEFINE_CALLINFO_6(linkage, rt, op, at0, at1, at2, at3, at4, at5, cse, fold) \
isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_6(linkage, rt, op, at0, at1, at2, at3, at4, at5, isPure, storeAccSet) \
_JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \
(_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1), _JS_CTYPE_TYPE(at2), \
_JS_CTYPE_TYPE(at3), _JS_CTYPE_TYPE(at4), _JS_CTYPE_TYPE(at5)), \
@ -346,8 +364,10 @@ struct ClosureVarInfo;
(_JS_CTYPE_ARGSIZE(at3) << (3*nanojit::ARGSIZE_SHIFT)) | \
(_JS_CTYPE_ARGSIZE(at4) << (2*nanojit::ARGSIZE_SHIFT)) | \
(_JS_CTYPE_ARGSIZE(at5) << (1*nanojit::ARGSIZE_SHIFT)) | \
_JS_CTYPE_RETSIZE(rt), cse, fold)
#define JS_DEFINE_CALLINFO_7(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, cse, fold) \
_JS_CTYPE_RETSIZE(rt), \
isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_7(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, isPure, \
storeAccSet) \
_JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \
(_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1), _JS_CTYPE_TYPE(at2), \
_JS_CTYPE_TYPE(at3), _JS_CTYPE_TYPE(at4), _JS_CTYPE_TYPE(at5), \
@ -359,8 +379,10 @@ struct ClosureVarInfo;
(_JS_CTYPE_ARGSIZE(at4) << (3*nanojit::ARGSIZE_SHIFT)) | \
(_JS_CTYPE_ARGSIZE(at5) << (2*nanojit::ARGSIZE_SHIFT)) | \
(_JS_CTYPE_ARGSIZE(at6) << (1*nanojit::ARGSIZE_SHIFT)) | \
_JS_CTYPE_RETSIZE(rt), cse, fold)
#define JS_DEFINE_CALLINFO_8(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, at7, cse, fold) \
_JS_CTYPE_RETSIZE(rt), \
isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_8(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, at7, isPure, \
storeAccSet) \
_JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \
(_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1), _JS_CTYPE_TYPE(at2), \
_JS_CTYPE_TYPE(at3), _JS_CTYPE_TYPE(at4), _JS_CTYPE_TYPE(at5), \
@ -373,37 +395,38 @@ struct ClosureVarInfo;
(_JS_CTYPE_ARGSIZE(at5) << (3*nanojit::ARGSIZE_SHIFT)) | \
(_JS_CTYPE_ARGSIZE(at6) << (2*nanojit::ARGSIZE_SHIFT)) | \
(_JS_CTYPE_ARGSIZE(at7) << (1*nanojit::ARGSIZE_SHIFT)) | \
_JS_CTYPE_RETSIZE(rt), cse, fold)
_JS_CTYPE_RETSIZE(rt), \
isPure, storeAccSet)
#define JS_DECLARE_CALLINFO(name) extern const nanojit::CallInfo _JS_CALLINFO(name);
#define _JS_TN_INIT_HELPER_n(n, args) _JS_TN_INIT_HELPER_##n args
#define _JS_TN_INIT_HELPER_1(linkage, rt, op, at0, cse, fold) \
#define _JS_TN_INIT_HELPER_1(linkage, rt, op, at0, isPure, storeAccSet) \
&_JS_CALLINFO(op), \
_JS_CTYPE_PCH(at0), \
_JS_CTYPE_ACH(at0), \
_JS_CTYPE_FLAGS(rt)
#define _JS_TN_INIT_HELPER_2(linkage, rt, op, at0, at1, cse, fold) \
#define _JS_TN_INIT_HELPER_2(linkage, rt, op, at0, at1, isPure, storeAccSet) \
&_JS_CALLINFO(op), \
_JS_CTYPE_PCH(at1) _JS_CTYPE_PCH(at0), \
_JS_CTYPE_ACH(at1) _JS_CTYPE_ACH(at0), \
_JS_CTYPE_FLAGS(rt)
#define _JS_TN_INIT_HELPER_3(linkage, rt, op, at0, at1, at2, cse, fold) \
#define _JS_TN_INIT_HELPER_3(linkage, rt, op, at0, at1, at2, isPure, storeAccSet) \
&_JS_CALLINFO(op), \
_JS_CTYPE_PCH(at2) _JS_CTYPE_PCH(at1) _JS_CTYPE_PCH(at0), \
_JS_CTYPE_ACH(at2) _JS_CTYPE_ACH(at1) _JS_CTYPE_ACH(at0), \
_JS_CTYPE_FLAGS(rt)
#define _JS_TN_INIT_HELPER_4(linkage, rt, op, at0, at1, at2, at3, cse, fold) \
#define _JS_TN_INIT_HELPER_4(linkage, rt, op, at0, at1, at2, at3, isPure, storeAccSet) \
&_JS_CALLINFO(op), \
_JS_CTYPE_PCH(at3) _JS_CTYPE_PCH(at2) _JS_CTYPE_PCH(at1) _JS_CTYPE_PCH(at0), \
_JS_CTYPE_ACH(at3) _JS_CTYPE_ACH(at2) _JS_CTYPE_ACH(at1) _JS_CTYPE_ACH(at0), \
_JS_CTYPE_FLAGS(rt)
#define _JS_TN_INIT_HELPER_5(linkage, rt, op, at0, at1, at2, at3, at4, cse, fold) \
#define _JS_TN_INIT_HELPER_5(linkage, rt, op, at0, at1, at2, at3, at4, isPure, storeAccSet) \
&_JS_CALLINFO(op), \
_JS_CTYPE_PCH(at4) _JS_CTYPE_PCH(at3) _JS_CTYPE_PCH(at2) _JS_CTYPE_PCH(at1) \
_JS_CTYPE_PCH(at0), \
@ -411,7 +434,7 @@ struct ClosureVarInfo;
_JS_CTYPE_ACH(at0), \
_JS_CTYPE_FLAGS(rt)
#define _JS_TN_INIT_HELPER_6(linkage, rt, op, at0, at1, at2, at3, at4, at5, cse, fold) \
#define _JS_TN_INIT_HELPER_6(linkage, rt, op, at0, at1, at2, at3, at4, at5, isPure, storeAccSet) \
&_JS_CALLINFO(op), \
_JS_CTYPE_PCH(at5) _JS_CTYPE_PCH(at4) _JS_CTYPE_PCH(at3) _JS_CTYPE_PCH(at2) \
_JS_CTYPE_PCH(at1) _JS_CTYPE_PCH(at0), \
@ -419,7 +442,7 @@ struct ClosureVarInfo;
_JS_CTYPE_ACH(at1) _JS_CTYPE_ACH(at0), \
_JS_CTYPE_FLAGS(rt)
#define _JS_TN_INIT_HELPER_7(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, cse, fold) \
#define _JS_TN_INIT_HELPER_7(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, isPure, storeAccSet) \
&_JS_CALLINFO(op), \
_JS_CTYPE_PCH(at6) _JS_CTYPE_PCH(at5) _JS_CTYPE_PCH(at4) _JS_CTYPE_PCH(at3) \
_JS_CTYPE_PCH(at2) _JS_CTYPE_PCH(at1) _JS_CTYPE_PCH(at0), \
@ -427,7 +450,7 @@ struct ClosureVarInfo;
_JS_CTYPE_ACH(at2) _JS_CTYPE_ACH(at1) _JS_CTYPE_ACH(at0), \
_JS_CTYPE_FLAGS(rt)
#define _JS_TN_INIT_HELPER_8(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, at7, cse, fold) \
#define _JS_TN_INIT_HELPER_8(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, at7, isPure, storeAccSet) \
&_JS_CALLINFO(op), \
_JS_CTYPE_PCH(at7) _JS_CTYPE_PCH(at6) _JS_CTYPE_PCH(at5) _JS_CTYPE_PCH(at4) \
_JS_CTYPE_PCH(at3) _JS_CTYPE_PCH(at2) _JS_CTYPE_PCH(at1) _JS_CTYPE_PCH(at0), \
@ -492,14 +515,14 @@ js_dmod(jsdouble a, jsdouble b);
#else
#define JS_DEFINE_CALLINFO_1(linkage, rt, op, at0, cse, fold)
#define JS_DEFINE_CALLINFO_2(linkage, rt, op, at0, at1, cse, fold)
#define JS_DEFINE_CALLINFO_3(linkage, rt, op, at0, at1, at2, cse, fold)
#define JS_DEFINE_CALLINFO_4(linkage, rt, op, at0, at1, at2, at3, cse, fold)
#define JS_DEFINE_CALLINFO_5(linkage, rt, op, at0, at1, at2, at3, at4, cse, fold)
#define JS_DEFINE_CALLINFO_6(linkage, rt, op, at0, at1, at2, at3, at4, at5, cse, fold)
#define JS_DEFINE_CALLINFO_7(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, cse, fold)
#define JS_DEFINE_CALLINFO_8(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, at7, cse, fold)
#define JS_DEFINE_CALLINFO_1(linkage, rt, op, at0, isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_2(linkage, rt, op, at0, at1, isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_3(linkage, rt, op, at0, at1, at2, isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_4(linkage, rt, op, at0, at1, at2, at3, isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_5(linkage, rt, op, at0, at1, at2, at3, at4, isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_6(linkage, rt, op, at0, at1, at2, at3, at4, at5, isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_7(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, isPure, storeAccSet)
#define JS_DEFINE_CALLINFO_8(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, at7, isPure, storeAccSet)
#define JS_DECLARE_CALLINFO(name)
#define JS_DEFINE_TRCINFO_1(name, tn0)
#define JS_DEFINE_TRCINFO_2(name, tn0, tn1)

View file

@ -1657,7 +1657,7 @@ js_ReportErrorNumberVA(JSContext *cx, uintN flags, JSErrorCallback callback,
PopulateReportBlame(cx, &report);
if (!js_ExpandErrorArguments(cx, callback, userRef, errorNumber,
&message, &report, charArgs, ap)) {
&message, &report, !!charArgs, ap)) {
return JS_FALSE;
}

View file

@ -63,6 +63,13 @@
#include "jsvector.h"
#include "jshashtable.h"
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4100) /* Silence unreferenced formal parameter warnings */
#pragma warning(push)
#pragma warning(disable:4355) /* Silence warning about "this" used in base member initializer list */
#endif
/*
* js_GetSrcNote cache to avoid O(n^2) growth in finding a source note for a
* given pc in a script. We use the script->code pointer to tag the cache,
@ -125,7 +132,7 @@ struct TreeFragment;
struct InterpState;
template<typename T> class Queue;
typedef Queue<uint16> SlotList;
struct TypeMap;
class TypeMap;
struct REFragment;
typedef nanojit::HashMap<REHashKey, REFragment*, REHashFn> REHashMap;
@ -283,7 +290,7 @@ class CallStack
return suspendedFrame;
}
bool isSuspended() const { return suspendedFrame; }
bool isSuspended() const { return !!suspendedFrame; }
void setPrevious(CallStack *cs) { previous = cs; }
CallStack *getPrevious() const { return previous; }
@ -417,9 +424,9 @@ struct TraceMonitor {
*/
REHashMap* reFragments;
// Cached temporary typemap to avoid realloc'ing every time we create one.
// This must be used in only one place at a given time. It must be cleared
// before use.
TypeMap* cachedTempTypeMap;
#ifdef DEBUG
@ -1801,6 +1808,10 @@ class JSAutoIdArray {
JSIdArray * const idArray;
JSTempValueRooter tvr;
JS_DECL_USE_GUARD_OBJECT_NOTIFIER
/* No copy or assignment semantics. */
JSAutoIdArray(JSAutoIdArray &);
void operator=(JSAutoIdArray &);
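/* (Declaring these private and leaving them undefined is the usual C++98
   idiom for disabling copies: any accidental use fails to compile or link.) */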
};
/* The auto-root for enumeration object and its state. */
@ -2280,4 +2291,9 @@ ContextAllocPolicy::reportAllocOverflow() const
}
#ifdef _MSC_VER
#pragma warning(pop)
#pragma warning(pop)
#endif
#endif /* jscntxt_h___ */

View file

@ -2253,7 +2253,7 @@ date_valueOf(JSContext *cx, uintN argc, jsval *vp)
// Don't really need an argument here, but we don't support arg-less builtins
JS_DEFINE_TRCINFO_1(date_now,
(1, (static, DOUBLE, date_now_tn, CONTEXT, 0, 0)))
(1, (static, DOUBLE, date_now_tn, CONTEXT, 0, nanojit::ACC_STORE_ANY)))
static JSFunctionSpec date_static_methods[] = {
JS_FN("UTC", date_UTC, MAXARGS,0),
@ -2263,7 +2263,7 @@ static JSFunctionSpec date_static_methods[] = {
};
JS_DEFINE_TRCINFO_1(date_valueOf,
(3, (static, JSVAL_RETRY, date_valueOf_tn, CONTEXT, THIS, STRING, 0, 0)))
(3, (static, JSVAL_RETRY, date_valueOf_tn, CONTEXT, THIS, STRING, 0, nanojit::ACC_STORE_ANY)))
static JSFunctionSpec date_methods[] = {
JS_FN("getTime", date_getTime, 0,0),

View file

@ -476,7 +476,7 @@ DropWatchPointAndUnlock(JSContext *cx, JSWatchPoint *wp, uintN flag)
((wprop->attrs ^ sprop->attrs) & JSPROP_SETTER) == 0 &&
IsWatchedProperty(cx, wprop)) {
sprop = scope->changeProperty(cx, wprop, 0, wprop->attrs,
wprop->getter, wp->setter);
wprop->getter(), wp->setter);
if (!sprop)
ok = JS_FALSE;
}
@ -705,7 +705,7 @@ js_watch_set(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
frame.fun = fun;
frame.argv = argv + 2;
frame.down = js_GetTopStackFrame(cx);
frame.scopeChain = OBJ_GET_PARENT(cx, closure);
frame.scopeChain = closure->getParent();
if (script && script->nslots)
frame.slots = argv + slotsStart;
if (script) {
@ -772,12 +772,14 @@ static bool
IsWatchedProperty(JSContext *cx, JSScopeProperty *sprop)
{
if (sprop->attrs & JSPROP_SETTER) {
JSObject *funobj = js_CastAsObject(sprop->setter);
JSFunction *fun = GET_FUNCTION_PRIVATE(cx, funobj);
JSObject *funobj = sprop->setterObject();
if (!funobj->isFunction())
return false;
JSFunction *fun = GET_FUNCTION_PRIVATE(cx, funobj);
return FUN_NATIVE(fun) == js_watch_set_wrapper;
}
return sprop->setter == js_watch_set;
return sprop->setterOp() == js_watch_set;
}
JSPropertyOp
@ -799,7 +801,7 @@ js_WrapWatchedSetter(JSContext *cx, jsid id, uintN attrs, JSPropertyOp setter)
atom = NULL;
}
wrapper = js_NewFunction(cx, NULL, js_watch_set_wrapper, 1, 0,
OBJ_GET_PARENT(cx, js_CastAsObject(setter)),
js_CastAsObject(setter)->getParent(),
atom);
if (!wrapper)
return NULL;
@ -875,8 +877,8 @@ JS_SetWatchPoint(JSContext *cx, JSObject *obj, jsval idval,
value = SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(pobj))
? LOCKED_OBJ_GET_SLOT(pobj, sprop->slot)
: JSVAL_VOID;
getter = sprop->getter;
setter = sprop->setter;
getter = sprop->getter();
setter = sprop->setter();
attrs = sprop->attrs;
flags = sprop->getFlags();
shortid = sprop->shortid;
@ -909,7 +911,7 @@ JS_SetWatchPoint(JSContext *cx, JSObject *obj, jsval idval,
wp = FindWatchPoint(rt, OBJ_SCOPE(obj), propid);
if (!wp) {
DBG_UNLOCK(rt);
watcher = js_WrapWatchedSetter(cx, propid, sprop->attrs, sprop->setter);
watcher = js_WrapWatchedSetter(cx, propid, sprop->attrs, sprop->setter());
if (!watcher) {
ok = JS_FALSE;
goto out;
@ -923,13 +925,13 @@ JS_SetWatchPoint(JSContext *cx, JSObject *obj, jsval idval,
wp->handler = NULL;
wp->closure = NULL;
wp->object = obj;
JS_ASSERT(sprop->setter != js_watch_set || pobj != obj);
wp->setter = sprop->setter;
JS_ASSERT(sprop->setter() != js_watch_set || pobj != obj);
wp->setter = sprop->setter();
wp->flags = JSWP_LIVE;
/* XXXbe nest in obj lock here */
sprop = js_ChangeNativePropertyAttrs(cx, obj, sprop, 0, sprop->attrs,
sprop->getter, watcher);
sprop->getter(), watcher);
if (!sprop) {
/* Self-link so DropWatchPointAndUnlock can JS_REMOVE_LINK it. */
JS_INIT_CLIST(&wp->links);
@ -1463,7 +1465,7 @@ JS_GetPropertyDesc(JSContext *cx, JSObject *obj, JSScopeProperty *sprop,
{
pd->id = ID_TO_VALUE(sprop->id);
bool wasThrowing = cx->throwing;
JSBool wasThrowing = cx->throwing;
JSAutoTempValueRooter lastException(cx, cx->exception);
cx->throwing = JS_FALSE;
@ -1487,10 +1489,10 @@ JS_GetPropertyDesc(JSContext *cx, JSObject *obj, JSScopeProperty *sprop,
| ((sprop->attrs & JSPROP_READONLY) ? JSPD_READONLY : 0)
| ((sprop->attrs & JSPROP_PERMANENT) ? JSPD_PERMANENT : 0);
pd->spare = 0;
if (sprop->getter == js_GetCallArg) {
if (sprop->getter() == js_GetCallArg) {
pd->slot = sprop->shortid;
pd->flags |= JSPD_ARGUMENT;
} else if (sprop->getter == js_GetCallVar) {
} else if (sprop->getter() == js_GetCallVar) {
pd->slot = sprop->shortid;
pd->flags |= JSPD_VARIABLE;
} else {

View file

@ -1319,7 +1319,7 @@ js_PushBlockScope(JSTreeContext *tc, JSStmtInfo *stmt, JSObject *blockObj,
{
js_PushStatement(tc, stmt, STMT_BLOCK, top);
stmt->flags |= SIF_SCOPE;
STOBJ_SET_PARENT(blockObj, tc->blockChain);
blockObj->setParent(tc->blockChain);
stmt->downScope = tc->topScopeStmt;
tc->topScopeStmt = stmt;
tc->blockChain = blockObj;
@ -1512,7 +1512,7 @@ js_PopStatement(JSTreeContext *tc)
if (STMT_LINKS_SCOPE(stmt)) {
tc->topScopeStmt = stmt->downScope;
if (stmt->flags & SIF_SCOPE) {
tc->blockChain = STOBJ_GET_PARENT(stmt->blockObj);
tc->blockChain = stmt->blockObj->getParent();
JS_SCOPE_DEPTH_METERING(--tc->scopeDepth);
}
}
@ -2099,7 +2099,7 @@ BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
* as their parent by JSCompiler::newFunction.
*/
JSObject *scopeobj = (cg->flags & TCF_IN_FUNCTION)
? STOBJ_GET_PARENT(FUN_OBJECT(cg->fun))
? FUN_OBJECT(cg->fun)->getParent()
: cg->scopeChain;
if (scopeobj != caller->varobj(cx))
return JS_TRUE;

View file

@ -253,8 +253,8 @@ js_GetArgsObject(JSContext *cx, JSStackFrame *fp)
* js_GetClassPrototype not being able to find a global object containing
* the standard prototype by starting from arguments and following parent.
*/
JSObject *parent, *global = fp->scopeChain;
while ((parent = OBJ_GET_PARENT(cx, global)) != NULL)
JSObject *global = fp->scopeChain;
while (JSObject *parent = global->getParent())
global = parent;
JS_ASSERT(fp->argv);
@ -297,7 +297,7 @@ js_Arguments(JSContext *cx, JSObject *parent, uint32 argc, JSObject *callee,
#endif
JS_DEFINE_CALLINFO_6(extern, OBJECT, js_Arguments, CONTEXT, OBJECT, UINT32, OBJECT,
DOUBLEPTR, APNPTR, 0, 0)
DOUBLEPTR, APNPTR, 0, nanojit::ACC_STORE_ANY)
/* FIXME change the return type to void. */
JSBool JS_FASTCALL
@ -309,7 +309,8 @@ js_PutArguments(JSContext *cx, JSObject *argsobj, jsval *args)
return true;
}
JS_DEFINE_CALLINFO_3(extern, BOOL, js_PutArguments, CONTEXT, OBJECT, JSVALPTR, 0, 0)
JS_DEFINE_CALLINFO_3(extern, BOOL, js_PutArguments, CONTEXT, OBJECT, JSVALPTR, 0,
nanojit::ACC_STORE_ANY)
static JSBool
args_delProperty(JSContext *cx, JSObject *obj, jsval idval, jsval *vp)
@ -867,7 +868,8 @@ js_CreateCallObjectOnTrace(JSContext *cx, JSFunction *fun, JSObject *callee, JSO
return callobj;
}
JS_DEFINE_CALLINFO_4(extern, OBJECT, js_CreateCallObjectOnTrace, CONTEXT, FUNCTION, OBJECT, OBJECT, 0, 0)
JS_DEFINE_CALLINFO_4(extern, OBJECT, js_CreateCallObjectOnTrace, CONTEXT, FUNCTION, OBJECT, OBJECT,
0, nanojit::ACC_STORE_ANY)
JSFunction *
js_GetCallObjectFunction(JSObject *obj)
@ -922,7 +924,7 @@ js_PutCallObject(JSContext *cx, JSStackFrame *fp)
/* Clear private pointers to fp, which is about to go away (js_Invoke). */
if (js_IsNamedLambda(fun)) {
JSObject *env = STOBJ_GET_PARENT(callobj);
JSObject *env = callobj->getParent();
JS_ASSERT(STOBJ_GET_CLASS(env) == &js_DeclEnvClass);
JS_ASSERT(env->getPrivate() == fp);
@ -947,7 +949,8 @@ js_PutCallObjectOnTrace(JSContext *cx, JSObject *scopeChain, uint32 nargs, jsval
return true;
}
JS_DEFINE_CALLINFO_6(extern, BOOL, js_PutCallObjectOnTrace, CONTEXT, OBJECT, UINT32, JSVALPTR, UINT32, JSVALPTR, 0, 0)
JS_DEFINE_CALLINFO_6(extern, BOOL, js_PutCallObjectOnTrace, CONTEXT, OBJECT, UINT32, JSVALPTR,
UINT32, JSVALPTR, 0, nanojit::ACC_STORE_ANY)
static JSBool
call_enumerate(JSContext *cx, JSObject *obj)
@ -1004,113 +1007,138 @@ call_enumerate(JSContext *cx, JSObject *obj)
return ok;
}
typedef enum JSCallPropertyKind {
enum JSCallPropertyKind {
JSCPK_ARGUMENTS,
JSCPK_ARG,
JSCPK_VAR
} JSCallPropertyKind;
JSCPK_VAR,
JSCPK_UPVAR
};
static JSBool
CallPropertyOp(JSContext *cx, JSObject *obj, jsid id, jsval *vp,
JSCallPropertyKind kind, JSBool setter)
JSCallPropertyKind kind, JSBool setter = false)
{
JSFunction *fun;
JSStackFrame *fp;
uintN i;
JS_ASSERT(obj->getClass() == &js_CallClass);
uintN i = 0;
if (kind != JSCPK_ARGUMENTS) {
JS_ASSERT((int16) JSVAL_TO_INT(id) == JSVAL_TO_INT(id));
i = (uint16) JSVAL_TO_INT(id);
}
jsval *array;
if (kind == JSCPK_UPVAR) {
JSObject *callee = JSVAL_TO_OBJECT(STOBJ_GET_SLOT(obj, JSSLOT_CALLEE));
if (STOBJ_GET_CLASS(obj) != &js_CallClass)
return JS_TRUE;
#ifdef DEBUG
JSFunction *callee_fun = (JSFunction *) callee->getPrivate();
JS_ASSERT(FUN_FLAT_CLOSURE(callee_fun));
JS_ASSERT(i < callee_fun->u.i.nupvars);
#endif
fun = js_GetCallObjectFunction(obj);
fp = (JSStackFrame *) obj->getPrivate();
if (kind == JSCPK_ARGUMENTS) {
if (setter) {
if (fp)
fp->flags |= JSFRAME_OVERRIDE_ARGS;
STOBJ_SET_SLOT(obj, JSSLOT_CALL_ARGUMENTS, *vp);
} else {
if (fp && !(fp->flags & JSFRAME_OVERRIDE_ARGS)) {
JSObject *argsobj;
argsobj = js_GetArgsObject(cx, fp);
if (!argsobj)
return JS_FALSE;
*vp = OBJECT_TO_JSVAL(argsobj);
} else {
*vp = STOBJ_GET_SLOT(obj, JSSLOT_CALL_ARGUMENTS);
}
}
return JS_TRUE;
}
JS_ASSERT((int16) JSVAL_TO_INT(id) == JSVAL_TO_INT(id));
i = (uint16) JSVAL_TO_INT(id);
JS_ASSERT_IF(kind == JSCPK_ARG, i < fun->nargs);
JS_ASSERT_IF(kind == JSCPK_VAR, i < fun->u.i.nvars);
if (!fp) {
i += CALL_CLASS_FIXED_RESERVED_SLOTS;
if (kind == JSCPK_VAR)
i += fun->nargs;
else
JS_ASSERT(kind == JSCPK_ARG);
return setter
? JS_SetReservedSlot(cx, obj, i, *vp)
: JS_GetReservedSlot(cx, obj, i, vp);
}
if (kind == JSCPK_ARG) {
array = fp->argv;
array = callee->dslots;
} else {
JS_ASSERT(kind == JSCPK_VAR);
array = fp->slots;
JSFunction *fun = js_GetCallObjectFunction(obj);
JS_ASSERT_IF(kind == JSCPK_ARG, i < fun->nargs);
JS_ASSERT_IF(kind == JSCPK_VAR, i < fun->u.i.nvars);
JSStackFrame *fp = (JSStackFrame *) obj->getPrivate();
if (kind == JSCPK_ARGUMENTS) {
if (setter) {
if (fp)
fp->flags |= JSFRAME_OVERRIDE_ARGS;
STOBJ_SET_SLOT(obj, JSSLOT_CALL_ARGUMENTS, *vp);
} else {
if (fp && !(fp->flags & JSFRAME_OVERRIDE_ARGS)) {
JSObject *argsobj;
argsobj = js_GetArgsObject(cx, fp);
if (!argsobj)
return false;
*vp = OBJECT_TO_JSVAL(argsobj);
} else {
*vp = STOBJ_GET_SLOT(obj, JSSLOT_CALL_ARGUMENTS);
}
}
return true;
}
if (!fp) {
i += CALL_CLASS_FIXED_RESERVED_SLOTS;
if (kind == JSCPK_VAR)
i += fun->nargs;
else
JS_ASSERT(kind == JSCPK_ARG);
return setter
? JS_SetReservedSlot(cx, obj, i, *vp)
: JS_GetReservedSlot(cx, obj, i, vp);
}
if (kind == JSCPK_ARG) {
array = fp->argv;
} else {
JS_ASSERT(kind == JSCPK_VAR);
array = fp->slots;
}
}
if (setter) {
GC_POKE(cx, array[i]);
array[i] = *vp;
} else {
*vp = array[i];
}
return JS_TRUE;
return true;
}
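/*
 * A note on the new JSCPK_UPVAR path above: for a flat closure the copied
 * upvar values live in the callee function object's dslots (see the asserts
 * against u.i.nupvars), so the Call object forwards upvar reads and writes
 * there rather than to the stack frame. The GetFlatUpvar/SetFlatUpvar
 * wrappers below are what call_resolve installs for such names.
 */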
static JSBool
GetCallArguments(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
{
return CallPropertyOp(cx, obj, id, vp, JSCPK_ARGUMENTS, JS_FALSE);
return CallPropertyOp(cx, obj, id, vp, JSCPK_ARGUMENTS);
}
static JSBool
SetCallArguments(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
{
return CallPropertyOp(cx, obj, id, vp, JSCPK_ARGUMENTS, JS_TRUE);
return CallPropertyOp(cx, obj, id, vp, JSCPK_ARGUMENTS, true);
}
JSBool
js_GetCallArg(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
{
return CallPropertyOp(cx, obj, id, vp, JSCPK_ARG, JS_FALSE);
return CallPropertyOp(cx, obj, id, vp, JSCPK_ARG);
}
JSBool
SetCallArg(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
{
return CallPropertyOp(cx, obj, id, vp, JSCPK_ARG, JS_TRUE);
return CallPropertyOp(cx, obj, id, vp, JSCPK_ARG, true);
}
JSBool
GetFlatUpvar(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
{
return CallPropertyOp(cx, obj, id, vp, JSCPK_UPVAR);
}
JSBool
SetFlatUpvar(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
{
return CallPropertyOp(cx, obj, id, vp, JSCPK_UPVAR, true);
}
JSBool
js_GetCallVar(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
{
return CallPropertyOp(cx, obj, id, vp, JSCPK_VAR, JS_FALSE);
return CallPropertyOp(cx, obj, id, vp, JSCPK_VAR);
}
JSBool
js_GetCallVarChecked(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
{
if (!CallPropertyOp(cx, obj, id, vp, JSCPK_VAR, JS_FALSE))
return JS_FALSE;
if (!CallPropertyOp(cx, obj, id, vp, JSCPK_VAR))
return false;
return CheckForEscapingClosure(cx, obj, vp);
}
@ -1118,23 +1146,25 @@ js_GetCallVarChecked(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
JSBool
SetCallVar(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
{
return CallPropertyOp(cx, obj, id, vp, JSCPK_VAR, JS_TRUE);
return CallPropertyOp(cx, obj, id, vp, JSCPK_VAR, true);
}
JSBool JS_FASTCALL
js_SetCallArg(JSContext *cx, JSObject *obj, jsid id, jsval v)
{
return CallPropertyOp(cx, obj, id, &v, JSCPK_ARG, JS_TRUE);
return CallPropertyOp(cx, obj, id, &v, JSCPK_ARG, true);
}
JSBool JS_FASTCALL
js_SetCallVar(JSContext *cx, JSObject *obj, jsid id, jsval v)
{
return CallPropertyOp(cx, obj, id, &v, JSCPK_VAR, JS_TRUE);
return CallPropertyOp(cx, obj, id, &v, JSCPK_VAR, true);
}
JS_DEFINE_CALLINFO_4(extern, BOOL, js_SetCallArg, CONTEXT, OBJECT, JSID, JSVAL, 0, 0)
JS_DEFINE_CALLINFO_4(extern, BOOL, js_SetCallVar, CONTEXT, OBJECT, JSID, JSVAL, 0, 0)
JS_DEFINE_CALLINFO_4(extern, BOOL, js_SetCallArg, CONTEXT, OBJECT, JSID, JSVAL, 0,
nanojit::ACC_STORE_ANY)
JS_DEFINE_CALLINFO_4(extern, BOOL, js_SetCallVar, CONTEXT, OBJECT, JSID, JSVAL, 0,
nanojit::ACC_STORE_ANY)
static JSBool
call_resolve(JSContext *cx, JSObject *obj, jsval idval, uintN flags,
@ -1148,7 +1178,7 @@ call_resolve(JSContext *cx, JSObject *obj, jsval idval, uintN flags,
uintN slot, attrs;
JS_ASSERT(STOBJ_GET_CLASS(obj) == &js_CallClass);
JS_ASSERT(!STOBJ_GET_PROTO(obj));
JS_ASSERT(!obj->getProto());
if (!JSVAL_IS_STRING(idval))
return JS_TRUE;
@ -1173,7 +1203,7 @@ call_resolve(JSContext *cx, JSObject *obj, jsval idval, uintN flags,
* comments in js_PurgeScopeChainHelper from jsobj.cpp.
*/
localKind = js_LookupLocal(cx, fun, JSID_TO_ATOM(id), &slot);
if (localKind != JSLOCAL_NONE && localKind != JSLOCAL_UPVAR) {
if (localKind != JSLOCAL_NONE) {
JS_ASSERT((uint16) slot == slot);
/*
@ -1186,12 +1216,22 @@ call_resolve(JSContext *cx, JSObject *obj, jsval idval, uintN flags,
getter = js_GetCallArg;
setter = SetCallArg;
} else {
JS_ASSERT(localKind == JSLOCAL_VAR || localKind == JSLOCAL_CONST);
JS_ASSERT(slot < fun->u.i.nvars);
getter = js_GetCallVar;
setter = SetCallVar;
if (localKind == JSLOCAL_CONST)
attrs |= JSPROP_READONLY;
JSCallPropertyKind cpkind;
if (localKind == JSLOCAL_UPVAR) {
if (!FUN_FLAT_CLOSURE(fun))
return JS_TRUE;
getter = GetFlatUpvar;
setter = SetFlatUpvar;
cpkind = JSCPK_UPVAR;
} else {
JS_ASSERT(localKind == JSLOCAL_VAR || localKind == JSLOCAL_CONST);
JS_ASSERT(slot < fun->u.i.nvars);
getter = js_GetCallVar;
setter = SetCallVar;
cpkind = JSCPK_VAR;
if (localKind == JSLOCAL_CONST)
attrs |= JSPROP_READONLY;
}
/*
* Use js_GetCallVarChecked if the local's value is a null closure.
@ -1199,7 +1239,7 @@ call_resolve(JSContext *cx, JSObject *obj, jsval idval, uintN flags,
* null closure, not on every use.
*/
jsval v;
if (!CallPropertyOp(cx, obj, INT_TO_JSID((int16)slot), &v, JSCPK_VAR, JS_FALSE))
if (!CallPropertyOp(cx, obj, INT_TO_JSID((int16)slot), &v, cpkind))
return JS_FALSE;
if (VALUE_IS_FUNCTION(cx, v) &&
GET_FUNCTION_PRIVATE(cx, JSVAL_TO_OBJECT(v))->needsWrapper()) {
@ -1317,7 +1357,7 @@ fun_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
JS_GetInstancePrivate(cx, obj, &js_FunctionClass, NULL))) {
if (slot != FUN_LENGTH)
return JS_TRUE;
obj = OBJ_GET_PROTO(cx, obj);
obj = obj->getProto();
if (!obj)
return JS_TRUE;
}
@ -1478,7 +1518,7 @@ fun_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
* object itself.
*/
JSObject *proto =
js_NewObject(cx, &js_ObjectClass, NULL, OBJ_GET_PARENT(cx, obj));
js_NewObject(cx, &js_ObjectClass, NULL, obj->getParent());
if (!proto)
return JS_FALSE;
@ -1573,8 +1613,8 @@ js_XDRFunctionObject(JSXDRState *xdr, JSObject **objp)
fun = js_NewFunction(cx, NULL, NULL, 0, JSFUN_INTERPRETED, NULL, NULL);
if (!fun)
return JS_FALSE;
STOBJ_CLEAR_PARENT(FUN_OBJECT(fun));
STOBJ_CLEAR_PROTO(FUN_OBJECT(fun));
FUN_OBJECT(fun)->clearParent();
FUN_OBJECT(fun)->clearProto();
#ifdef __GNUC__
nvars = nargs = nupvars = 0; /* quell GCC uninitialized warning */
#endif
@ -2185,7 +2225,7 @@ Function(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
* its running context's globalObject, which might be different from the
* top-level reachable from scopeChain (in HTML frames, e.g.).
*/
parent = OBJ_GET_PARENT(cx, JSVAL_TO_OBJECT(argv[-2]));
parent = JSVAL_TO_OBJECT(argv[-2])->getParent();
fun = js_NewFunction(cx, obj, NULL, 0, JSFUN_LAMBDA | JSFUN_INTERPRETED,
parent, cx->runtime->atomState.anonymousAtom);
@ -2409,7 +2449,7 @@ js_NewFunction(JSContext *cx, JSObject *funobj, JSNative native, uintN nargs,
if (funobj) {
JS_ASSERT(HAS_FUNCTION_CLASS(funobj));
OBJ_SET_PARENT(cx, funobj, parent);
funobj->setParent(parent);
} else {
funobj = js_NewObject(cx, &js_FunctionClass, NULL, parent);
if (!funobj)
@ -2419,7 +2459,7 @@ js_NewFunction(JSContext *cx, JSObject *funobj, JSNative native, uintN nargs,
fun = (JSFunction *) funobj;
/* Initialize all function members. */
fun->nargs = nargs;
fun->nargs = uint16(nargs);
fun->flags = flags & (JSFUN_FLAGS_MASK | JSFUN_KINDMASK | JSFUN_TRCINFO);
if ((flags & JSFUN_KINDMASK) >= JSFUN_INTERPRETED) {
JS_ASSERT(!native);
@ -2478,8 +2518,8 @@ js_CloneFunctionObject(JSContext *cx, JSFunction *fun, JSObject *parent,
}
#ifdef JS_TRACER
JS_DEFINE_CALLINFO_4(extern, OBJECT, js_CloneFunctionObject,
CONTEXT, FUNCTION, OBJECT, OBJECT, 0, 0)
JS_DEFINE_CALLINFO_4(extern, OBJECT, js_CloneFunctionObject, CONTEXT, FUNCTION, OBJECT, OBJECT, 0,
nanojit::ACC_STORE_ANY)
#endif
/*
@ -2509,7 +2549,7 @@ js_AllocFlatClosure(JSContext *cx, JSFunction *fun, JSObject *scopeChain)
}
JS_DEFINE_CALLINFO_3(extern, OBJECT, js_AllocFlatClosure,
CONTEXT, FUNCTION, OBJECT, 0, 0)
CONTEXT, FUNCTION, OBJECT, 0, nanojit::ACC_STORE_ANY)
JS_REQUIRES_STACK JSObject *
js_NewFlatClosure(JSContext *cx, JSFunction *fun)
@ -2541,6 +2581,7 @@ js_NewDebuggableFlatClosure(JSContext *cx, JSFunction *fun)
{
JS_ASSERT(cx->fp->fun->flags & JSFUN_HEAVYWEIGHT);
JS_ASSERT(!cx->fp->fun->optimizedClosure());
JS_ASSERT(FUN_FLAT_CLOSURE(fun));
return WrapEscapingClosure(cx, cx->fp, FUN_OBJECT(fun), fun);
}

View file

@ -346,7 +346,7 @@ struct JSGCArena {
return reinterpret_cast<JSGCArena *>(pageStart);
}
bool hasPrevUnmarked() const { return info.prevUnmarkedPage; }
bool hasPrevUnmarked() const { return !!info.prevUnmarkedPage; }
JSGCArena *getPrevUnmarked() const {
JS_ASSERT(hasPrevUnmarked());
@ -520,7 +520,7 @@ IsMarkedGCThing(JSGCArena *a, void *thing)
{
JS_ASSERT(a == JSGCArena::fromGCThing(thing));
jsuword index = ThingToGCCellIndex(thing);
return JS_TEST_BIT(a->markBitmap, index);
return !!JS_TEST_BIT(a->markBitmap, index);
}
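/*
 * (Aside: the !! conversions here, as in similar changes elsewhere in this
 * patch, make the int-to-bool narrowing explicit -- presumably to silence
 * MSVC's C4800 "forcing value to bool" performance warning.)
 */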
inline bool
@ -529,7 +529,7 @@ IsMarkedGCThing(JSGCArena *a, jsuword thingOffset)
JS_ASSERT(thingOffset < GC_ARENA_CELLS_SIZE);
JS_ASSERT(!(thingOffset & GC_CELL_MASK));
jsuword index = thingOffset >> GC_CELL_SHIFT;
return JS_TEST_BIT(a->markBitmap, index);
return !!JS_TEST_BIT(a->markBitmap, index);
}
inline bool
@ -1060,7 +1060,7 @@ js_DumpGCStats(JSRuntime *rt, FILE *fp)
sumThings += st->nthings;
sumMaxThings += st->maxthings;
sumThingSize += thingSize * st->nthings;
sumTotalThingSize += thingSize * st->totalthings;
sumTotalThingSize += size_t(thingSize * st->totalthings);
sumArenaCapacity += thingSize * thingsPerArena * st->narenas;
sumTotalArenaCapacity += thingSize * thingsPerArena * st->totalarenas;
sumAlloc += st->alloc;

View file

@ -78,9 +78,9 @@ class HashTable : AllocPolicy
NonConstT t;
bool isFree() const { return keyHash == 0; }
void setFree() { keyHash = 0; t = T(); }
void setFree() { keyHash = 0; assignT(t, T()); }
bool isRemoved() const { return keyHash == 1; }
void setRemoved() { keyHash = 1; t = T(); }
void setRemoved() { keyHash = 1; assignT(t, T()); }
bool isLive() const { return keyHash > 1; }
void setLive(HashNumber hn) { JS_ASSERT(hn > 1); keyHash = hn; }
@ -109,11 +109,13 @@ class HashTable : AllocPolicy
Ptr(Entry &entry) : entry(&entry) {}
public:
bool found() const { return entry->isLive(); }
operator ConvertibleToBool() { return found() ? &Ptr::nonNull : 0; }
bool found() const { return entry->isLive(); }
operator ConvertibleToBool() const { return found() ? &Ptr::nonNull : 0; }
bool operator==(const Ptr &rhs) const { JS_ASSERT(found() && rhs.found()); return entry == rhs.entry; }
bool operator!=(const Ptr &rhs) const { return !(*this == rhs); }
T &operator*() const { return entry->t; }
T *operator->() const { return &entry->t; }
T &operator*() const { return entry->t; }
T *operator->() const { return &entry->t; }
};
/* A Ptr that can be used to add a key after a failed lookup. */
@ -140,7 +142,7 @@ class HashTable : AllocPolicy
++cur;
}
Entry *cur, * const end;
Entry *cur, *end;
public:
bool empty() const {
@ -179,22 +181,15 @@ class HashTable : AllocPolicy
void operator=(const Enum &);
public:
/* Type returned from hash table used to initialize Enum object. */
struct Init {
Init(Range r, HashTable &t) : range(r), table(t) {}
Range range;
HashTable &table;
};
/* Initialize with the return value of enumerate. */
Enum(Init i) : Range(i.range), table(i.table), removed(false) {}
template<class Map>
Enum(Map &map) : Range(map.all()), table(map.impl), removed(false) {}
/*
* Removes the |front()| element from the table, leaving |front()|
* invalid until the next call to |popFront()|. For example:
*
* HashSet<int> s;
* for (HashSet<int>::Enum e(s.enumerate()); !e.empty(); e.popFront())
* for (HashSet<int>::Enum e(s); !e.empty(); e.popFront())
* if (e.front() == 42)
* e.removeFront();
*/
@ -529,10 +524,6 @@ class HashTable : AllocPolicy
return Range(table, table + tableCapacity);
}
typename Enum::Init enumerate() {
return typename Enum::Init(all(), *this);
}
bool empty() const {
return !entryCount;
}
@ -715,6 +706,8 @@ class HashMap
};
typedef detail::HashTable<Entry, MapHashPolicy, AllocPolicy> Impl;
friend class Impl::Enum;
/* Not implicitly copyable (expensive). May add explicit |clone| later. */
HashMap(const HashMap &);
HashMap &operator=(const HashMap &);
@ -785,12 +778,12 @@ class HashMap
size_t count() const { return impl.count(); }
/*
* Returns a value that may be used to initialize an Enum. An Enum may be
* used to examine and remove table entries:
* Typedef for the enumeration class. An Enum may be used to examine and
* remove table entries:
*
* typedef HashMap<int,char> HM;
* HM s;
* for (HM::Enum e(s.enumerate()); !e.empty(); e.popFront())
* for (HM::Enum e(s); !e.empty(); e.popFront())
* if (e.front().value == 'l')
* e.removeFront();
*
@ -798,7 +791,6 @@ class HashMap
* Enum in HashTable above (with T = Entry).
*/
typedef typename Impl::Enum Enum;
typename Enum::Init enumerate() { return impl.enumerate(); }
/* Remove all entries. */
void clear() { impl.clear(); }
@ -861,6 +853,8 @@ class HashSet
};
typedef detail::HashTable<const T, SetOps, AllocPolicy> Impl;
friend class Impl::Enum;
/* Not implicitly copyable (expensive). May add explicit |clone| later. */
HashSet(const HashSet &);
HashSet &operator=(const HashSet &);
@ -927,12 +921,12 @@ class HashSet
size_t count() const { return impl.count(); }
/*
* Returns a value that may be used to initialize an Enum. An Enum may be
* used to examine and remove table entries.
* Typedef for the enumeration class. An Enum may be used to examine and
* remove table entries:
*
* typedef HashSet<int> HS;
* HS s;
* for (HS::Enum e(s.enumerate()); !e.empty(); e.popFront())
* for (HS::Enum e(s); !e.empty(); e.popFront())
* if (e.front() == 42)
* e.removeFront();
*
@ -940,7 +934,6 @@ class HashSet
* Enum in HashTable above.
*/
typedef typename Impl::Enum Enum;
typename Enum::Init enumerate() { return impl.enumerate(); }
/* Remove all entries. */
void clear() { impl.clear(); }

View file

@ -150,12 +150,12 @@ js_FillPropertyCache(JSContext *cx, JSObject *obj,
JSObject *tmp = obj;
for (uintN i = 0; i != scopeIndex; i++)
tmp = OBJ_GET_PARENT(cx, tmp);
tmp = tmp->getParent();
JS_ASSERT(tmp != pobj);
protoIndex = 1;
for (;;) {
tmp = OBJ_GET_PROTO(cx, tmp);
tmp = tmp->getProto();
/*
* We cannot cache properties coming from native objects behind
@ -295,7 +295,7 @@ js_FillPropertyCache(JSContext *cx, JSObject *obj,
* matching empty scope. In unusual cases involving
* __proto__ assignment we may not find one.
*/
JSObject *proto = STOBJ_GET_PROTO(obj);
JSObject *proto = obj->getProto();
if (!proto || !OBJ_IS_NATIVE(proto))
return JS_NO_PROP_CACHE_FILL;
JSScope *protoscope = OBJ_SCOPE(proto);
@ -335,7 +335,7 @@ js_FillPropertyCache(JSContext *cx, JSObject *obj,
#ifdef DEBUG
if (scopeIndex == 0) {
JS_ASSERT(protoIndex != 0);
JS_ASSERT((protoIndex == 1) == (OBJ_GET_PROTO(cx, obj) == pobj));
JS_ASSERT((protoIndex == 1) == (obj->getProto() == pobj));
}
#endif
@ -443,7 +443,7 @@ js_FullTestPropertyCache(JSContext *cx, jsbytecode *pc,
if (JOF_MODE(cs->format) == JOF_NAME) {
while (vcap & (PCVCAP_SCOPEMASK << PCVCAP_PROTOBITS)) {
tmp = OBJ_GET_PARENT(cx, pobj);
tmp = pobj->getParent();
if (!tmp || !OBJ_IS_NATIVE(tmp))
break;
pobj = tmp;
@ -454,7 +454,7 @@ js_FullTestPropertyCache(JSContext *cx, jsbytecode *pc,
}
while (vcap & PCVCAP_PROTOMASK) {
tmp = OBJ_GET_PROTO(cx, pobj);
tmp = pobj->getProto();
if (!tmp || !OBJ_IS_NATIVE(tmp))
break;
pobj = tmp;
@ -762,7 +762,7 @@ js_GetScopeChain(JSContext *cx, JSStackFrame *fp)
*/
limitClone = fp->scopeChain;
while (OBJ_GET_CLASS(cx, limitClone) == &js_WithClass)
limitClone = OBJ_GET_PARENT(cx, limitClone);
limitClone = limitClone->getParent();
JS_ASSERT(limitClone);
/*
@ -783,7 +783,7 @@ js_GetScopeChain(JSContext *cx, JSStackFrame *fp)
* be a block either. So we can just grab limitClone's prototype here
* regardless of its type or which frame it belongs to.
*/
limitBlock = OBJ_GET_PROTO(cx, limitClone);
limitBlock = limitClone->getProto();
/* If the innermost block has already been cloned, we are done. */
if (limitBlock == sharedBlock)
@ -808,8 +808,8 @@ js_GetScopeChain(JSContext *cx, JSStackFrame *fp)
*/
JSObject *newChild = innermostNewChild;
for (;;) {
JS_ASSERT(OBJ_GET_PROTO(cx, newChild) == sharedBlock);
sharedBlock = OBJ_GET_PARENT(cx, sharedBlock);
JS_ASSERT(newChild->getProto() == sharedBlock);
sharedBlock = sharedBlock->getParent();
/* Sometimes limitBlock will be NULL, so check that first. */
if (sharedBlock == limitBlock || !sharedBlock)
@ -821,14 +821,10 @@ js_GetScopeChain(JSContext *cx, JSStackFrame *fp)
if (!clone)
return NULL;
/*
* Avoid OBJ_SET_PARENT overhead as newChild cannot escape to
* other threads.
*/
STOBJ_SET_PARENT(newChild, clone);
newChild->setParent(clone);
newChild = clone;
}
STOBJ_SET_PARENT(newChild, fp->scopeChain);
newChild->setParent(fp->scopeChain);
/*
@ -894,7 +890,7 @@ js_ComputeGlobalThis(JSContext *cx, JSBool lazy, jsval *argv)
JSObject *thisp;
if (JSVAL_IS_PRIMITIVE(argv[-2]) ||
!OBJ_GET_PARENT(cx, JSVAL_TO_OBJECT(argv[-2]))) {
!JSVAL_TO_OBJECT(argv[-2])->getParent()) {
thisp = cx->globalObject;
} else {
jsid id;
@ -937,10 +933,8 @@ js_ComputeGlobalThis(JSContext *cx, JSBool lazy, jsval *argv)
return NULL;
if (v != JSVAL_NULL) {
thisp = JSVAL_IS_VOID(v)
? OBJ_GET_PARENT(cx, thisp)
: JSVAL_TO_OBJECT(v);
while ((parent = OBJ_GET_PARENT(cx, thisp)) != NULL)
thisp = JSVAL_IS_VOID(v) ? thisp->getParent() : JSVAL_TO_OBJECT(v);
while ((parent = thisp->getParent()) != NULL)
thisp = parent;
}
}
@ -1135,8 +1129,8 @@ js_Invoke(JSContext *cx, uintN argc, jsval *vp, uintN flags)
goto bad;
funobj = JSVAL_TO_OBJECT(v);
parent = OBJ_GET_PARENT(cx, funobj);
clasp = OBJ_GET_CLASS(cx, funobj);
parent = funobj->getParent();
clasp = funobj->getClass();
if (clasp != &js_FunctionClass) {
#if JS_HAS_NO_SUCH_METHOD
if (clasp == &js_NoSuchMethodClass) {
@ -1561,7 +1555,7 @@ js_Execute(JSContext *cx, JSObject *chain, JSScript *script,
frame.argsobj = NULL;
JSObject *obj = chain;
if (cx->options & JSOPTION_VAROBJFIX) {
while (JSObject *tmp = OBJ_GET_PARENT(cx, obj))
while (JSObject *tmp = obj->getParent())
obj = tmp;
}
frame.fun = NULL;
@ -1899,9 +1893,9 @@ js_InvokeConstructor(JSContext *cx, uintN argc, JSBool clampReturn, jsval *vp)
}
rval = vp[1];
proto = JSVAL_IS_OBJECT(rval) ? JSVAL_TO_OBJECT(rval) : NULL;
parent = OBJ_GET_PARENT(cx, obj2);
parent = obj2->getParent();
if (OBJ_GET_CLASS(cx, obj2) == &js_FunctionClass) {
if (obj2->getClass() == &js_FunctionClass) {
fun2 = GET_FUNCTION_PRIVATE(cx, obj2);
if (!FUN_INTERPRETED(fun2) && fun2->u.n.clasp)
clasp = fun2->u.n.clasp;
@ -2005,7 +1999,7 @@ js_LeaveWith(JSContext *cx)
JS_ASSERT(OBJ_GET_CLASS(cx, withobj) == &js_WithClass);
JS_ASSERT(withobj->getPrivate() == cx->fp);
JS_ASSERT(OBJ_BLOCK_DEPTH(cx, withobj) >= 0);
cx->fp->scopeChain = OBJ_GET_PARENT(cx, withobj);
cx->fp->scopeChain = withobj->getParent();
withobj->setPrivate(NULL);
}
@ -2037,7 +2031,7 @@ js_UnwindScope(JSContext *cx, JSStackFrame *fp, jsint stackDepth,
JS_ASSERT(stackDepth >= 0);
JS_ASSERT(StackBase(fp) + stackDepth <= fp->regs->sp);
for (obj = fp->blockChain; obj; obj = OBJ_GET_PARENT(cx, obj)) {
for (obj = fp->blockChain; obj; obj = obj->getParent()) {
JS_ASSERT(OBJ_GET_CLASS(cx, obj) == &js_BlockClass);
if (OBJ_BLOCK_DEPTH(cx, obj) < stackDepth)
break;
@ -2353,6 +2347,166 @@ js_DumpOpMeters()
#ifndef jsinvoke_cpp___
#ifdef JS_REPRMETER
// jsval representation metering: this measures the kinds of jsvals that
// are used as inputs to each JSOp.
namespace reprmeter {
enum Repr {
NONE,
INT,
DOUBLE,
BOOLEAN_PROPER,
BOOLEAN_OTHER,
STRING,
OBJECT_NULL,
OBJECT_PLAIN,
FUNCTION_INTERPRETED,
FUNCTION_FASTNATIVE,
FUNCTION_SLOWNATIVE,
ARRAY_SLOW,
ARRAY_DENSE
};
// Return the |repr| value giving the representation of the given jsval.
static Repr
GetRepr(jsval v)
{
if (JSVAL_IS_INT(v))
return INT;
if (JSVAL_IS_DOUBLE(v))
return DOUBLE;
if (JSVAL_IS_SPECIAL(v)) {
return (v == JSVAL_TRUE || v == JSVAL_FALSE)
? BOOLEAN_PROPER
: BOOLEAN_OTHER;
}
if (JSVAL_IS_STRING(v))
return STRING;
JS_ASSERT(JSVAL_IS_OBJECT(v));
JSObject *obj = JSVAL_TO_OBJECT(v);
if (VALUE_IS_FUNCTION(cx, v)) {
JSFunction *fun = GET_FUNCTION_PRIVATE(cx, obj);
if (FUN_INTERPRETED(fun))
return FUNCTION_INTERPRETED;
if (fun->flags & JSFUN_FAST_NATIVE)
return FUNCTION_FASTNATIVE;
return FUNCTION_SLOWNATIVE;
}
// The null check must come first: the array tests below would
// misclassify (and dereference) a null object.
if (!obj)
return OBJECT_NULL;
if (obj->isDenseArray())
return ARRAY_DENSE;
if (obj->isArray())
return ARRAY_SLOW;
return OBJECT_PLAIN;
}
static const char *reprName[] = { "invalid", "int", "double", "bool", "special",
"string", "null", "object",
"fun:interp", "fun:fast", "fun:slow",
"array:slow", "array:dense" };
// Logically, a tuple of (JSOp, repr_1, ..., repr_n) where repr_i is
// the |repr| of the ith input to the JSOp.
struct OpInput {
enum { max_uses = 16 };
JSOp op;
Repr uses[max_uses];
OpInput() : op(JSOp(255)) {
for (int i = 0; i < max_uses; ++i)
uses[i] = NONE;
}
OpInput(JSOp op) : op(op) {
for (int i = 0; i < max_uses; ++i)
uses[i] = NONE;
}
// Hash function
operator uint32() const {
uint32 h = op;
for (int i = 0; i < max_uses; ++i)
h = h * 7 + uses[i] * 13;
return h;
}
bool operator==(const OpInput &opinput) const {
if (op != opinput.op)
return false;
for (int i = 0; i < max_uses; ++i) {
if (uses[i] != opinput.uses[i])
return false;
}
return true;
}
OpInput &operator=(const OpInput &opinput) {
op = opinput.op;
for (int i = 0; i < max_uses; ++i)
uses[i] = opinput.uses[i];
return *this;
}
};
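// (Presumably DefaultHasher<OpInput> hashes through the operator uint32()
// conversion above and uses operator== to match entries, which is why both
// are defined for this otherwise plain struct.)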
typedef HashMap<OpInput, uint64, DefaultHasher<OpInput>, SystemAllocPolicy> OpInputHistogram;
OpInputHistogram opinputs;
bool opinputsInitialized = false;
// Record an OpInput for the current op. This should be called just
// before executing the op.
static void
MeterRepr(JSStackFrame *fp)
{
// Note that we simply ignore the possibility of errors (OOMs) from
// the hash map operations, since this is only metering code.
if (!opinputsInitialized) {
opinputs.init();
opinputsInitialized = true;
}
JSOp op = JSOp(*fp->regs->pc);
unsigned nuses = js_GetStackUses(&js_CodeSpec[op], op, fp->regs->pc);
// Build the OpInput.
OpInput opinput(op);
for (unsigned i = 0; i < nuses; ++i) {
jsval v = fp->regs->sp[-nuses+i];
opinput.uses[i] = GetRepr(v);
}
OpInputHistogram::AddPtr p = opinputs.lookupForAdd(opinput);
if (p)
++p->value;
else
opinputs.add(p, opinput, 1);
}
void
js_DumpReprMeter()
{
FILE *f = fopen("/tmp/reprmeter.txt", "w");
JS_ASSERT(f);
for (OpInputHistogram::Range r = opinputs.all(); !r.empty(); r.popFront()) {
const OpInput &o = r.front().key;
uint64 c = r.front().value;
fprintf(f, "%3d,%s", o.op, js_CodeName[o.op]);
for (int i = 0; i < OpInput::max_uses && o.uses[i] != NONE; ++i)
fprintf(f, ",%s", reprName[o.uses[i]]);
fprintf(f, ",%llu\n", c);
}
fclose(f);
}
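// Illustrative output (numbers hypothetical): each row of
// /tmp/reprmeter.txt has the form "opcode,opname,repr-per-use...,count",
// e.g. " 36,getelem,object,int,1024" for a getelem executed 1024 times
// with an object and an int on the stack.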
}
#endif /* JS_REPRMETER */
#define PUSH(v) (*regs.sp++ = (v))
#define PUSH_OPND(v) PUSH(v)
#define STORE_OPND(n,v) (regs.sp[n] = (v))
@ -2540,6 +2694,12 @@ JS_STATIC_ASSERT(!CAN_DO_FAST_INC_DEC(INT_TO_JSVAL_CONSTEXPR(JSVAL_INT_MAX)));
#endif
#ifdef JS_REPRMETER
# define METER_REPR(fp) (reprmeter::MeterRepr(fp))
#else
# define METER_REPR(fp)
#endif /* JS_REPRMETER */
/*
* Threaded interpretation via computed goto appears to be well-supported by
* GCC 3 and higher. IBM's C compiler when run with the right options (e.g.,
@ -2782,8 +2942,9 @@ js_Interpret(JSContext *cx)
JS_EXTENSION_(goto *jumpTable[op]); \
JS_END_MACRO
# define DO_NEXT_OP(n) JS_BEGIN_MACRO \
METER_OP_PAIR(op, regs.pc[n]); \
METER_OP_PAIR(op, JSOp(regs.pc[n])); \
op = (JSOp) *(regs.pc += (n)); \
METER_REPR(fp); \
DO_OP(); \
JS_END_MACRO
@ -3108,11 +3269,17 @@ js_Interpret(JSContext *cx)
#endif /* !JS_THREADED_INTERP */
error:
#ifdef JS_TRACER
if (fp->imacpc && cx->throwing) {
// To keep things simple, we hard-code imacro exception handlers here.
if (*fp->imacpc == JSOP_NEXTITER && js_ValueIsStopIteration(cx->exception)) {
if (*fp->imacpc == JSOP_NEXTITER &&
InCustomIterNextTryRegion(regs.pc) &&
js_ValueIsStopIteration(cx->exception)) {
// If the other NEXTITER imacro, native_iter_next, throws
// StopIteration, do not catch it here. See bug 547911.
// pc may point to JSOP_DUP here due to bug 474854.
JS_ASSERT(*regs.pc == JSOP_CALL || *regs.pc == JSOP_DUP || *regs.pc == JSOP_TRUE);
JS_ASSERT(*regs.pc == JSOP_CALL || *regs.pc == JSOP_DUP);
cx->throwing = JS_FALSE;
cx->exception = JSVAL_VOID;
regs.sp[-1] = JSVAL_HOLE;
@ -3125,6 +3292,7 @@ js_Interpret(JSContext *cx)
fp->imacpc = NULL;
atoms = script->atomMap.vector;
}
#endif
JS_ASSERT((size_t)((fp->imacpc ? fp->imacpc : regs.pc) - script->code) < script->length);

View file

@ -119,7 +119,10 @@ struct JSStackFrame {
* also used in some other cases --- entering 'with' blocks, for
* example.
*/
JSObject *scopeChain;
union {
JSObject *scopeChain;
jsval scopeChainVal;
};
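/*
 * (The jsval alias works because object jsvals and JSObject pointers share
 * a representation; presumably it lets jitted code load and store this
 * slot as an ordinary tagged value without conversion.)
 */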
JSObject *blockChain;
uint32 flags; /* frame flags -- see below */
@ -187,12 +190,17 @@ StackBase(JSStackFrame *fp)
return fp->slots + fp->script->nfixed;
}
#ifdef DEBUG
void
JSStackFrame::assertValidStackDepth(uintN depth)
{
JS_ASSERT(0 <= regs->sp - StackBase(this));
JS_ASSERT(depth <= uintptr_t(regs->sp - StackBase(this)));
}
#else
void
JSStackFrame::assertValidStackDepth(uintN /*depth*/){}
#endif
static JS_INLINE uintN
GlobalVarCount(JSStackFrame *fp)
@ -413,7 +421,7 @@ js_FillPropertyCache(JSContext *cx, JSObject *obj,
pobj = obj; \
JS_ASSERT(PCVCAP_TAG(entry->vcap) <= 1); \
if (PCVCAP_TAG(entry->vcap) == 1 && \
(tmp_ = OBJ_GET_PROTO(cx, pobj)) != NULL) { \
(tmp_ = pobj->getProto()) != NULL) { \
pobj = tmp_; \
} \
\

View file

@ -153,7 +153,7 @@ InitNativeIterator(JSContext *cx, JSObject *iterobj, JSObject *obj, uintN flags)
JS_ASSERT(STOBJ_GET_CLASS(iterobj) == &js_IteratorClass);
/* Initialize iterobj in case of enumerate hook failure. */
STOBJ_SET_PARENT(iterobj, obj);
iterobj->setParent(obj);
STOBJ_SET_SLOT(iterobj, JSSLOT_ITER_STATE, JSVAL_NULL);
STOBJ_SET_SLOT(iterobj, JSSLOT_ITER_FLAGS, INT_TO_JSVAL(flags));
if (!js_RegisterCloseableIterator(cx, iterobj))
@ -180,7 +180,7 @@ InitNativeIterator(JSContext *cx, JSObject *iterobj, JSObject *obj, uintN flags)
* store the original object.
*/
JS_ASSERT(obj != iterobj);
STOBJ_SET_PROTO(iterobj, obj);
iterobj->setProto(obj);
}
return JS_TRUE;
}
@ -239,9 +239,9 @@ IteratorNextImpl(JSContext *cx, JSObject *obj, jsval *rval)
JSBool foreach, ok;
jsid id;
JS_ASSERT(OBJ_GET_CLASS(cx, obj) == &js_IteratorClass);
JS_ASSERT(obj->getClass() == &js_IteratorClass);
iterable = OBJ_GET_PARENT(cx, obj);
iterable = obj->getParent();
JS_ASSERT(iterable);
state = STOBJ_GET_SLOT(obj, JSSLOT_ITER_STATE);
if (JSVAL_IS_NULL(state))
@ -464,7 +464,7 @@ js_CloseIterator(JSContext *cx, jsval v)
#endif
return JS_TRUE;
}
JS_DEFINE_CALLINFO_2(FRIEND, BOOL, js_CloseIterator, CONTEXT, JSVAL, 0, 0)
JS_DEFINE_CALLINFO_2(FRIEND, BOOL, js_CloseIterator, CONTEXT, JSVAL, 0, nanojit::ACC_STORE_ANY)
static JSBool
CallEnumeratorNext(JSContext *cx, JSObject *iterobj, uintN flags, jsval *rval)
@ -481,10 +481,10 @@ CallEnumeratorNext(JSContext *cx, JSObject *iterobj, uintN flags, jsval *rval)
JSString *str;
JS_ASSERT(flags & JSITER_ENUMERATE);
JS_ASSERT(STOBJ_GET_CLASS(iterobj) == &js_IteratorClass);
JS_ASSERT(iterobj->getClass() == &js_IteratorClass);
obj = STOBJ_GET_PARENT(iterobj);
origobj = STOBJ_GET_PROTO(iterobj);
obj = iterobj->getParent();
origobj = iterobj->getProto();
state = STOBJ_GET_SLOT(iterobj, JSSLOT_ITER_STATE);
if (JSVAL_IS_NULL(state))
goto stop;
@ -530,9 +530,9 @@ CallEnumeratorNext(JSContext *cx, JSObject *iterobj, uintN flags, jsval *rval)
} else
#endif
{
obj = OBJ_GET_PROTO(cx, obj);
obj = obj->getProto();
if (obj) {
STOBJ_SET_PARENT(iterobj, obj);
iterobj->setParent(obj);
if (!obj->enumerate(cx, JSENUMERATE_INIT, &state, NULL))
return JS_FALSE;
STOBJ_SET_SLOT(iterobj, JSSLOT_ITER_STATE, state);

View file

@ -563,7 +563,7 @@ math_toSource(JSContext *cx, uintN argc, jsval *vp)
#define MATH_BUILTIN_CFUN_1(name, cfun) \
static jsdouble FASTCALL math_##name##_tn(jsdouble d) { return cfun(d); } \
JS_DEFINE_TRCINFO_1(math_##name, \
(1, (static, DOUBLE, math_##name##_tn, DOUBLE, 1, 1)))
(1, (static, DOUBLE, math_##name##_tn, DOUBLE, 1, nanojit::ACC_NONE)))
MATH_BUILTIN_CFUN_1(abs, fabs)
MATH_BUILTIN_1(atan)
@ -609,7 +609,7 @@ math_exp_tn(JSContext *cx, jsdouble d)
}
JS_DEFINE_TRCINFO_1(math_exp,
(2, (static, DOUBLE, math_exp_tn, CONTEXT, DOUBLE, 1, 1)))
(2, (static, DOUBLE, math_exp_tn, CONTEXT, DOUBLE, 1, nanojit::ACC_NONE)))
#else
@ -692,27 +692,27 @@ math_floor_tn(jsdouble x)
}
JS_DEFINE_TRCINFO_1(math_acos,
(1, (static, DOUBLE, math_acos_tn, DOUBLE, 1, 1)))
(1, (static, DOUBLE, math_acos_tn, DOUBLE, 1, nanojit::ACC_NONE)))
JS_DEFINE_TRCINFO_1(math_asin,
(1, (static, DOUBLE, math_asin_tn, DOUBLE, 1, 1)))
(1, (static, DOUBLE, math_asin_tn, DOUBLE, 1, nanojit::ACC_NONE)))
JS_DEFINE_TRCINFO_1(math_atan2,
(2, (static, DOUBLE, math_atan2_kernel, DOUBLE, DOUBLE, 1, 1)))
(2, (static, DOUBLE, math_atan2_kernel, DOUBLE, DOUBLE, 1, nanojit::ACC_NONE)))
JS_DEFINE_TRCINFO_1(js_math_floor,
(1, (static, DOUBLE, math_floor_tn, DOUBLE, 1, 1)))
(1, (static, DOUBLE, math_floor_tn, DOUBLE, 1, nanojit::ACC_NONE)))
JS_DEFINE_TRCINFO_1(math_log,
(1, (static, DOUBLE, math_log_tn, DOUBLE, 1, 1)))
(1, (static, DOUBLE, math_log_tn, DOUBLE, 1, nanojit::ACC_NONE)))
JS_DEFINE_TRCINFO_1(js_math_max,
(2, (static, DOUBLE, math_max_tn, DOUBLE, DOUBLE, 1, 1)))
(2, (static, DOUBLE, math_max_tn, DOUBLE, DOUBLE, 1, nanojit::ACC_NONE)))
JS_DEFINE_TRCINFO_1(js_math_min,
(2, (static, DOUBLE, math_min_tn, DOUBLE, DOUBLE, 1, 1)))
(2, (static, DOUBLE, math_min_tn, DOUBLE, DOUBLE, 1, nanojit::ACC_NONE)))
JS_DEFINE_TRCINFO_1(math_pow,
(2, (static, DOUBLE, math_pow_tn, DOUBLE, DOUBLE, 1, 1)))
(2, (static, DOUBLE, math_pow_tn, DOUBLE, DOUBLE, 1, nanojit::ACC_NONE)))
JS_DEFINE_TRCINFO_1(math_random,
(1, (static, DOUBLE, math_random_tn, CONTEXT, 0, 0)))
(1, (static, DOUBLE, math_random_tn, CONTEXT, 0, nanojit::ACC_STORE_ANY)))
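/*
 * math_random is the one Math builtin here that cannot be pure: it reads
 * and advances hidden RNG state, so it keeps isPure == 0 and the
 * conservative ACC_STORE_ANY, unlike the stateless kernels above that are
 * marked 1 / nanojit::ACC_NONE.
 */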
JS_DEFINE_TRCINFO_1(js_math_round,
(1, (static, DOUBLE, math_round_tn, DOUBLE, 1, 1)))
(1, (static, DOUBLE, math_round_tn, DOUBLE, 1, nanojit::ACC_NONE)))
JS_DEFINE_TRCINFO_1(js_math_ceil,
(1, (static, DOUBLE, math_ceil_tn, DOUBLE, 1, 1)))
(1, (static, DOUBLE, math_ceil_tn, DOUBLE, 1, nanojit::ACC_NONE)))
#endif /* JS_TRACER */

View file

@ -73,6 +73,7 @@
#include "jsstrinlines.h"
#include "jsvector.h"
using namespace js;
#ifndef JS_HAVE_STDINT_H /* Native support is innocent until proven guilty. */
@ -254,11 +255,11 @@ const char js_parseInt_str[] = "parseInt";
#ifdef JS_TRACER
JS_DEFINE_TRCINFO_2(num_parseInt,
(2, (static, DOUBLE, ParseInt, CONTEXT, STRING, 1, 1)),
(1, (static, DOUBLE, ParseIntDouble, DOUBLE, 1, 1)))
(2, (static, DOUBLE, ParseInt, CONTEXT, STRING, 1, nanojit::ACC_NONE)),
(1, (static, DOUBLE, ParseIntDouble, DOUBLE, 1, nanojit::ACC_NONE)))
JS_DEFINE_TRCINFO_1(num_parseFloat,
(2, (static, DOUBLE, ParseFloat, CONTEXT, STRING, 1, 1)))
(2, (static, DOUBLE, ParseFloat, CONTEXT, STRING, 1, nanojit::ACC_NONE)))
#endif /* JS_TRACER */
@ -421,7 +422,7 @@ num_toString(JSContext *cx, uintN argc, jsval *vp)
static JSBool
num_toLocaleString(JSContext *cx, uintN argc, jsval *vp)
{
char thousandsLength, decimalLength;
size_t thousandsLength, decimalLength;
const char *numGrouping, *tmpGroup;
JSRuntime *rt;
JSString *numStr, *str;
@ -625,8 +626,10 @@ num_toPrecision(JSContext *cx, uintN argc, jsval *vp)
#ifdef JS_TRACER
JS_DEFINE_TRCINFO_2(num_toString,
(2, (extern, STRING_RETRY, js_NumberToString, CONTEXT, THIS_DOUBLE, 1, 1)),
(3, (static, STRING_RETRY, js_NumberToStringWithBase, CONTEXT, THIS_DOUBLE, INT32, 1, 1)))
(2, (extern, STRING_RETRY, js_NumberToString, CONTEXT, THIS_DOUBLE, 1,
nanojit::ACC_NONE)),
(3, (static, STRING_RETRY, js_NumberToStringWithBase, CONTEXT, THIS_DOUBLE, INT32, 1,
nanojit::ACC_NONE)))
#endif /* JS_TRACER */
@ -884,7 +887,7 @@ js_NumberToStringWithBase(JSContext *cx, jsdouble d, jsint base)
if (jsuint(i) < jsuint(base)) {
if (i < 10)
return JSString::intString(i);
return JSString::unitString('a' + i - 10);
return JSString::unitString(jschar('a' + i - 10));
}
}
numStr = NumberToCString(cx, d, base, buf, sizeof buf);
@ -942,7 +945,6 @@ js_ValueToNumber(JSContext *cx, jsval *vp)
{
jsval v;
JSString *str;
const jschar *bp, *end, *ep;
jsdouble d;
JSObject *obj;
@ -955,28 +957,9 @@ js_ValueToNumber(JSContext *cx, jsval *vp)
if (JSVAL_IS_STRING(v)) {
str = JSVAL_TO_STRING(v);
/*
* Note that ECMA doesn't treat a string beginning with a '0' as
* an octal number here. This works because all such numbers will
* be interpreted as decimal by js_strtod and will never get
* passed to js_strtointeger (which would interpret them as
* octal).
*/
str->getCharsAndEnd(bp, end);
/* ECMA doesn't allow signed hex numbers (bug 273467). */
bp = js_SkipWhiteSpace(bp, end);
if (bp + 2 < end && (*bp == '-' || *bp == '+') &&
bp[1] == '0' && (bp[2] == 'X' || bp[2] == 'x')) {
d = StringToNumberType<jsdouble>(cx, str);
if (JSDOUBLE_IS_NaN(d))
break;
}
if ((!js_strtod(cx, bp, end, &ep, &d) ||
js_SkipWhiteSpace(ep, end) != end) &&
(!js_strtointeger(cx, bp, end, &ep, 0, &d) ||
js_SkipWhiteSpace(ep, end) != end)) {
break;
}
/*
* JSVAL_TRUE indicates that a double jsval was never constructed
@ -989,10 +972,9 @@ js_ValueToNumber(JSContext *cx, jsval *vp)
if (JSVAL_TO_BOOLEAN(v)) {
*vp = JSVAL_ONE;
return 1.0;
} else {
*vp = JSVAL_ZERO;
return 0.0;
}
*vp = JSVAL_ZERO;
return 0.0;
}
if (JSVAL_IS_NULL(v)) {
*vp = JSVAL_ZERO;

View file

@ -48,6 +48,8 @@
#include <ieeefp.h>
#endif
#include "jsstr.h"
/*
* JS number (IEEE double) interface.
*
@ -426,4 +428,62 @@ js_strtointeger(JSContext *cx, const jschar *s, const jschar *send,
JS_END_EXTERN_C
namespace js {
template<typename T> struct NumberTraits { };
template<> struct NumberTraits<int32> {
static JS_ALWAYS_INLINE int32 NaN() { return 0; }
static JS_ALWAYS_INLINE int32 toSelfType(int32 i) { return i; }
static JS_ALWAYS_INLINE int32 toSelfType(jsdouble d) { return js_DoubleToECMAUint32(d); }
};
template<> struct NumberTraits<jsdouble> {
static JS_ALWAYS_INLINE jsdouble NaN() { return js_NaN; }
static JS_ALWAYS_INLINE jsdouble toSelfType(int32 i) { return i; }
static JS_ALWAYS_INLINE jsdouble toSelfType(jsdouble d) { return d; }
};
template<typename T>
static JS_ALWAYS_INLINE T
StringToNumberType(JSContext *cx, JSString *str)
{
if (str->length() == 1) {
jschar c = str->chars()[0];
if ('0' <= c && c <= '9')
return NumberTraits<T>::toSelfType(int32(c - '0'));
return NumberTraits<T>::NaN();
}
const jschar* bp;
const jschar* end;
const jschar* ep;
jsdouble d;
str->getCharsAndEnd(bp, end);
bp = js_SkipWhiteSpace(bp, end);
/* ECMA doesn't allow signed hex numbers (bug 273467). */
if (end - bp >= 2 && bp[0] == '0' && (bp[1] == 'x' || bp[1] == 'X')) {
/* Looks like a hex number. */
if (!js_strtointeger(cx, bp, end, &ep, 16, &d) ||
js_SkipWhiteSpace(ep, end) != end) {
return NumberTraits<T>::NaN();
}
return NumberTraits<T>::toSelfType(d);
}
/*
* Note that ECMA doesn't treat a string beginning with a '0' as
* an octal number here. This works because all such numbers will
* be interpreted as decimal by js_strtod. Also, any hex numbers
* that have made it here (which can only be explicitly signed ones,
* since unsigned hex was handled above) will be treated as 0 without
* consuming the 'x' by js_strtod.
*/
if (!js_strtod(cx, bp, end, &ep, &d) ||
js_SkipWhiteSpace(ep, end) != end) {
return NumberTraits<T>::NaN();
}
return NumberTraits<T>::toSelfType(d);
}
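/*
 * Usage sketch (inputs hypothetical): with the rules above,
 *
 *   StringToNumberType<jsdouble>(cx, str);  // "0x1f"  -> 31.0 (hex branch)
 *   StringToNumberType<jsdouble>(cx, str);  // " 7.5 " -> 7.5  (js_strtod)
 *   StringToNumberType<int32>(cx, str);     // "12px"  -> 0, the int32 NaN()
 *
 * i.e. surrounding white space is skipped, but any unconsumed tail makes
 * the whole string parse as NaN.
 */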
}
#endif /* jsnum_h___ */

View file

@ -305,7 +305,7 @@ js_SetProtoOrParent(JSContext *cx, JSObject *obj, uint32 slot, JSObject *pobj,
JS_LOCK_OBJ(cx, oldproto);
JSScope *scope = OBJ_SCOPE(oldproto);
scope->protoShapeChange(cx);
JSObject *tmp = STOBJ_GET_PROTO(oldproto);
JSObject *tmp = oldproto->getProto();
JS_UNLOCK_OBJ(cx, oldproto);
oldproto = tmp;
}
@ -1192,7 +1192,7 @@ js_CheckScopeChainValidity(JSContext *cx, JSObject *scopeobj, const char *caller
}
}
scopeobj = OBJ_GET_PARENT(cx, scopeobj);
scopeobj = scopeobj->getParent();
}
return inner;
@ -1290,7 +1290,7 @@ obj_eval(JSContext *cx, uintN argc, jsval *vp)
* the former indirect case.
*/
{
JSObject *parent = OBJ_GET_PARENT(cx, obj);
JSObject *parent = obj->getParent();
if (indirectCall || parent) {
uintN flags = parent
? JSREPORT_ERROR
@ -1364,7 +1364,7 @@ obj_eval(JSContext *cx, uintN argc, jsval *vp)
}
/* NB: We know obj is a global object here. */
JS_ASSERT(!OBJ_GET_PARENT(cx, obj));
JS_ASSERT(!obj->getParent());
scopeobj = obj;
} else {
/*
@ -1483,7 +1483,7 @@ obj_eval(JSContext *cx, uintN argc, jsval *vp)
}
}
if (i < 0 ||
STOBJ_GET_PARENT(objarray->vector[i]) == scopeobj) {
objarray->vector[i]->getParent() == scopeobj) {
JS_ASSERT(staticLevel == script->staticLevel);
EVAL_CACHE_METER(hit);
*scriptp = script->u.nextToGC;
@ -1620,7 +1620,7 @@ obj_watch(JSContext *cx, uintN argc, jsval *vp)
return JS_TRUE;
*vp = JSVAL_VOID;
if (OBJ_IS_DENSE_ARRAY(cx, obj) && !js_MakeArraySlow(cx, obj))
if (obj->isDenseArray() && !js_MakeArraySlow(cx, obj))
return JS_FALSE;
return JS_SetWatchPoint(cx, obj, userid, obj_watch_handler, callable);
}
@ -2529,8 +2529,8 @@ DefinePropertyObject(JSContext *cx, JSObject *obj, const PropertyDescriptor &des
changed |= JSPROP_ENUMERATE;
attrs = (sprop->attrs & ~changed) | (desc.attrs & changed);
getter = sprop->getter;
setter = sprop->setter;
getter = sprop->getter();
setter = sprop->setter();
} else if (desc.isDataDescriptor()) {
uintN unchanged = 0;
if (!desc.hasConfigurable)
@ -2573,11 +2573,11 @@ DefinePropertyObject(JSContext *cx, JSObject *obj, const PropertyDescriptor &des
if (desc.hasGet)
getter = desc.getterObject() ? desc.getter() : JS_PropertyStub;
else
getter = sprop->getter;
getter = sprop->getter();
if (desc.hasSet)
setter = desc.setterObject() ? desc.setter() : JS_PropertyStub;
else
setter = sprop->setter;
setter = sprop->setter();
}
*rval = true;
@ -2596,7 +2596,7 @@ DefinePropertyArray(JSContext *cx, JSObject *obj, const PropertyDescriptor &desc
* attributes). Such definitions are probably unlikely, so we don't bother
* for now.
*/
if (OBJ_IS_DENSE_ARRAY(cx, obj) && !js_MakeArraySlow(cx, obj))
if (obj->isDenseArray() && !js_MakeArraySlow(cx, obj))
return JS_FALSE;
jsuint oldLen = obj->fslots[JSSLOT_ARRAY_LENGTH];
@ -2641,7 +2641,7 @@ static JSBool
DefineProperty(JSContext *cx, JSObject *obj, const PropertyDescriptor &desc, bool throwError,
bool *rval)
{
if (OBJ_IS_ARRAY(cx, obj))
if (obj->isArray())
return DefinePropertyArray(cx, obj, desc, throwError, rval);
if (!OBJ_IS_NATIVE(obj))
@ -2807,11 +2807,14 @@ const char js_lookupSetter_str[] = "__lookupSetter__";
#endif
JS_DEFINE_TRCINFO_1(obj_valueOf,
(3, (static, JSVAL, Object_p_valueOf, CONTEXT, THIS, STRING, 0, 0)))
(3, (static, JSVAL, Object_p_valueOf, CONTEXT, THIS, STRING, 0,
nanojit::ACC_STORE_ANY)))
JS_DEFINE_TRCINFO_1(obj_hasOwnProperty,
(3, (static, BOOL_FAIL, Object_p_hasOwnProperty, CONTEXT, THIS, STRING, 0, 0)))
(3, (static, BOOL_FAIL, Object_p_hasOwnProperty, CONTEXT, THIS, STRING, 0,
nanojit::ACC_STORE_ANY)))
JS_DEFINE_TRCINFO_1(obj_propertyIsEnumerable,
(3, (static, BOOL_FAIL, Object_p_propertyIsEnumerable, CONTEXT, THIS, STRING, 0, 0)))
(3, (static, BOOL_FAIL, Object_p_propertyIsEnumerable, CONTEXT, THIS, STRING, 0,
nanojit::ACC_STORE_ANY)))
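In these trace-info entries the trailing 0 placeholder becomes an explicit nanojit access set. ACC_STORE_ANY is the most conservative annotation: it tells the LIR optimizer that the native call may read or write any memory, so no load can be hoisted or CSE'd across it. A hedged sketch of how an optimizer consumes such a set (the struct layout is illustrative, not nanojit's actual CallInfo):

struct CallInfoSketch {
    uintptr_t address;   // native entry point
    uint8_t   accSet;    // memory the callee may touch, e.g. ACC_STORE_ANY
};

// After emitting a call, an optimizer invalidates cached loads only if
// the call's access set can alias them:
//   if (call->accSet & ACC_STORE_ANY)
//       forgetAllCachedLoads();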
static JSFunctionSpec object_methods[] = {
#if JS_HAS_TOSOURCE
@ -2853,7 +2856,7 @@ static inline bool
InitScopeForObject(JSContext* cx, JSObject* obj, JSObject* proto, JSObjectOps* ops)
{
JS_ASSERT(ops->isNative());
JS_ASSERT(proto == OBJ_GET_PROTO(cx, obj));
JS_ASSERT(proto == obj->getProto());
/* Share proto's emptyScope only if obj is similar to proto. */
JSClass *clasp = OBJ_GET_CLASS(cx, obj);
@ -2924,8 +2927,10 @@ js_NewObjectWithGivenProto(JSContext *cx, JSClass *clasp, JSObject *proto,
if (clasp == &js_FunctionClass && !objectSize) {
obj = (JSObject*) js_NewGCFunction(cx);
#ifdef DEBUG
memset((uint8 *) obj + sizeof(JSObject), JS_FREE_PATTERN,
sizeof(JSFunction) - sizeof(JSObject));
if (obj) {
memset((uint8 *) obj + sizeof(JSObject), JS_FREE_PATTERN,
sizeof(JSFunction) - sizeof(JSObject));
}
#endif
} else {
JS_ASSERT(!objectSize || objectSize == sizeof(JSObject));
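The DEBUG-only memset poisons the JSFunction tail that extends past the JSObject header; the added null check ensures this happens only when js_NewGCFunction actually returned memory. The general pattern, with hypothetical names:

Thing *t = AllocateThing();          /* may return NULL under OOM */
#ifdef DEBUG
if (t) {
    /* Poison only the derived-type tail, and only on success. */
    memset((uint8 *) t + sizeof(Base), FREE_PATTERN,
           sizeof(Thing) - sizeof(Base));
}
#endif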
@ -3053,7 +3058,8 @@ js_Object_tn(JSContext* cx, JSObject* proto)
}
JS_DEFINE_TRCINFO_1(js_Object,
(2, (extern, CONSTRUCTOR_RETRY, js_Object_tn, CONTEXT, CALLEE_PROTOTYPE, 0, 0)))
(2, (extern, CONSTRUCTOR_RETRY, js_Object_tn, CONTEXT, CALLEE_PROTOTYPE, 0,
nanojit::ACC_STORE_ANY)))
JSObject* FASTCALL
js_NonEmptyObject(JSContext* cx, JSObject* proto)
@ -3077,7 +3083,8 @@ js_NonEmptyObject(JSContext* cx, JSObject* proto)
return obj;
}
JS_DEFINE_CALLINFO_2(extern, CONSTRUCTOR_RETRY, js_NonEmptyObject, CONTEXT, CALLEE_PROTOTYPE, 0, 0)
JS_DEFINE_CALLINFO_2(extern, CONSTRUCTOR_RETRY, js_NonEmptyObject, CONTEXT, CALLEE_PROTOTYPE, 0,
nanojit::ACC_STORE_ANY)
static inline JSObject*
NewNativeObject(JSContext* cx, JSClass* clasp, JSObject* proto,
@ -3119,7 +3126,7 @@ js_NewInstance(JSContext *cx, JSClass *clasp, JSObject *ctor)
proto = JSVAL_TO_OBJECT(pval);
} else if (pval == JSVAL_HOLE) {
/* No ctor.prototype yet, inline and optimize fun_resolve's prototype code. */
proto = js_NewObject(cx, clasp, NULL, OBJ_GET_PARENT(cx, ctor));
proto = js_NewObject(cx, clasp, NULL, ctor->getParent());
if (!proto)
return NULL;
if (!js_SetClassPrototype(cx, ctor, proto, JSPROP_ENUMERATE | JSPROP_PERMANENT))
@ -3136,7 +3143,8 @@ js_NewInstance(JSContext *cx, JSClass *clasp, JSObject *ctor)
JSObject::defaultPrivate(clasp));
}
JS_DEFINE_CALLINFO_3(extern, CONSTRUCTOR_RETRY, js_NewInstance, CONTEXT, CLASS, OBJECT, 0, 0)
JS_DEFINE_CALLINFO_3(extern, CONSTRUCTOR_RETRY, js_NewInstance, CONTEXT, CLASS, OBJECT, 0,
nanojit::ACC_STORE_ANY)
#else /* !JS_TRACER */
@ -3262,7 +3270,7 @@ with_LookupProperty(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
flags = js_InferFlags(cx, flags);
flags |= JSRESOLVE_WITH;
JSAutoResolveFlags rf(cx, flags);
JSObject *proto = OBJ_GET_PROTO(cx, obj);
JSObject *proto = obj->getProto();
if (!proto)
return js_LookupProperty(cx, obj, id, objp, propp);
return proto->lookupProperty(cx, id, objp, propp);
@ -3271,7 +3279,7 @@ with_LookupProperty(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
static JSBool
with_GetProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
{
JSObject *proto = OBJ_GET_PROTO(cx, obj);
JSObject *proto = obj->getProto();
if (!proto)
return js_GetProperty(cx, obj, id, vp);
return proto->getProperty(cx, id, vp);
@ -3280,7 +3288,7 @@ with_GetProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
static JSBool
with_SetProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
{
JSObject *proto = OBJ_GET_PROTO(cx, obj);
JSObject *proto = obj->getProto();
if (!proto)
return js_SetProperty(cx, obj, id, vp);
return proto->setProperty(cx, id, vp);
@ -3290,7 +3298,7 @@ static JSBool
with_GetAttributes(JSContext *cx, JSObject *obj, jsid id, JSProperty *prop,
uintN *attrsp)
{
JSObject *proto = OBJ_GET_PROTO(cx, obj);
JSObject *proto = obj->getProto();
if (!proto)
return js_GetAttributes(cx, obj, id, prop, attrsp);
return proto->getAttributes(cx, id, prop, attrsp);
@ -3300,7 +3308,7 @@ static JSBool
with_SetAttributes(JSContext *cx, JSObject *obj, jsid id, JSProperty *prop,
uintN *attrsp)
{
JSObject *proto = OBJ_GET_PROTO(cx, obj);
JSObject *proto = obj->getProto();
if (!proto)
return js_SetAttributes(cx, obj, id, prop, attrsp);
return proto->setAttributes(cx, id, prop, attrsp);
@ -3309,7 +3317,7 @@ with_SetAttributes(JSContext *cx, JSObject *obj, jsid id, JSProperty *prop,
static JSBool
with_DeleteProperty(JSContext *cx, JSObject *obj, jsid id, jsval *rval)
{
JSObject *proto = OBJ_GET_PROTO(cx, obj);
JSObject *proto = obj->getProto();
if (!proto)
return js_DeleteProperty(cx, obj, id, rval);
return proto->deleteProperty(cx, id, rval);
@ -3318,7 +3326,7 @@ with_DeleteProperty(JSContext *cx, JSObject *obj, jsid id, jsval *rval)
static JSBool
with_DefaultValue(JSContext *cx, JSObject *obj, JSType hint, jsval *vp)
{
JSObject *proto = OBJ_GET_PROTO(cx, obj);
JSObject *proto = obj->getProto();
if (!proto)
return js_DefaultValue(cx, obj, hint, vp);
return proto->defaultValue(cx, hint, vp);
@ -3328,7 +3336,7 @@ static JSBool
with_Enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op,
jsval *statep, jsid *idp)
{
JSObject *proto = OBJ_GET_PROTO(cx, obj);
JSObject *proto = obj->getProto();
if (!proto)
return js_Enumerate(cx, obj, enum_op, statep, idp);
return proto->enumerate(cx, enum_op, statep, idp);
@ -3338,7 +3346,7 @@ static JSBool
with_CheckAccess(JSContext *cx, JSObject *obj, jsid id, JSAccessMode mode,
jsval *vp, uintN *attrsp)
{
JSObject *proto = OBJ_GET_PROTO(cx, obj);
JSObject *proto = obj->getProto();
if (!proto)
return js_CheckAccess(cx, obj, id, mode, vp, attrsp);
return proto->checkAccess(cx, id, mode, vp, attrsp);
@ -3353,7 +3361,7 @@ with_TypeOf(JSContext *cx, JSObject *obj)
static JSObject *
with_ThisObject(JSContext *cx, JSObject *obj)
{
JSObject *proto = OBJ_GET_PROTO(cx, obj);
JSObject *proto = obj->getProto();
if (!proto)
return obj;
return proto->thisObject(cx);
@ -3483,7 +3491,7 @@ js_PutBlockObject(JSContext *cx, JSBool normalUnwind)
/* We must clear the private slot even with errors. */
obj->setPrivate(NULL);
fp->scopeChain = OBJ_GET_PARENT(cx, obj);
fp->scopeChain = obj->getParent();
return normalUnwind;
}
@ -3597,7 +3605,7 @@ js_XDRBlockObject(JSXDRState *xdr, JSObject **objp)
if (xdr->mode == JSXDR_ENCODE) {
obj = *objp;
parent = OBJ_GET_PARENT(cx, obj);
parent = obj->getParent();
parentId = (xdr->script->objectsOffset == 0)
? NO_PARENT_INDEX
: FindObjectIndex(xdr->script->objects(), parent);
@ -3628,7 +3636,7 @@ js_XDRBlockObject(JSXDRState *xdr, JSObject **objp)
parent = NULL;
else
parent = xdr->script->getObject(parentId);
STOBJ_SET_PARENT(obj, parent);
obj->setParent(parent);
}
JSAutoTempValueRooter tvr(cx, obj);
@ -3657,7 +3665,7 @@ js_XDRBlockObject(JSXDRState *xdr, JSObject **objp)
sprop = sprop ? sprop->parent : OBJ_SCOPE(obj)->lastProperty();
} while (!sprop->hasShortID());
JS_ASSERT(sprop->getter == block_getProperty);
JS_ASSERT(sprop->getter() == block_getProperty);
propid = sprop->id;
JS_ASSERT(JSID_IS_ATOM(propid));
atom = JSID_TO_ATOM(propid);
@ -3823,7 +3831,7 @@ js_InitClass(JSContext *cx, JSObject *obj, JSObject *parent_proto,
/* Bootstrap Function.prototype (see also JS_InitStandardClasses). */
if (OBJ_GET_CLASS(cx, ctor) == clasp)
OBJ_SET_PROTO(cx, ctor, proto);
ctor->setProto(proto);
}
/* Add properties and methods to the prototype and the constructor. */
@ -4022,7 +4030,7 @@ js_GetClassObject(JSContext *cx, JSObject *obj, JSProtoKey key,
JSObjectOp init;
jsval v;
while ((tmp = OBJ_GET_PARENT(cx, obj)) != NULL)
while ((tmp = obj->getParent()) != NULL)
obj = tmp;
if (!(OBJ_GET_CLASS(cx, obj)->flags & JSCLASS_IS_GLOBAL)) {
*objp = NULL;
@ -4068,7 +4076,7 @@ js_GetClassObject(JSContext *cx, JSObject *obj, JSProtoKey key,
JSBool
js_SetClassObject(JSContext *cx, JSObject *obj, JSProtoKey key, JSObject *cobj)
{
JS_ASSERT(!OBJ_GET_PARENT(cx, obj));
JS_ASSERT(!obj->getParent());
if (!(OBJ_GET_CLASS(cx, obj)->flags & JSCLASS_IS_GLOBAL))
return JS_TRUE;
@ -4099,7 +4107,7 @@ js_FindClassObject(JSContext *cx, JSObject *start, JSProtoKey protoKey,
/* Find the topmost object in the scope chain. */
do {
obj = start;
start = OBJ_GET_PARENT(cx, obj);
start = obj->getParent();
} while (start);
} else {
obj = cx->globalObject;
@ -4178,7 +4186,7 @@ js_ConstructObject(JSContext *cx, JSClass *clasp, JSObject *proto,
*/
ctor = JSVAL_TO_OBJECT(cval);
if (!parent)
parent = OBJ_GET_PARENT(cx, ctor);
parent = ctor->getParent();
if (!proto) {
if (!ctor->getProperty(cx, ATOM_TO_JSID(cx->runtime->atomState.classPrototypeAtom),
&rval)) {
@ -4327,7 +4335,7 @@ PurgeProtoChain(JSContext *cx, JSObject *obj, jsid id)
while (obj) {
if (!OBJ_IS_NATIVE(obj)) {
obj = OBJ_GET_PROTO(cx, obj);
obj = obj->getProto();
continue;
}
JS_LOCK_OBJ(cx, obj);
@ -4338,7 +4346,7 @@ PurgeProtoChain(JSContext *cx, JSObject *obj, jsid id)
scope->shadowingShapeChange(cx, sprop);
JS_UNLOCK_SCOPE(cx, scope);
if (!STOBJ_GET_PARENT(obj)) {
if (!obj->getParent()) {
/*
* All scope chains end in a global object, so this will change
* the global shape. jstracer.cpp assumes that the global shape
@ -4367,7 +4375,7 @@ js_PurgeScopeChainHelper(JSContext *cx, JSObject *obj, jsid id)
* may gain such properties via eval introducing new vars; see bug 490364.
*/
if (STOBJ_GET_CLASS(obj) == &js_CallClass) {
while ((obj = OBJ_GET_PARENT(cx, obj)) != NULL) {
while ((obj = obj->getParent()) != NULL) {
if (PurgeProtoChain(cx, obj, id))
break;
}
@ -4499,10 +4507,10 @@ js_DefineNativeProperty(JSContext *cx, JSObject *obj, jsid id, jsval value,
JSPROP_GETTER | JSPROP_SETTER,
(attrs & JSPROP_GETTER)
? getter
: sprop->getter,
: sprop->getter(),
(attrs & JSPROP_SETTER)
? setter
: sprop->setter);
: sprop->setter());
/* NB: obj == pobj, so we can share unlock code at the bottom. */
if (!sprop)
@ -4694,7 +4702,7 @@ js_LookupPropertyWithFlags(JSContext *cx, JSObject *obj, jsid id, uintN flags,
}
protoIndex = 0;
for (proto = start; proto && proto != obj2;
proto = OBJ_GET_PROTO(cx, proto)) {
proto = proto->getProto()) {
protoIndex++;
}
if (!OBJ_IS_NATIVE(obj2)) {
@ -4807,7 +4815,7 @@ js_FindPropertyHelper(JSContext *cx, jsid id, JSBool cacheResult,
/* Scan entries on the scope chain that we can cache across. */
entry = JS_NO_PROP_CACHE_FILL;
obj = scopeChain;
parent = OBJ_GET_PARENT(cx, obj);
parent = obj->getParent();
for (scopeIndex = 0;
parent
? js_IsCacheableNonGlobalScope(obj)
@ -4834,7 +4842,7 @@ js_FindPropertyHelper(JSContext *cx, jsid id, JSBool cacheResult,
JS_ASSERT(protoIndex == 1);
} else {
/* Call and DeclEnvClass objects have no prototypes. */
JS_ASSERT(!OBJ_GET_PROTO(cx, obj));
JS_ASSERT(!obj->getProto());
JS_ASSERT(protoIndex == 0);
}
}
@ -4853,7 +4861,7 @@ js_FindPropertyHelper(JSContext *cx, jsid id, JSBool cacheResult,
goto out;
}
obj = parent;
parent = OBJ_GET_PARENT(cx, obj);
parent = obj->getParent();
}
for (;;) {
@ -4868,7 +4876,7 @@ js_FindPropertyHelper(JSContext *cx, jsid id, JSBool cacheResult,
* We conservatively assume that a resolve hook could mutate the scope
* chain during JSObject::lookupProperty. So we read parent here again.
*/
parent = OBJ_GET_PARENT(cx, obj);
parent = obj->getParent();
if (!parent) {
pobj = NULL;
break;
@ -4898,7 +4906,7 @@ js_FindIdentifierBase(JSContext *cx, JSObject *scopeChain, jsid id)
* This function should not be called for a global object or from the
* trace and should have a valid cache entry for native scopeChain.
*/
JS_ASSERT(OBJ_GET_PARENT(cx, scopeChain));
JS_ASSERT(scopeChain->getParent());
JS_ASSERT(!JS_ON_TRACE(cx));
JSObject *obj = scopeChain;
@ -4932,8 +4940,8 @@ js_FindIdentifierBase(JSContext *cx, JSObject *scopeChain, jsid id)
}
/* Call and other cacheable objects always have a parent. */
obj = OBJ_GET_PARENT(cx, obj);
if (!OBJ_GET_PARENT(cx, obj))
obj = obj->getParent();
if (!obj->getParent())
return obj;
}
@ -4953,11 +4961,11 @@ js_FindIdentifierBase(JSContext *cx, JSObject *scopeChain, jsid id)
* chain during JSObject::lookupProperty. So we must check if parent is
* not null here even if it wasn't before the lookup.
*/
JSObject *parent = OBJ_GET_PARENT(cx, obj);
JSObject *parent = obj->getParent();
if (!parent)
break;
obj = parent;
} while (OBJ_GET_PARENT(cx, obj));
} while (obj->getParent());
return obj;
}
@ -5091,7 +5099,7 @@ js_GetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN getHow,
/* Convert string indices to integers if appropriate. */
id = js_CheckForStringIndex(id);
aobj = js_GetProtoIfDenseArray(cx, obj);
aobj = js_GetProtoIfDenseArray(obj);
protoIndex = js_LookupPropertyWithFlags(cx, aobj, id, cx->resolveFlags,
&obj2, &prop);
if (protoIndex < 0)
@ -5191,7 +5199,7 @@ js_GetMethod(JSContext *cx, JSObject *obj, jsid id, uintN getHow, jsval *vp)
obj->map->ops->getProperty == js_GetProperty) {
return js_GetPropertyHelper(cx, obj, id, getHow, vp);
}
JS_ASSERT_IF(getHow & JSGET_CACHE_RESULT, OBJ_IS_DENSE_ARRAY(cx, obj));
JS_ASSERT_IF(getHow & JSGET_CACHE_RESULT, obj->isDenseArray());
#if JS_HAS_XML_SUPPORT
if (OBJECT_IS_XML(cx, obj))
return js_GetXMLMethod(cx, obj, id, vp);
@ -5279,7 +5287,7 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN defineHow,
/* We should never add properties to lexical blocks. */
JS_ASSERT(OBJ_GET_CLASS(cx, obj) != &js_BlockClass);
if (!OBJ_GET_PARENT(cx, obj) && !js_CheckUndeclaredVarAssignment(cx))
if (!obj->getParent() && !js_CheckUndeclaredVarAssignment(cx))
return JS_FALSE;
}
sprop = (JSScopeProperty *) prop;
@ -5382,8 +5390,8 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN defineHow,
if (sprop->hasShortID()) {
flags = JSScopeProperty::HAS_SHORTID;
shortid = sprop->shortid;
getter = sprop->getter;
setter = sprop->setter;
getter = sprop->getter();
setter = sprop->setter();
}
/*
@ -5531,7 +5539,7 @@ js_SetAttributes(JSContext *cx, JSObject *obj, jsid id, JSProperty *prop,
}
sprop = (JSScopeProperty *)prop;
sprop = js_ChangeNativePropertyAttrs(cx, obj, sprop, *attrsp, 0,
sprop->getter, sprop->setter);
sprop->getter(), sprop->setter());
if (noprop)
obj->dropProperty(cx, prop);
return (sprop != NULL);
@ -5981,14 +5989,14 @@ js_CheckAccess(JSContext *cx, JSObject *obj, jsid id, JSAccessMode mode,
case JSACC_PROTO:
pobj = obj;
if (!writing)
*vp = OBJECT_TO_JSVAL(OBJ_GET_PROTO(cx, obj));
*vp = OBJECT_TO_JSVAL(obj->getProto());
*attrsp = JSPROP_PERMANENT;
break;
case JSACC_PARENT:
JS_ASSERT(!writing);
pobj = obj;
*vp = OBJECT_TO_JSVAL(OBJ_GET_PARENT(cx, obj));
*vp = OBJECT_TO_JSVAL(obj->getParent());
*attrsp = JSPROP_READONLY | JSPROP_PERMANENT;
break;
@ -6099,7 +6107,7 @@ GetCurrentExecutionContext(JSContext *cx, JSObject *obj, jsval *rval)
JSObject *tmp;
jsval xcval;
while ((tmp = OBJ_GET_PARENT(cx, obj)) != NULL)
while ((tmp = obj->getParent()) != NULL)
obj = tmp;
if (!obj->getProperty(cx, ATOM_TO_JSID(cx->runtime->atomState.ExecutionContextAtom), &xcval))
return JS_FALSE;
@ -6229,7 +6237,7 @@ js_IsDelegate(JSContext *cx, JSObject *obj, jsval v, JSBool *bp)
if (JSVAL_IS_PRIMITIVE(v))
return JS_TRUE;
obj2 = js_GetWrappedObject(cx, JSVAL_TO_OBJECT(v));
while ((obj2 = OBJ_GET_PROTO(cx, obj2)) != NULL) {
while ((obj2 = obj2->getProto()) != NULL) {
if (obj2 == obj) {
*bp = JS_TRUE;
break;
@ -6835,7 +6843,7 @@ JSObject::isCallable()
if (isNative())
return isFunction() || getClass()->call;
return map->ops->call;
return !!map->ops->call;
}
JSBool
@ -7039,11 +7047,11 @@ js_DumpObject(JSObject *obj)
}
fprintf(stderr, "proto ");
dumpValue(OBJECT_TO_JSVAL(STOBJ_GET_PROTO(obj)));
dumpValue(OBJECT_TO_JSVAL(obj->getProto()));
fputc('\n', stderr);
fprintf(stderr, "parent ");
dumpValue(OBJECT_TO_JSVAL(STOBJ_GET_PARENT(obj)));
dumpValue(OBJECT_TO_JSVAL(obj->getParent()));
fputc('\n', stderr);
i = JSSLOT_PRIVATE;

View file

@ -188,6 +188,11 @@ struct JSObjectMap {
explicit JSObjectMap(const JSObjectOps *ops, uint32 shape) : ops(ops), shape(shape) {}
enum { SHAPELESS = 0xffffffff };
private:
/* No copy or assignment semantics. */
JSObjectMap(JSObjectMap &);
void operator=(JSObjectMap &);
};
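Declaring a private, never-defined copy constructor and assignment operator is the standard pre-C++11 way to make a type non-copyable: any accidental copy fails to compile (or to link, if attempted from inside the class). The idiom in isolation:

class NonCopyable {
  public:
    NonCopyable() {}
  private:
    /* No copy or assignment semantics. */
    NonCopyable(NonCopyable &);     // declared, intentionally undefined
    void operator=(NonCopyable &);
};

// NonCopyable a, b;
// NonCopyable c(a);   // error: copy constructor is private
// b = a;              // error: operator= is private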
const uint32 JS_INITIAL_NSLOTS = 5;
@ -362,7 +367,7 @@ struct JSObject {
inline void initSharingEmptyScope(JSClass *clasp, JSObject *proto, JSObject *parent,
jsval privateSlotValue);
inline bool hasSlotsArray() const { return dslots; }
inline bool hasSlotsArray() const { return !!dslots; }
/* This method can only be called when hasSlotsArray() returns true. */
inline void freeSlotsArray(JSContext *cx);
@ -438,25 +443,9 @@ struct JSObject {
inline bool unbrand(JSContext *cx);
};
/* Compatibility macros. */
/* Compatibility macro. */
#define OBJ_IS_NATIVE(obj) ((obj)->isNative())
#define STOBJ_GET_PROTO(obj) ((obj)->getProto())
#define STOBJ_SET_PROTO(obj,proto) ((obj)->setProto(proto))
#define STOBJ_CLEAR_PROTO(obj) ((obj)->clearProto())
#define STOBJ_GET_PARENT(obj) ((obj)->getParent())
#define STOBJ_SET_PARENT(obj,parent) ((obj)->setParent(parent))
#define STOBJ_CLEAR_PARENT(obj) ((obj)->clearParent())
#define OBJ_GET_PROTO(cx,obj) STOBJ_GET_PROTO(obj)
#define OBJ_SET_PROTO(cx,obj,proto) STOBJ_SET_PROTO(obj, proto)
#define OBJ_CLEAR_PROTO(cx,obj) STOBJ_CLEAR_PROTO(obj)
#define OBJ_GET_PARENT(cx,obj) STOBJ_GET_PARENT(obj)
#define OBJ_SET_PARENT(cx,obj,parent) STOBJ_SET_PARENT(obj, parent)
#define OBJ_CLEAR_PARENT(cx,obj) STOBJ_CLEAR_PARENT(obj)
#define JSSLOT_START(clasp) (((clasp)->flags & JSCLASS_HAS_PRIVATE) \
? JSSLOT_PRIVATE + 1 \
: JSSLOT_PRIVATE)
@ -892,7 +881,7 @@ js_IsCacheableNonGlobalScope(JSObject *obj)
{
extern JS_FRIEND_DATA(JSClass) js_CallClass;
extern JS_FRIEND_DATA(JSClass) js_DeclEnvClass;
JS_ASSERT(STOBJ_GET_PARENT(obj));
JS_ASSERT(obj->getParent());
JSClass *clasp = STOBJ_GET_CLASS(obj);
bool cacheable = (clasp == &js_CallClass ||

View file

@ -369,7 +369,7 @@ JO(JSContext *cx, jsval *vp, StringifyContext *scx)
if (iterObj) {
// Always close the iterator, but make sure not to stomp on OK
JS_ASSERT(OBJECT_TO_JSVAL(iterObj) == *keySource);
ok &= js_CloseIterator(cx, *keySource);
ok &= !!js_CloseIterator(cx, *keySource);
}
if (!ok)
@ -590,7 +590,7 @@ Walk(JSContext *cx, jsid id, JSObject *holder, jsval reviver, jsval *vp)
jsval propValue = JSVAL_NULL;
JSAutoTempValueRooter tvr(cx, 1, &propValue);
if(OBJ_IS_ARRAY(cx, obj)) {
if(obj->isArray()) {
jsuint length = 0;
if (!js_GetLengthProperty(cx, obj, &length))
return JS_FALSE;
@ -778,7 +778,7 @@ static JSBool
PushValue(JSContext *cx, JSONParser *jp, JSObject *parent, jsval value)
{
JSBool ok;
if (OBJ_IS_ARRAY(cx, parent)) {
if (parent->isArray()) {
jsuint len;
ok = js_GetLengthProperty(cx, parent, &len);
if (ok) {

View file

@ -261,6 +261,12 @@ extern uintN js_NumCodeSpecs;
extern const char *js_CodeName[];
extern const char js_EscapeMap[];
/* Silence unreferenced formal parameter warnings */
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4100)
#endif
/*
* Return a GC'ed string containing the chars in str, with any non-printing
* chars or quotes (' or " as specified by the quote argument) escaped, and
@ -464,6 +470,10 @@ js_DecompileValueGenerator(JSContext *cx, intN spindex, jsval v,
extern uintN
js_ReconstructStackDepth(JSContext *cx, JSScript *script, jsbytecode *pc);
#ifdef _MSC_VER
#pragma warning(pop)
#endif
JS_END_EXTERN_C
#endif /* jsopcode_h___ */
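The push/disable/pop triple scopes the suppression of C4100 (unreferenced formal parameter) to this header alone, so files including jsopcode.h keep their own warning settings. The pattern in general form:

#ifdef _MSC_VER
#pragma warning(push)            /* save current warning state */
#pragma warning(disable:4100)    /* unreferenced formal parameter */
#endif

/* ... declarations with intentionally unused parameters ... */

#ifdef _MSC_VER
#pragma warning(pop)             /* restore the includer's state */
#endif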

View file

@ -144,7 +144,7 @@ BEGIN_CASE(JSOP_POPN)
JS_ASSERT_IF(obj,
OBJ_BLOCK_DEPTH(cx, obj) + OBJ_BLOCK_COUNT(cx, obj)
<= (size_t) (regs.sp - StackBase(fp)));
for (obj = fp->scopeChain; obj; obj = OBJ_GET_PARENT(cx, obj)) {
for (obj = fp->scopeChain; obj; obj = obj->getParent()) {
clasp = OBJ_GET_CLASS(cx, obj);
if (clasp != &js_BlockClass && clasp != &js_WithClass)
continue;
@ -714,7 +714,7 @@ BEGIN_CASE(JSOP_BINDNAME)
* forms.
*/
obj = fp->scopeChain;
if (!OBJ_GET_PARENT(cx, obj))
if (!obj->getParent())
break;
if (JS_LIKELY(OBJ_IS_NATIVE(obj))) {
PROPERTY_CACHE_TEST(cx, regs.pc, obj, obj2, entry, atom);
@ -1490,7 +1490,7 @@ BEGIN_CASE(JSOP_GETXPROP)
* assuming any property gets it does (e.g., for 'toString'
* from JSOP_NEW) will not be leaked to the calling script.
*/
aobj = js_GetProtoIfDenseArray(cx, obj);
aobj = js_GetProtoIfDenseArray(obj);
if (JS_LIKELY(aobj->map->ops->getProperty == js_GetProperty)) {
PROPERTY_CACHE_TEST(cx, regs.pc, aobj, obj2, entry, atom);
if (!atom) {
@ -1540,7 +1540,7 @@ BEGIN_CASE(JSOP_LENGTH)
str = JSVAL_TO_STRING(lval);
regs.sp[-1] = INT_TO_JSVAL(str->length());
} else if (!JSVAL_IS_PRIMITIVE(lval) &&
(obj = JSVAL_TO_OBJECT(lval), OBJ_IS_ARRAY(cx, obj))) {
(obj = JSVAL_TO_OBJECT(lval), obj->isArray())) {
jsuint length;
/*
@ -1586,7 +1586,7 @@ BEGIN_CASE(JSOP_CALLPROP)
goto error;
}
aobj = js_GetProtoIfDenseArray(cx, obj);
aobj = js_GetProtoIfDenseArray(obj);
if (JS_LIKELY(aobj->map->ops->getProperty == js_GetProperty)) {
PROPERTY_CACHE_TEST(cx, regs.pc, aobj, obj2, entry, atom);
if (!atom) {
@ -1731,7 +1731,7 @@ BEGIN_CASE(JSOP_SETMETHOD)
bool checkForAdd;
if (sprop->attrs & JSPROP_SHARED) {
if (PCVCAP_TAG(entry->vcap) == 0 ||
((obj2 = OBJ_GET_PROTO(cx, obj)) &&
((obj2 = obj->getProto()) &&
OBJ_IS_NATIVE(obj2) &&
OBJ_SHAPE(obj2) == PCVCAP_SHAPE(entry->vcap))) {
goto fast_set_propcache_hit;
@ -1809,7 +1809,7 @@ BEGIN_CASE(JSOP_SETMETHOD)
if (slot != sprop->slot || scope->table) {
JSScopeProperty *sprop2 =
scope->putProperty(cx, sprop->id,
sprop->getter, sprop->setter,
sprop->getter(), sprop->setter(),
slot, sprop->attrs,
sprop->getFlags(), sprop->shortid);
if (!sprop2) {
@ -1897,7 +1897,7 @@ BEGIN_CASE(JSOP_GETELEM)
VALUE_TO_OBJECT(cx, -2, lval, obj);
if (JSVAL_IS_INT(rval)) {
if (OBJ_IS_DENSE_ARRAY(cx, obj)) {
if (obj->isDenseArray()) {
jsuint length;
length = js_DenseArrayCapacity(obj);
@ -1946,7 +1946,7 @@ BEGIN_CASE(JSOP_SETELEM)
FETCH_OBJECT(cx, -3, lval, obj);
FETCH_ELEMENT_ID(obj, -2, id);
do {
if (OBJ_IS_DENSE_ARRAY(cx, obj) && JSID_IS_INT(id)) {
if (obj->isDenseArray() && JSID_IS_INT(id)) {
jsuint length;
length = js_DenseArrayCapacity(obj);
@ -2003,7 +2003,7 @@ BEGIN_CASE(JSOP_NEW)
rval = vp[1];
obj2 = js_NewObject(cx, &js_ObjectClass,
JSVAL_IS_OBJECT(rval) ? JSVAL_TO_OBJECT(rval) : NULL,
OBJ_GET_PARENT(cx, obj));
obj->getParent());
if (!obj2)
goto error;
@ -2133,7 +2133,7 @@ BEGIN_CASE(JSOP_APPLY)
newifp->frame.rval = JSVAL_VOID;
newifp->frame.down = fp;
newifp->frame.annotation = NULL;
newifp->frame.scopeChain = parent = OBJ_GET_PARENT(cx, obj);
newifp->frame.scopeChain = parent = obj->getParent();
newifp->frame.flags = flags;
newifp->frame.blockChain = NULL;
if (script->staticLevel < JS_DISPLAY_SIZE) {
@ -2919,7 +2919,7 @@ BEGIN_CASE(JSOP_DEFFUN)
* windows, and user-defined JS functions precompiled and then shared among
* requests in server-side JS.
*/
if (OBJ_GET_PARENT(cx, obj) != obj2) {
if (obj->getParent() != obj2) {
obj = CloneFunctionObject(cx, fun, obj2);
if (!obj)
goto error;
@ -3091,7 +3091,7 @@ BEGIN_CASE(JSOP_DEFLOCALFUN)
if (!parent)
goto error;
if (OBJ_GET_PARENT(cx, obj) != parent) {
if (obj->getParent() != parent) {
#ifdef JS_TRACER
if (TRACE_RECORDER(cx))
AbortRecording(cx, "DEFLOCALFUN for closure");
@ -3142,7 +3142,7 @@ BEGIN_CASE(JSOP_LAMBDA)
if (FUN_NULL_CLOSURE(fun)) {
parent = fp->scopeChain;
if (OBJ_GET_PARENT(cx, obj) == parent) {
if (obj->getParent() == parent) {
op = JSOp(regs.pc[JSOP_LAMBDA_LENGTH]);
/*
@ -3451,7 +3451,7 @@ BEGIN_CASE(JSOP_INITMETHOD)
scope->shape == scope->lastProperty()->shape);
if (scope->table) {
JSScopeProperty *sprop2 =
scope->addProperty(cx, sprop->id, sprop->getter, sprop->setter, slot,
scope->addProperty(cx, sprop->id, sprop->getter(), sprop->setter(), slot,
sprop->attrs, sprop->getFlags(), sprop->shortid);
if (!sprop2) {
js_FreeSlot(cx, obj, slot);
@ -3528,7 +3528,7 @@ BEGIN_CASE(JSOP_INITELEM)
* initialiser, set the array length to one greater than id.
*/
if (rval == JSVAL_HOLE) {
JS_ASSERT(OBJ_IS_ARRAY(cx, obj));
JS_ASSERT(obj->isArray());
JS_ASSERT(JSID_IS_INT(id));
JS_ASSERT(jsuint(JSID_TO_INT(id)) < JS_ARGS_LENGTH_MAX);
if (js_GetOpcode(cx, script, regs.pc + JSOP_INITELEM_LENGTH) == JSOP_ENDINIT &&
@ -4012,7 +4012,7 @@ BEGIN_CASE(JSOP_ENTERBLOCK)
}
#ifdef DEBUG
JS_ASSERT(fp->blockChain == OBJ_GET_PARENT(cx, obj));
JS_ASSERT(fp->blockChain == obj->getParent());
/*
* The young end of fp->scopeChain may omit blocks if we haven't closed
@ -4022,14 +4022,14 @@ BEGIN_CASE(JSOP_ENTERBLOCK)
* static scope.
*/
obj2 = fp->scopeChain;
while ((clasp = OBJ_GET_CLASS(cx, obj2)) == &js_WithClass)
obj2 = OBJ_GET_PARENT(cx, obj2);
while ((clasp = obj2->getClass()) == &js_WithClass)
obj2 = obj2->getParent();
if (clasp == &js_BlockClass &&
obj2->getPrivate() == fp) {
JSObject *youngestProto = OBJ_GET_PROTO(cx, obj2);
JSObject *youngestProto = obj2->getProto();
JS_ASSERT(!OBJ_IS_CLONED_BLOCK(youngestProto));
parent = obj;
while ((parent = OBJ_GET_PARENT(cx, parent)) != youngestProto)
while ((parent = parent->getParent()) != youngestProto)
JS_ASSERT(parent);
}
#endif
@ -4052,14 +4052,14 @@ BEGIN_CASE(JSOP_LEAVEBLOCK)
* the stack into the clone, and pop it off the chain.
*/
obj = fp->scopeChain;
if (OBJ_GET_PROTO(cx, obj) == fp->blockChain) {
if (obj->getProto() == fp->blockChain) {
JS_ASSERT (OBJ_GET_CLASS(cx, obj) == &js_BlockClass);
if (!js_PutBlockObject(cx, JS_TRUE))
goto error;
}
/* Pop the block chain, too. */
fp->blockChain = OBJ_GET_PARENT(cx, fp->blockChain);
fp->blockChain = fp->blockChain->getParent();
/* Move the result of the expression to the new topmost stack slot. */
if (op == JSOP_LEAVEBLOCKEXPR)

View file

@ -1056,14 +1056,14 @@ JSCompiler::compileScript(JSContext *cx, JSObject *scopeChain, JSStackFrame *cal
JS_DumpArenaStats(stdout);
#endif
script = js_NewScriptFromCG(cx, &cg);
if (script && funbox)
if (script && funbox && script != script->emptyScript())
script->savedCallerFun = true;
#ifdef JS_SCOPE_DEPTH_METER
if (script) {
JSObject *obj = scopeChain;
uintN depth = 1;
while ((obj = OBJ_GET_PARENT(cx, obj)) != NULL)
while ((obj = obj->getParent()) != NULL)
++depth;
JS_BASIC_STATS_ACCUM(&cx->runtime->hostenvScopeDepthStats, depth);
}
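The new script != script->emptyScript() guard exists because compilation can return a shared singleton for empty scripts, and setting savedCallerFun on it would leak the flag into every empty script in the runtime. The bug class in miniature (names hypothetical):

struct Widget {
    bool flag;
    static Widget *sharedEmpty();   /* shared, immutable flyweight instance */
};

void Mark(Widget *w)
{
    /* Mutate only instances we own; the shared empty one is visible
       to every caller and must stay pristine. */
    if (w && w != Widget::sharedEmpty())
        w->flag = true;
}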
@ -1825,8 +1825,8 @@ JSCompiler::newFunction(JSTreeContext *tc, JSAtom *atom, uintN lambda)
parent, atom);
if (fun && !(tc->flags & TCF_COMPILE_N_GO)) {
STOBJ_CLEAR_PARENT(FUN_OBJECT(fun));
STOBJ_CLEAR_PROTO(FUN_OBJECT(fun));
FUN_OBJECT(fun)->clearParent();
FUN_OBJECT(fun)->clearProto();
}
return fun;
}
@ -5601,7 +5601,7 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc)
JS_SCOPE_DEPTH_METERING(++tc->scopeDepth > tc->maxScopeDepth &&
(tc->maxScopeDepth = tc->scopeDepth));
STOBJ_SET_PARENT(obj, tc->blockChain);
obj->setParent(tc->blockChain);
tc->blockChain = obj;
stmt->blockObj = obj;
@ -8605,8 +8605,8 @@ PrimaryExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
if (!obj)
return NULL;
if (!(tc->flags & TCF_COMPILE_N_GO)) {
STOBJ_CLEAR_PARENT(obj);
STOBJ_CLEAR_PROTO(obj);
obj->clearParent();
obj->clearProto();
}
pn->pn_objbox = tc->compiler->newObjectBox(obj);

View file

@ -38,12 +38,6 @@
#include "jsversion.h"
#if JS_HAS_SCRIPT_OBJECT
# define SCRIPT_INIT js_InitScriptClass
#else
# define SCRIPT_INIT js_InitNullClass
#endif
#if JS_HAS_XML_SUPPORT
# define XML_INIT js_InitXMLClass
# define NAMESPACE_INIT js_InitNamespaceClass
@ -66,12 +60,6 @@
# define GENERATOR_INIT js_InitNullClass
#endif
#if JS_HAS_FILE_OBJECT
# define FILE_INIT js_InitFileClass
#else
# define FILE_INIT js_InitNullClass
#endif
/*
* Enumerator codes in the second column must not change -- they are part of
* the JS XDR API. Client modules including jsproto.tbl should consider
@ -88,41 +76,36 @@ JS_PROTO(Math, 7, js_InitMathClass)
JS_PROTO(Number, 8, js_InitNumberClass)
JS_PROTO(String, 9, js_InitStringClass)
JS_PROTO(RegExp, 10, js_InitRegExpClass)
JS_PROTO(Script, 11, SCRIPT_INIT)
JS_PROTO(XML, 12, XML_INIT)
JS_PROTO(Namespace, 13, NAMESPACE_INIT)
JS_PROTO(QName, 14, QNAME_INIT)
JS_PROTO(AnyName, 15, ANYNAME_INIT)
JS_PROTO(AttributeName, 16, ATTRIBUTE_INIT)
JS_PROTO(Error, 17, js_InitExceptionClasses)
JS_PROTO(InternalError, 18, js_InitExceptionClasses)
JS_PROTO(EvalError, 19, js_InitExceptionClasses)
JS_PROTO(RangeError, 20, js_InitExceptionClasses)
JS_PROTO(ReferenceError, 21, js_InitExceptionClasses)
JS_PROTO(SyntaxError, 22, js_InitExceptionClasses)
JS_PROTO(TypeError, 23, js_InitExceptionClasses)
JS_PROTO(URIError, 24, js_InitExceptionClasses)
JS_PROTO(Generator, 25, GENERATOR_INIT)
JS_PROTO(Iterator, 26, js_InitIteratorClasses)
JS_PROTO(StopIteration, 27, js_InitIteratorClasses)
JS_PROTO(File, 28, FILE_INIT)
JS_PROTO(XML, 11, XML_INIT)
JS_PROTO(Namespace, 12, NAMESPACE_INIT)
JS_PROTO(QName, 13, QNAME_INIT)
JS_PROTO(AnyName, 14, ANYNAME_INIT)
JS_PROTO(AttributeName, 15, ATTRIBUTE_INIT)
JS_PROTO(Error, 16, js_InitExceptionClasses)
JS_PROTO(InternalError, 17, js_InitExceptionClasses)
JS_PROTO(EvalError, 18, js_InitExceptionClasses)
JS_PROTO(RangeError, 19, js_InitExceptionClasses)
JS_PROTO(ReferenceError, 20, js_InitExceptionClasses)
JS_PROTO(SyntaxError, 21, js_InitExceptionClasses)
JS_PROTO(TypeError, 22, js_InitExceptionClasses)
JS_PROTO(URIError, 23, js_InitExceptionClasses)
JS_PROTO(Generator, 24, GENERATOR_INIT)
JS_PROTO(Iterator, 25, js_InitIteratorClasses)
JS_PROTO(StopIteration, 26, js_InitIteratorClasses)
JS_PROTO(ArrayBuffer, 27, js_InitTypedArrayClasses)
JS_PROTO(Int8Array, 28, js_InitTypedArrayClasses)
JS_PROTO(Uint8Array, 29, js_InitTypedArrayClasses)
JS_PROTO(Int16Array, 30, js_InitTypedArrayClasses)
JS_PROTO(Uint16Array, 31, js_InitTypedArrayClasses)
JS_PROTO(Int32Array, 32, js_InitTypedArrayClasses)
JS_PROTO(Uint32Array, 33, js_InitTypedArrayClasses)
JS_PROTO(Float32Array, 34, js_InitTypedArrayClasses)
JS_PROTO(Float64Array, 35, js_InitTypedArrayClasses)
JS_PROTO(Uint8ClampedArray, 36, js_InitTypedArrayClasses)
JS_PROTO(ArrayBuffer, 29, js_InitTypedArrayClasses)
JS_PROTO(Int8Array, 30, js_InitTypedArrayClasses)
JS_PROTO(Uint8Array, 31, js_InitTypedArrayClasses)
JS_PROTO(Int16Array, 32, js_InitTypedArrayClasses)
JS_PROTO(Uint16Array, 33, js_InitTypedArrayClasses)
JS_PROTO(Int32Array, 34, js_InitTypedArrayClasses)
JS_PROTO(Uint32Array, 35, js_InitTypedArrayClasses)
JS_PROTO(Float32Array, 36, js_InitTypedArrayClasses)
JS_PROTO(Float64Array, 37, js_InitTypedArrayClasses)
JS_PROTO(Uint8ClampedArray, 38, js_InitTypedArrayClasses)
#undef SCRIPT_INIT
#undef XML_INIT
#undef NAMESPACE_INIT
#undef QNAME_INIT
#undef ANYNAME_INIT
#undef ATTRIBUTE_INIT
#undef GENERATOR_INIT
#undef FILE_INIT

View file

@ -147,7 +147,7 @@ extern "C++" {
namespace js {
class TraceRecorder;
class TraceMonitor;
struct TraceMonitor;
class CallStack;
class ContextAllocPolicy;

View file

@ -156,9 +156,20 @@ TraceRecorder::downSnapshot(FrameInfo* downFrame)
unsigned exitTypeMapLen = downPostSlots + 1 + ngslots;
TraceType* exitTypeMap = (TraceType*)alloca(sizeof(TraceType) * exitTypeMapLen);
TraceType* typeMap = downFrame->get_typemap();
/* Add stack slots. */
for (unsigned i = 0; i < downPostSlots; i++)
exitTypeMap[i] = typeMap[i];
exitTypeMap[downPostSlots] = determineSlotType(&stackval(-1));
/* Add the return type. */
JS_ASSERT_IF(*cx->fp->regs->pc != JSOP_RETURN, *cx->fp->regs->pc == JSOP_STOP);
if (*cx->fp->regs->pc == JSOP_RETURN)
exitTypeMap[downPostSlots] = determineSlotType(&stackval(-1));
else
exitTypeMap[downPostSlots] = TT_PSEUDOBOOLEAN;
/* Add global types. */
determineGlobalTypes(&exitTypeMap[downPostSlots + 1]);
VMSideExit* exit = (VMSideExit*)
@ -236,7 +247,7 @@ TraceRecorder::upRecursion()
*/
fi->spdist = cx->fp->down->regs->sp - cx->fp->down->slots;
JS_ASSERT(cx->fp->argc == cx->fp->down->argc);
fi->set_argc(cx->fp->argc, false);
fi->set_argc(uint16(cx->fp->argc), false);
fi->callerHeight = downPostSlots;
fi->callerArgc = cx->fp->down->argc;
@ -246,9 +257,9 @@ TraceRecorder::upRecursion()
* This is always safe because this point is only reached on simple "call myself"
* recursive functions.
*/
#if defined DEBUG
AssertDownFrameIsConsistent(cx, anchor, fi);
#endif
fi = anchor->recursive_down;
} else if (recursive_pc != fragment->root->ip) {
/*
@ -296,12 +307,19 @@ TraceRecorder::upRecursion()
*/
exit = downSnapshot(fi);
LIns* rval_ins = (!anchor || anchor->exitType != RECURSIVE_SLURP_FAIL_EXIT) ?
get(&stackval(-1)) :
NULL;
JS_ASSERT(rval_ins != NULL);
LIns* rval_ins;
if (*cx->fp->regs->pc == JSOP_RETURN) {
rval_ins = (!anchor || anchor->exitType != RECURSIVE_SLURP_FAIL_EXIT) ?
get(&stackval(-1)) :
NULL;
JS_ASSERT(rval_ins);
} else {
rval_ins = INS_CONST(JSVAL_TO_SPECIAL(JSVAL_VOID));
}
TraceType returnType = exit->stackTypeMap()[downPostSlots];
if (returnType == TT_INT32) {
JS_ASSERT(*cx->fp->regs->pc == JSOP_RETURN);
JS_ASSERT(determineSlotType(&stackval(-1)) == TT_INT32);
JS_ASSERT(isPromoteInt(rval_ins));
rval_ins = demote(lir, rval_ins);
@ -310,7 +328,10 @@ TraceRecorder::upRecursion()
UpRecursiveSlotMap slotMap(*this, downPostSlots, rval_ins);
for (unsigned i = 0; i < downPostSlots; i++)
slotMap.addSlot(exit->stackType(i));
slotMap.addSlot(&stackval(-1));
if (*cx->fp->regs->pc == JSOP_RETURN)
slotMap.addSlot(&stackval(-1));
else
slotMap.addSlot(TT_PSEUDOBOOLEAN);
VisitGlobalSlots(slotMap, cx, *tree->globalSlots);
if (recursive_pc == (jsbytecode*)fragment->root->ip) {
debug_only_print0(LC_TMTracer, "Compiling up-recursive loop...\n");
@ -447,10 +468,15 @@ TraceRecorder::slurpDownFrames(jsbytecode* return_pc)
cx->fp->regs->pc = exit->pc;
CaptureStackTypes(cx, frameDepth, typeMap);
cx->fp->regs->pc = oldpc;
if (!anchor || anchor->exitType != RECURSIVE_SLURP_FAIL_EXIT)
typeMap[downPostSlots] = determineSlotType(&stackval(-1));
else
if (!anchor || anchor->exitType != RECURSIVE_SLURP_FAIL_EXIT) {
JS_ASSERT_IF(*cx->fp->regs->pc != JSOP_RETURN, *cx->fp->regs->pc == JSOP_STOP);
if (*cx->fp->regs->pc == JSOP_RETURN)
typeMap[downPostSlots] = determineSlotType(&stackval(-1));
else
typeMap[downPostSlots] = TT_PSEUDOBOOLEAN;
} else {
typeMap[downPostSlots] = anchor->stackTypeMap()[anchor->numStackSlots - 1];
}
determineGlobalTypes(&typeMap[exit->numStackSlots]);
#if defined JS_JIT_SPEW
TreevisLogExit(cx, exit);
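Both hunks apply the same rule: a frame that exits through JSOP_RETURN contributes the type of the value on top of the stack, while JSOP_STOP (the implicit return at the end of a script) contributes undefined, modeled as TT_PSEUDOBOOLEAN. The decision, distilled (the helper name is hypothetical; determineSlotType and stackval are the recorder's own):

TraceType ReturnSlotType(JSContext *cx, TraceRecorder &r)
{
    JSOp op = JSOp(*cx->fp->regs->pc);
    JS_ASSERT(op == JSOP_RETURN || op == JSOP_STOP);
    if (op == JSOP_RETURN)
        return r.determineSlotType(&r.stackval(-1));  /* explicit return value */
    return TT_PSEUDOBOOLEAN;                          /* undefined */
}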
@ -466,39 +492,43 @@ TraceRecorder::slurpDownFrames(jsbytecode* return_pc)
* grabbed safely.
*/
LIns* rval_ins;
TraceType returnType = exit->stackTypeMap()[downPostSlots];
if (!anchor || anchor->exitType != RECURSIVE_SLURP_FAIL_EXIT) {
rval_ins = get(&stackval(-1));
if (returnType == TT_INT32) {
JS_ASSERT(determineSlotType(&stackval(-1)) == TT_INT32);
JS_ASSERT(isPromoteInt(rval_ins));
rval_ins = demote(lir, rval_ins);
if (*cx->fp->regs->pc == JSOP_RETURN) {
TraceType returnType = exit->stackTypeMap()[downPostSlots];
if (!anchor || anchor->exitType != RECURSIVE_SLURP_FAIL_EXIT) {
rval_ins = get(&stackval(-1));
if (returnType == TT_INT32) {
JS_ASSERT(determineSlotType(&stackval(-1)) == TT_INT32);
JS_ASSERT(isPromoteInt(rval_ins));
rval_ins = demote(lir, rval_ins);
}
/*
* The return value must be written out early, before slurping can fail,
* otherwise it will not be available when there's a type mismatch.
*/
lir->insStorei(rval_ins, lirbuf->sp, exit->sp_adj - sizeof(double));
} else {
switch (returnType)
{
case TT_PSEUDOBOOLEAN:
case TT_INT32:
rval_ins = lir->insLoad(LIR_ld, lirbuf->sp, exit->sp_adj - sizeof(double));
break;
case TT_DOUBLE:
rval_ins = lir->insLoad(LIR_ldf, lirbuf->sp, exit->sp_adj - sizeof(double));
break;
case TT_FUNCTION:
case TT_OBJECT:
case TT_STRING:
case TT_NULL:
rval_ins = lir->insLoad(LIR_ldp, lirbuf->sp, exit->sp_adj - sizeof(double));
break;
default:
JS_NOT_REACHED("unknown type");
RETURN_STOP_A("unknown type");
}
}
/*
* The return value must be written out early, before slurping can fail,
* otherwise it will not be available when there's a type mismatch.
*/
lir->insStorei(rval_ins, lirbuf->sp, exit->sp_adj - sizeof(double));
} else {
switch (returnType)
{
case TT_PSEUDOBOOLEAN:
case TT_INT32:
rval_ins = lir->insLoad(LIR_ld, lirbuf->sp, exit->sp_adj - sizeof(double));
break;
case TT_DOUBLE:
rval_ins = lir->insLoad(LIR_ldf, lirbuf->sp, exit->sp_adj - sizeof(double));
break;
case TT_FUNCTION:
case TT_OBJECT:
case TT_STRING:
case TT_NULL:
rval_ins = lir->insLoad(LIR_ldp, lirbuf->sp, exit->sp_adj - sizeof(double));
break;
default:
JS_NOT_REACHED("unknown type");
RETURN_STOP_A("unknown type");
}
rval_ins = INS_CONST(JSVAL_TO_SPECIAL(JSVAL_VOID));
}
/* Slurp */
@ -526,7 +556,7 @@ TraceRecorder::slurpDownFrames(jsbytecode* return_pc)
&info);
/* scopeChain */
slurpSlot(addName(lir->insLoad(LIR_ldp, fp_ins, offsetof(JSStackFrame, scopeChain)), "scopeChain"),
(jsval*) &fp->scopeChain,
&fp->scopeChainVal,
&info);
/* vars */
LIns* slots_ins = addName(lir->insLoad(LIR_ldp, fp_ins, offsetof(JSStackFrame, slots)),
@ -560,7 +590,10 @@ TraceRecorder::slurpDownFrames(jsbytecode* return_pc)
RecursiveSlotMap slotMap(*this, downPostSlots, rval_ins);
for (unsigned i = 0; i < downPostSlots; i++)
slotMap.addSlot(typeMap[i]);
slotMap.addSlot(&stackval(-1), typeMap[downPostSlots]);
if (*cx->fp->regs->pc == JSOP_RETURN)
slotMap.addSlot(&stackval(-1), typeMap[downPostSlots]);
else
slotMap.addSlot(TT_PSEUDOBOOLEAN);
VisitGlobalSlots(slotMap, cx, *tree->globalSlots);
debug_only_print0(LC_TMTracer, "Compiling up-recursive slurp...\n");
exit = copy(exit);

View file

@ -502,7 +502,7 @@ ProcessOp(CompilerState *state, REOpData *opData, RENode **operandStack,
(state->flags & JSREG_FOLD) == 0) {
result->op = REOP_ALTPREREQ2;
result->u.altprereq.ch1 = ((RENode *) result->u.kid2)->u.flat.chr;
result->u.altprereq.ch2 = ((RENode *) result->kid)->u.ucclass.index;
result->u.altprereq.ch2 = jschar(((RENode *) result->kid)->u.ucclass.index);
/* ALTPREREQ2, <end>, uch1, uch2, <next>, ...,
JUMP, <end> ... ENDALT */
state->progLength += 13;
@ -515,7 +515,7 @@ ProcessOp(CompilerState *state, REOpData *opData, RENode **operandStack,
result->op = REOP_ALTPREREQ2;
result->u.altprereq.ch1 = ((RENode *) result->kid)->u.flat.chr;
result->u.altprereq.ch2 =
((RENode *) result->u.kid2)->u.ucclass.index;
jschar(((RENode *) result->u.kid2)->u.ucclass.index);
/* ALTPREREQ2, <end>, uch1, uch2, <next>, ...,
JUMP, <end> ... ENDALT */
state->progLength += 13;
@ -934,7 +934,7 @@ CalculateBitmapSize(CompilerState *state, RENode *target, const jschar *src,
while (src != end) {
JSBool canStartRange = JS_TRUE;
uintN localMax = 0;
jschar localMax = 0;
switch (*src) {
case '\\':
@ -987,7 +987,7 @@ lexHex:
}
n = (n << 4) | digit;
}
localMax = n;
localMax = jschar(n);
break;
case 'd':
canStartRange = JS_FALSE;
@ -1048,7 +1048,7 @@ lexHex:
src--;
}
}
localMax = n;
localMax = jschar(n);
break;
default:
@ -1089,8 +1089,8 @@ lexHex:
for (i = rangeStart; i <= localMax; i++) {
jschar uch, dch;
uch = upcase(i);
dch = inverse_upcase(i);
uch = jschar(upcase(i));
dch = inverse_upcase(jschar(i));
maxch = JS_MAX(maxch, uch);
maxch = JS_MAX(maxch, dch);
}
@ -1098,9 +1098,9 @@ lexHex:
}
if (localMax > max)
max = localMax;
max = uintN(localMax);
}
target->u.ucclass.bmsize = max;
target->u.ucclass.bmsize = uint16(max);
return JS_TRUE;
}
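The casts added throughout this file (jschar(n), uint16(max), and the like) are annotations rather than computations: each value is already known to fit, and spelling the narrowing out silences MSVC truncation warnings without changing behavior. In isolation:

#include <cassert>

typedef unsigned short jschar;   /* 16-bit code unit */

static jschar NarrowToChar(unsigned n)
{
    assert(n <= 0xFFFF);   /* document the range assumption the cast relies on */
    return jschar(n);      /* explicit: "I meant to truncate here" */
}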
@ -1973,7 +1973,7 @@ CompileRegExpToAST(JSContext* cx, JSTokenStream* ts,
return JS_FALSE;
state.cpbegin = state.cp;
state.cpend = state.cp + len;
state.flags = flags;
state.flags = uint16(flags);
state.parenCount = 0;
state.classCount = 0;
state.progLength = 0;
@ -2352,7 +2352,7 @@ class RegExpNativeCompiler {
LIns* to_fail = lir->insBranch(LIR_jf, lir->ins2(LIR_plt, pos, cpend), 0);
if (!fails.append(to_fail))
return NULL;
LIns* text_ch = lir->insLoad(LIR_ldcs, pos, 0);
LIns* text_ch = lir->insLoad(LIR_ldzs, pos, 0, ACC_READONLY);
// Extra characters that need to be compared against when doing folding.
struct extra {
@ -2573,7 +2573,7 @@ class RegExpNativeCompiler {
LIns* to_fail = lir->insBranch(LIR_jf, lir->ins2(LIR_plt, pos, cpend), 0);
if (!fails.append(to_fail))
return NULL;
LIns* text_ch = lir->insLoad(LIR_ldcs, pos, 0);
LIns* text_ch = lir->insLoad(LIR_ldzs, pos, 0, ACC_READONLY);
if (!fails.append(lir->insBranch(LIR_jf,
lir->ins2(LIR_le, text_ch, lir->insImm(charSet->length)),
0))) {
@ -2581,7 +2581,8 @@ class RegExpNativeCompiler {
}
LIns* byteIndex = lir->ins_i2p(lir->ins2(LIR_rsh, text_ch, lir->insImm(3)));
LIns* bitmap = lir->insImmPtr(bitmapData);
LIns* byte = lir->insLoad(LIR_ldcb, lir->ins2(LIR_piadd, bitmap, byteIndex), (int) 0);
LIns* byte = lir->insLoad(LIR_ldzb, lir->ins2(LIR_piadd, bitmap, byteIndex), (int) 0,
ACC_READONLY);
LIns* bitMask = lir->ins2(LIR_lsh, lir->insImm(1),
lir->ins2(LIR_and, text_ch, lir->insImm(0x7)));
LIns* test = lir->ins2(LIR_eq, lir->ins2(LIR_and, byte, bitMask), lir->insImm(0));
@ -2600,7 +2601,7 @@ class RegExpNativeCompiler {
chr = lir->ins2(LIR_lsh, chr, sizeLog2);
}
LIns *addr = lir->ins2(LIR_piadd, lir->insImmPtr(tbl), lir->ins_u2p(chr));
return lir->insLoad(LIR_ldcb, addr, 0);
return lir->insLoad(LIR_ldzb, addr, 0, ACC_READONLY);
}
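Two things changed in these loads: the opcode (LIR_ldcs/LIR_ldcb, "constant" loads, became LIR_ldzs/LIR_ldzb, plain zero-extending loads), and the constancy claim moved into a separate ACC_READONLY access-set argument. Width and aliasing are now orthogonal, as the two forms show when contrasted:

// Old: constancy baked into the opcode.
//     LIns *ch = lir->insLoad(LIR_ldcs, pos, 0);
// New: the opcode says only "load 16 bits, zero-extend"; ACC_READONLY
// tells the optimizer the memory never changes, so the load can still
// be hoisted or CSE'd.
//     LIns *ch = lir->insLoad(LIR_ldzs, pos, 0, ACC_READONLY);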
/* Compile a builtin character class. */
@ -2609,7 +2610,7 @@ class RegExpNativeCompiler {
/* All the builtins checked below consume one character. */
if (!fails.append(lir->insBranch(LIR_jf, lir->ins2(LIR_plt, pos, cpend), 0)))
return NULL;
LIns *chr = lir->insLoad(LIR_ldcs, pos, 0);
LIns *chr = lir->insLoad(LIR_ldzs, pos, 0, ACC_READONLY);
switch (node->op) {
case REOP_DOT:
@ -3404,7 +3405,7 @@ js_NewRegExp(JSContext *cx, JSTokenStream *ts,
re = tmp;
}
re->flags = flags;
re->flags = uint16(flags);
re->parenCount = state.parenCount;
re->source = str;
@ -3887,9 +3888,9 @@ ProcessCharSet(JSContext *cx, JSRegExp *re, RECharSet *charSet)
for (i = rangeStart; i <= thisCh; i++) {
jschar uch, dch;
AddCharacterToCharSet(charSet, i);
uch = upcase(i);
dch = inverse_upcase(i);
AddCharacterToCharSet(charSet, jschar(i));
uch = jschar(upcase(i));
dch = inverse_upcase(jschar(i));
if (i != uch)
AddCharacterToCharSet(charSet, uch);
if (i != dch)
@ -3901,7 +3902,7 @@ ProcessCharSet(JSContext *cx, JSRegExp *re, RECharSet *charSet)
inRange = JS_FALSE;
} else {
if (re->flags & JSREG_FOLD) {
AddCharacterToCharSet(charSet, upcase(thisCh));
AddCharacterToCharSet(charSet, jschar(upcase(thisCh)));
AddCharacterToCharSet(charSet, inverse_upcase(thisCh));
} else {
AddCharacterToCharSet(charSet, thisCh);
@ -4976,7 +4977,7 @@ js_ExecuteRegExp(JSContext *cx, JSRegExp *re, JSString *str, size_t *indexp,
res = &cx->regExpStatics;
res->input = str;
res->parenCount = re->parenCount;
res->parenCount = uint16(re->parenCount);
if (re->parenCount == 0) {
res->lastParen = js_EmptySubString;
} else {
@ -5114,7 +5115,7 @@ regexp_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
if (!JSVAL_IS_INT(id))
return JS_TRUE;
while (OBJ_GET_CLASS(cx, obj) != &js_RegExpClass) {
obj = OBJ_GET_PROTO(cx, obj);
obj = obj->getProto();
if (!obj)
return JS_TRUE;
}
@ -5160,7 +5161,7 @@ regexp_setProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
if (!JSVAL_IS_INT(id))
return ok;
while (OBJ_GET_CLASS(cx, obj) != &js_RegExpClass) {
obj = OBJ_GET_PROTO(cx, obj);
obj = obj->getProto();
if (!obj)
return JS_TRUE;
}
@ -5436,8 +5437,8 @@ js_XDRRegExpObject(JSXDRState *xdr, JSObject **objp)
obj = js_NewObject(xdr->cx, &js_RegExpClass, NULL, NULL);
if (!obj)
return JS_FALSE;
STOBJ_CLEAR_PARENT(obj);
STOBJ_CLEAR_PROTO(obj);
obj->clearParent();
obj->clearProto();
re = js_NewRegExp(xdr->cx, NULL, source, (uint8)flagsword, JS_FALSE);
if (!re)
return JS_FALSE;
@ -5875,7 +5876,8 @@ js_CloneRegExpObject(JSContext *cx, JSObject *obj, JSObject *proto)
}
#ifdef JS_TRACER
JS_DEFINE_CALLINFO_3(extern, OBJECT, js_CloneRegExpObject, CONTEXT, OBJECT, OBJECT, 0, 0)
JS_DEFINE_CALLINFO_3(extern, OBJECT, js_CloneRegExpObject, CONTEXT, OBJECT, OBJECT, 0,
ACC_STORE_ANY)
#endif
bool

View file

@ -494,7 +494,8 @@ ReportCompileErrorNumberVA(JSContext *cx, JSTokenStream *ts, JSParseNode *pn,
size_t linelength;
jschar *linechars;
char *linebytes;
JSBool warning, ok;
bool warning;
JSBool ok;
JSTokenPos *tp;
uintN index, i;
JSErrorReporter onError;
@ -522,7 +523,7 @@ ReportCompileErrorNumberVA(JSContext *cx, JSTokenStream *ts, JSParseNode *pn,
errorNumber, &message, &report,
!(flags & JSREPORT_UC), ap);
if (!ok) {
warning = JS_FALSE;
warning = false;
goto out;
}
@ -541,14 +542,14 @@ ReportCompileErrorNumberVA(JSContext *cx, JSTokenStream *ts, JSParseNode *pn,
linelength = ts->linebuf.limit - ts->linebuf.base;
linechars = (jschar *)cx->malloc((linelength + 1) * sizeof(jschar));
if (!linechars) {
warning = JS_FALSE;
warning = false;
goto out;
}
memcpy(linechars, ts->linebuf.base, linelength * sizeof(jschar));
linechars[linelength] = 0;
linebytes = js_DeflateString(cx, linechars, linelength);
if (!linebytes) {
warning = JS_FALSE;
warning = false;
goto out;
}
report.linebuf = linebytes;

View file

@ -922,13 +922,8 @@ locked_not_found:
if (!sprop)
goto out_of_memory;
sprop->id = child.id;
sprop->getter = child.getter;
sprop->setter = child.setter;
sprop->slot = child.slot;
sprop->attrs = child.attrs;
sprop->flags = child.flags;
sprop->shortid = child.shortid;
new(sprop) JSScopeProperty(child.id, child.rawGetter, child.rawSetter, child.slot,
child.attrs, child.flags, child.shortid);
sprop->parent = sprop->kids = NULL;
sprop->shape = js_GenerateShape(cx, true);
@ -1105,13 +1100,9 @@ JSScope::newDictionaryProperty(JSContext *cx, const JSScopeProperty &child,
return NULL;
}
dprop->id = child.id;
dprop->getter = child.getter;
dprop->setter = child.setter;
dprop->slot = child.slot;
dprop->attrs = child.attrs;
dprop->flags = child.flags | JSScopeProperty::IN_DICTIONARY;
dprop->shortid = child.shortid;
new (dprop) JSScopeProperty(child.id, child.rawGetter, child.rawSetter, child.slot,
child.attrs, child.flags | JSScopeProperty::IN_DICTIONARY,
child.shortid);
dprop->shape = js_GenerateShape(cx, false);
dprop->childp = NULL;
@ -1291,15 +1282,7 @@ JSScope::addPropertyHelper(JSContext *cx, jsid id,
/* Find or create a property tree node labeled by our arguments. */
JSScopeProperty *sprop;
{
JSScopeProperty child;
child.id = id;
child.getter = getter;
child.setter = setter;
child.slot = slot;
child.attrs = attrs;
child.flags = flags;
child.shortid = shortid;
JSScopeProperty child(id, getter, setter, slot, attrs, flags, shortid);
sprop = getChildProperty(cx, lastProp, child);
}
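Replacing field-by-field initialization with placement new means the new JSScopeProperty constructor, and the assertions it carries, run exactly once for every node the property tree creates, whatever allocator provided the storage. The idiom with a minimal type:

#include <new>    /* placement operator new */

struct Node {
    int id;
    explicit Node(int id) : id(id) {}
};

void ConstructInPlace(void *raw)
{
    /* Construct into pre-allocated storage; no heap allocation here.
       The owner of 'raw' is responsible for any destructor call. */
    new (raw) Node(42);
}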
@ -1415,16 +1398,8 @@ JSScope::putProperty(JSContext *cx, jsid id,
CHECK_ANCESTOR_LINE(this, true);
{
JSScopeProperty child;
/* Find or create a property tree node labeled by our arguments. */
child.id = id;
child.getter = getter;
child.setter = setter;
child.slot = slot;
child.attrs = attrs;
child.flags = flags;
child.shortid = shortid;
JSScopeProperty child(id, getter, setter, slot, attrs, flags, shortid);
sprop = getChildProperty(cx, lastProp, child);
}
@ -1456,7 +1431,7 @@ JSScope::changeProperty(JSContext *cx, JSScopeProperty *sprop,
uintN attrs, uintN mask,
JSPropertyOp getter, JSPropertyOp setter)
{
JSScopeProperty child, *newsprop;
JSScopeProperty *newsprop;
JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, this));
CHECK_ANCESTOR_LINE(this, true);
@ -1471,26 +1446,17 @@ JSScope::changeProperty(JSContext *cx, JSScopeProperty *sprop,
!(attrs & JSPROP_SHARED));
/* Don't allow method properties to be changed to have a getter. */
JS_ASSERT_IF(getter != sprop->getter, !sprop->isMethod());
JS_ASSERT_IF(getter != sprop->rawGetter, !sprop->isMethod());
if (getter == JS_PropertyStub)
getter = NULL;
if (setter == JS_PropertyStub)
setter = NULL;
if (sprop->attrs == attrs &&
sprop->getter == getter &&
sprop->setter == setter) {
if (sprop->attrs == attrs && sprop->getter() == getter && sprop->setter() == setter)
return sprop;
}
child.id = sprop->id;
child.getter = getter;
child.setter = setter;
child.slot = sprop->slot;
child.attrs = attrs;
child.flags = sprop->flags;
child.shortid = sprop->shortid;
JSScopeProperty child(sprop->id, getter, setter, sprop->slot, attrs, sprop->flags,
sprop->shortid);
if (inDictionaryMode()) {
removeDictionaryProperty(sprop);
newsprop = newDictionaryProperty(cx, child, &lastProp);
@ -1518,7 +1484,7 @@ JSScope::changeProperty(JSContext *cx, JSScopeProperty *sprop,
* JSScope::removeProperty because it will free a valid sprop->slot and
* JSScope::putProperty won't re-allocate it.
*/
newsprop = putProperty(cx, child.id, child.getter, child.setter, child.slot,
newsprop = putProperty(cx, child.id, child.rawGetter, child.rawSetter, child.slot,
child.attrs, child.flags, child.shortid);
}
@ -1669,7 +1635,7 @@ JSScope::methodShapeChange(JSContext *cx, JSScopeProperty *sprop, jsval toval)
JS_ASSERT(sprop->methodValue() == prev);
JS_ASSERT(hasMethodBarrier());
JS_ASSERT(object->getClass() == &js_ObjectClass);
JS_ASSERT(!sprop->setter || sprop->setter == js_watch_set);
JS_ASSERT(!sprop->rawSetter || sprop->rawSetter == js_watch_set);
#endif
/*
@ -1678,7 +1644,7 @@ JSScope::methodShapeChange(JSContext *cx, JSScopeProperty *sprop, jsval toval)
* are despecializing from a method memoized in the property tree to a
* plain old function-valued property.
*/
sprop = putProperty(cx, sprop->id, NULL, sprop->setter, sprop->slot,
sprop = putProperty(cx, sprop->id, NULL, sprop->rawSetter, sprop->slot,
sprop->attrs,
sprop->getFlags() & ~JSScopeProperty::METHOD,
sprop->shortid);
@ -1788,11 +1754,11 @@ JSScopeProperty::trace(JSTracer *trc)
#if JS_HAS_GETTER_SETTER
if (attrs & (JSPROP_GETTER | JSPROP_SETTER)) {
if ((attrs & JSPROP_GETTER) && getter) {
if ((attrs & JSPROP_GETTER) && rawGetter) {
JS_SET_TRACING_DETAILS(trc, PrintPropertyGetterOrSetter, this, 0);
js_CallGCMarker(trc, getterObject(), JSTRACE_OBJECT);
}
if ((attrs & JSPROP_SETTER) && setter) {
if ((attrs & JSPROP_SETTER) && rawSetter) {
JS_SET_TRACING_DETAILS(trc, PrintPropertyGetterOrSetter, this, 1);
js_CallGCMarker(trc, setterObject(), JSTRACE_OBJECT);
}
@ -1878,8 +1844,8 @@ JSScopeProperty::dump(JSContext *cx, FILE *fp)
}
fprintf(fp, " g/s %p/%p slot %u attrs %x ",
JS_FUNC_TO_DATA_PTR(void *, getter),
JS_FUNC_TO_DATA_PTR(void *, setter),
JS_FUNC_TO_DATA_PTR(void *, rawGetter),
JS_FUNC_TO_DATA_PTR(void *, rawSetter),
slot, attrs);
if (attrs) {
int first = 1;

View file

@ -56,6 +56,8 @@
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4800)
#pragma warning(push)
#pragma warning(disable:4100) /* Silence unreferenced formal parameter warnings */
#endif
JS_BEGIN_EXTERN_C
@ -553,15 +555,17 @@ js_CastAsObjectJSVal(JSPropertyOp op)
}
struct JSScopeProperty {
friend class JSScope;
friend struct JSScope;
friend void js_SweepScopeProperties(JSContext *cx);
friend JSScopeProperty * js_GetPropertyTreeChild(JSContext *cx, JSScopeProperty *parent,
const JSScopeProperty &child);
jsid id; /* int-tagged jsval/untagged JSAtom* */
JSPropertyOp getter; /* getter and setter hooks or objects */
JSPropertyOp setter; /* getter is JSObject* and setter is 0
private:
JSPropertyOp rawGetter; /* getter and setter hooks or objects */
JSPropertyOp rawSetter; /* getter is JSObject* and setter is 0
if sprop->isMethod() */
public:
uint32 slot; /* abstract index in object slots */
uint8 attrs; /* attributes, see jsapi.h JSPROP_* */
private:
@ -596,6 +600,17 @@ private:
IN_DICTIONARY = 0x20
};
JSScopeProperty(jsid id, JSPropertyOp getter, JSPropertyOp setter, uint32 slot,
uintN attrs, uintN flags, intN shortid)
: id(id), rawGetter(getter), rawSetter(setter), slot(slot), attrs(uint8(attrs)),
flags(uint8(flags)), shortid(int16(shortid))
{
JS_ASSERT_IF(getter && (attrs & JSPROP_GETTER),
JSVAL_TO_OBJECT(getterValue())->isCallable());
JS_ASSERT_IF(setter && (attrs & JSPROP_SETTER),
JSVAL_TO_OBJECT(setterValue())->isCallable());
}
bool marked() const { return (flags & MARK) != 0; }
void mark() { flags |= MARK; }
void clearMark() { flags &= ~MARK; }
@ -622,33 +637,41 @@ public:
JSObject *methodObject() const {
JS_ASSERT(isMethod());
return js_CastAsObject(getter);
return js_CastAsObject(rawGetter);
}
jsval methodValue() const {
JS_ASSERT(isMethod());
return js_CastAsObjectJSVal(getter);
return js_CastAsObjectJSVal(rawGetter);
}
JSPropertyOp getter() const { return rawGetter; }
bool hasDefaultGetter() const { return !rawGetter; }
JSPropertyOp getterOp() const {
JS_ASSERT(!(attrs & JSPROP_GETTER));
return rawGetter;
}
JSObject *getterObject() const {
JS_ASSERT(attrs & JSPROP_GETTER);
return js_CastAsObject(getter);
return js_CastAsObject(rawGetter);
}
jsval getterValue() const {
JS_ASSERT(attrs & JSPROP_GETTER);
jsval getterVal = getter ? js_CastAsObjectJSVal(getter) : JSVAL_VOID;
JS_ASSERT_IF(getter, JSVAL_TO_OBJECT(getterVal)->isCallable());
return getterVal;
return rawGetter ? js_CastAsObjectJSVal(rawGetter) : JSVAL_VOID;
}
JSPropertyOp setter() const { return rawSetter; }
bool hasDefaultSetter() const { return !rawSetter; }
JSPropertyOp setterOp() const {
JS_ASSERT(!(attrs & JSPROP_SETTER));
return rawSetter;
}
JSObject *setterObject() const {
JS_ASSERT((attrs & JSPROP_SETTER) && setter);
return js_CastAsObject(setter);
JS_ASSERT((attrs & JSPROP_SETTER) && rawSetter);
return js_CastAsObject(rawSetter);
}
jsval setterValue() const {
JS_ASSERT(attrs & JSPROP_SETTER);
jsval setterVal = setter ? js_CastAsObjectJSVal(setter) : JSVAL_VOID;
JS_ASSERT_IF(setter, JSVAL_TO_OBJECT(setterVal)->isCallable());
return setterVal;
return rawSetter ? js_CastAsObjectJSVal(rawSetter) : JSVAL_VOID;
}
inline JSDHashNumber hash() const;
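Making rawGetter/rawSetter private forces every caller through accessors that encode the representation's invariant: the same word holds a JSObject * when JSPROP_GETTER is set and a JSPropertyOp otherwise, and getterObject()/getterOp() assert which case they are in. The scheme, distilled (the attribute value is illustrative):

struct PropSketch {
  private:
    void    *rawGetter;   /* JSObject* or JSPropertyOp, per attrs */
    unsigned attrs;
  public:
    bool hasGetterObject() const { return attrs & 0x10; /* JSPROP_GETTER */ }
    void *getterObject() const { JS_ASSERT(hasGetterObject());  return rawGetter; }
    void *getterOp() const     { JS_ASSERT(!hasGetterObject()); return rawGetter; }
};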
@ -795,8 +818,8 @@ JSScope::insertDictionaryProperty(JSScopeProperty *sprop, JSScopeProperty **chil
#define SLOT_IN_SCOPE(slot,scope) ((slot) < (scope)->freeslot)
#define SPROP_HAS_VALID_SLOT(sprop,scope) SLOT_IN_SCOPE((sprop)->slot, scope)
#define SPROP_HAS_STUB_GETTER(sprop) (!(sprop)->getter)
#define SPROP_HAS_STUB_SETTER(sprop) (!(sprop)->setter)
#define SPROP_HAS_STUB_GETTER(sprop) ((sprop)->hasDefaultGetter())
#define SPROP_HAS_STUB_SETTER(sprop) ((sprop)->hasDefaultSetter())
#define SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop) \
(SPROP_HAS_STUB_GETTER(sprop) || (sprop)->isMethod())
@ -898,7 +921,7 @@ JSScopeProperty::get(JSContext* cx, JSObject* obj, JSObject *pobj, jsval* vp)
*/
if (STOBJ_GET_CLASS(obj) == &js_WithClass)
obj = obj->map->ops->thisObject(cx, obj);
return getter(cx, obj, SPROP_USERID(this), vp);
return getterOp()(cx, obj, SPROP_USERID(this), vp);
}
inline bool
@ -917,7 +940,7 @@ JSScopeProperty::set(JSContext* cx, JSObject* obj, jsval* vp)
/* See the comment in JSScopeProperty::get as to why we can check for With. */
if (STOBJ_GET_CLASS(obj) == &js_WithClass)
obj = obj->map->ops->thisObject(cx, obj);
return setter(cx, obj, SPROP_USERID(this), vp);
return setterOp()(cx, obj, SPROP_USERID(this), vp);
}
/* Macro for common expression to test for shared permanent attributes. */
@ -943,6 +966,7 @@ JS_END_EXTERN_C
#ifdef _MSC_VER
#pragma warning(pop)
#pragma warning(pop)
#endif
#endif /* jsscope_h___ */

View file

@ -126,11 +126,11 @@ JSScope::methodReadBarrier(JSContext *cx, JSScopeProperty *sprop, jsval *vp)
JSFunction *fun = GET_FUNCTION_PRIVATE(cx, funobj);
JS_ASSERT(FUN_OBJECT(fun) == funobj && FUN_NULL_CLOSURE(fun));
funobj = CloneFunctionObject(cx, fun, OBJ_GET_PARENT(cx, funobj));
funobj = CloneFunctionObject(cx, fun, funobj->getParent());
if (!funobj)
return false;
*vp = OBJECT_TO_JSVAL(funobj);
return js_SetPropertyHelper(cx, object, sprop->id, 0, vp);
return !!js_SetPropertyHelper(cx, object, sprop->id, 0, vp);
}
inline bool
@ -208,11 +208,11 @@ JSScopeProperty::hash() const
JSDHashNumber hash = 0;
/* Accumulate from least to most random so the low bits are most random. */
JS_ASSERT_IF(isMethod(), !setter || setter == js_watch_set);
if (getter)
hash = JS_ROTATE_LEFT32(hash, 4) ^ jsuword(getter);
if (setter)
hash = JS_ROTATE_LEFT32(hash, 4) ^ jsuword(setter);
JS_ASSERT_IF(isMethod(), !rawSetter || rawSetter == js_watch_set);
if (rawGetter)
hash = JS_ROTATE_LEFT32(hash, 4) ^ jsuword(rawGetter);
if (rawSetter)
hash = JS_ROTATE_LEFT32(hash, 4) ^ jsuword(rawSetter);
hash = JS_ROTATE_LEFT32(hash, 4) ^ (flags & PUBLIC_FLAGS);
hash = JS_ROTATE_LEFT32(hash, 4) ^ attrs;
hash = JS_ROTATE_LEFT32(hash, 4) ^ shortid;
@ -227,7 +227,8 @@ JSScopeProperty::matches(const JSScopeProperty *p) const
JS_ASSERT(!JSVAL_IS_NULL(id));
JS_ASSERT(!JSVAL_IS_NULL(p->id));
return id == p->id &&
matchesParamsAfterId(p->getter, p->setter, p->slot, p->attrs, p->flags, p->shortid);
matchesParamsAfterId(p->rawGetter, p->rawSetter, p->slot, p->attrs, p->flags,
p->shortid);
}
inline bool
@ -235,8 +236,8 @@ JSScopeProperty::matchesParamsAfterId(JSPropertyOp agetter, JSPropertyOp asetter
uintN aattrs, uintN aflags, intN ashortid) const
{
JS_ASSERT(!JSVAL_IS_NULL(id));
return getter == agetter &&
setter == asetter &&
return rawGetter == agetter &&
rawSetter == asetter &&
slot == aslot &&
attrs == aattrs &&
((flags ^ aflags) & PUBLIC_FLAGS) == 0 &&

View file

@ -72,337 +72,6 @@ using namespace js;
const uint32 JSSLOT_EXEC_DEPTH = JSSLOT_PRIVATE + 1;
const uint32 JSSCRIPT_RESERVED_SLOTS = 1;
#if JS_HAS_SCRIPT_OBJECT
static const char js_script_exec_str[] = "Script.prototype.exec";
static const char js_script_compile_str[] = "Script.prototype.compile";
static jsint
GetScriptExecDepth(JSObject *obj)
{
jsval v = obj->fslots[JSSLOT_EXEC_DEPTH];
return JSVAL_IS_VOID(v) ? 0 : JSVAL_TO_INT(v);
}
static void
AdjustScriptExecDepth(JSObject *obj, jsint delta)
{
jsint execDepth = GetScriptExecDepth(obj);
obj->fslots[JSSLOT_EXEC_DEPTH] = INT_TO_JSVAL(execDepth + delta);
}
#if JS_HAS_TOSOURCE
static JSBool
script_toSource(JSContext *cx, uintN argc, jsval *vp)
{
JSObject *obj;
uint32 indent;
JSScript *script;
size_t i, j, k, n;
char buf[16];
jschar *s, *t;
JSString *str;
obj = JS_THIS_OBJECT(cx, vp);
if (!JS_InstanceOf(cx, obj, &js_ScriptClass, vp + 2))
return JS_FALSE;
indent = 0;
if (argc != 0) {
indent = js_ValueToECMAUint32(cx, &vp[2]);
if (JSVAL_IS_NULL(vp[2]))
return JS_FALSE;
}
script = (JSScript *) obj->getPrivate();
/* Let n count the source string length, j the "front porch" length. */
j = JS_snprintf(buf, sizeof buf, "(new %s(", js_ScriptClass.name);
n = j + 2;
if (!script) {
/* Let k count the constructor argument string length. */
k = 0;
s = NULL; /* quell GCC overwarning */
} else {
str = JS_DecompileScript(cx, script, "Script.prototype.toSource",
(uintN)indent);
if (!str)
return JS_FALSE;
str = js_QuoteString(cx, str, '\'');
if (!str)
return JS_FALSE;
const jschar *cs;
str->getCharsAndLength(cs, k);
s = const_cast<jschar *>(cs);
n += k;
}
/* Allocate the source string and copy into it. */
t = (jschar *) cx->malloc((n + 1) * sizeof(jschar));
if (!t)
return JS_FALSE;
for (i = 0; i < j; i++)
t[i] = buf[i];
for (j = 0; j < k; i++, j++)
t[i] = s[j];
t[i++] = ')';
t[i++] = ')';
t[i] = 0;
/* Create and return a JS string for t. */
str = JS_NewUCString(cx, t, n);
if (!str) {
cx->free(t);
return JS_FALSE;
}
*vp = STRING_TO_JSVAL(str);
return JS_TRUE;
}
#endif /* JS_HAS_TOSOURCE */
static JSBool
script_toString(JSContext *cx, uintN argc, jsval *vp)
{
uint32 indent;
JSObject *obj;
JSScript *script;
JSString *str;
indent = 0;
if (argc != 0) {
indent = js_ValueToECMAUint32(cx, &vp[2]);
if (JSVAL_IS_NULL(vp[2]))
return JS_FALSE;
}
obj = JS_THIS_OBJECT(cx, vp);
if (!JS_InstanceOf(cx, obj, &js_ScriptClass, vp + 2))
return JS_FALSE;
script = (JSScript *) obj->getPrivate();
if (!script) {
*vp = STRING_TO_JSVAL(cx->runtime->emptyString);
return JS_TRUE;
}
str = JS_DecompileScript(cx, script, "Script.prototype.toString",
(uintN)indent);
if (!str)
return JS_FALSE;
*vp = STRING_TO_JSVAL(str);
return JS_TRUE;
}
static JSBool
script_compile_sub(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
jsval *rval)
{
JSString *str;
JSObject *scopeobj;
JSScript *script, *oldscript;
JSStackFrame *caller;
const char *file;
uintN line;
JSPrincipals *principals;
jsint execDepth;
/* Make sure obj is a Script object. */
if (!JS_InstanceOf(cx, obj, &js_ScriptClass, argv))
return JS_FALSE;
/* If no args, leave private undefined and return early. */
if (argc == 0)
goto out;
/* Otherwise, the first arg is the script source to compile. */
str = js_ValueToString(cx, argv[0]);
if (!str)
return JS_FALSE;
argv[0] = STRING_TO_JSVAL(str);
scopeobj = NULL;
if (argc >= 2) {
if (!js_ValueToObject(cx, argv[1], &scopeobj))
return JS_FALSE;
argv[1] = OBJECT_TO_JSVAL(scopeobj);
}
/* Compile using the caller's scope chain, which js_Invoke passes to fp. */
caller = js_GetScriptedCaller(cx, NULL);
JS_ASSERT(!caller || cx->fp->scopeChain == caller->scopeChain);
if (caller) {
if (!scopeobj) {
scopeobj = js_GetScopeChain(cx, caller);
if (!scopeobj)
return JS_FALSE;
}
principals = JS_EvalFramePrincipals(cx, cx->fp, caller);
file = js_ComputeFilename(cx, caller, principals, &line);
} else {
file = NULL;
line = 0;
principals = NULL;
}
/* Ensure we compile this script with the right (inner) principals. */
scopeobj = js_CheckScopeChainValidity(cx, scopeobj, js_script_compile_str);
if (!scopeobj)
return JS_FALSE;
/*
* Compile the new script using the caller's scope chain, a la eval().
* Unlike jsobj.c:obj_eval, however, we do not pass TCF_COMPILE_N_GO in
* tcflags and use NULL for the callerFrame argument, because compilation
* is here separated from execution, and the run-time scope chain may not
* match the compile-time. TCF_COMPILE_N_GO is tested in jsemit.c and
* jsparse.c to optimize based on identity of run- and compile-time scope.
*/
script = JSCompiler::compileScript(cx, scopeobj, NULL, principals,
TCF_NEED_MUTABLE_SCRIPT,
str->chars(), str->length(),
NULL, file, line);
if (!script)
return JS_FALSE;
JS_LOCK_OBJ(cx, obj);
execDepth = GetScriptExecDepth(obj);
/*
* execDepth must be 0 to allow compilation here, otherwise the JSScript
* struct can be released while running.
*/
if (execDepth > 0) {
JS_UNLOCK_OBJ(cx, obj);
JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
JSMSG_COMPILE_EXECED_SCRIPT);
return JS_FALSE;
}
/* Swap script for obj's old script, if any. */
oldscript = (JSScript*) obj->getPrivate();
obj->setPrivate(script);
JS_UNLOCK_OBJ(cx, obj);
if (oldscript)
js_DestroyScript(cx, oldscript);
script->u.object = obj;
js_CallNewScriptHook(cx, script, NULL);
out:
/* Return the object. */
*rval = OBJECT_TO_JSVAL(obj);
return JS_TRUE;
}
static JSBool
script_compile(JSContext *cx, uintN argc, jsval *vp)
{
return script_compile_sub(cx, JS_THIS_OBJECT(cx, vp), argc, vp + 2, vp);
}
static JSBool
script_exec_sub(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
jsval *rval)
{
JSObject *scopeobj;
JSStackFrame *caller;
JSPrincipals *principals;
JSScript *script;
JSBool ok;
if (!JS_InstanceOf(cx, obj, &js_ScriptClass, argv))
return JS_FALSE;
scopeobj = NULL;
if (argc != 0) {
if (!js_ValueToObject(cx, argv[0], &scopeobj))
return JS_FALSE;
argv[0] = OBJECT_TO_JSVAL(scopeobj);
}
/*
* Emulate eval() by using caller's this, var object, sharp array, etc.,
* all propagated by js_Execute via a non-null fourth (down) argument to
* js_Execute. If there is no scripted caller, js_Execute uses its second
* (chain) argument to set the exec frame's varobj, thisv, and scopeChain.
*
* Unlike eval, which the compiler detects, Script.prototype.exec may be
* called from a lightweight function, or even from native code (in which
* fp->scopeChain is null). If exec is called from a lightweight function,
* we will need to get a Call object representing its frame, to act as the
* var object and scope chain head.
*/
caller = js_GetScriptedCaller(cx, NULL);
if (caller && !caller->varobj(cx)) {
/* Called from a lightweight function. */
JS_ASSERT(caller->fun && !JSFUN_HEAVYWEIGHT_TEST(caller->fun->flags));
/* Scope chain links from Call object to caller's scope chain. */
if (!js_GetCallObject(cx, caller))
return JS_FALSE;
}
if (!scopeobj) {
/* No scope object passed in: try to use the caller's scope chain. */
if (caller) {
/*
* Load caller->scopeChain after the conditional js_GetCallObject
* call above, which resets scopeChain as well as the callobj.
*/
scopeobj = js_GetScopeChain(cx, caller);
if (!scopeobj)
return JS_FALSE;
} else {
/*
* Called from native code, so we don't know what scope object to
* use. We could use the caller's scope chain (see above), but Script.prototype.exec
* might be a shared/sealed "superglobal" method. A more general
* approach would use cx->globalObject, which will be the same as
* exec.__parent__ in the non-superglobal case. In the superglobal
* case it's the right object: the global, not the superglobal.
*/
scopeobj = cx->globalObject;
}
}
scopeobj = js_CheckScopeChainValidity(cx, scopeobj, js_script_exec_str);
if (!scopeobj)
return JS_FALSE;
/* Keep track of nesting depth for the script. */
AdjustScriptExecDepth(obj, 1);
/* Must get to out label after this */
script = (JSScript *) obj->getPrivate();
if (!script) {
ok = JS_FALSE;
goto out;
}
/* Belt-and-braces: check that this script object has access to scopeobj. */
principals = script->principals;
ok = js_CheckPrincipalsAccess(cx, scopeobj, principals,
CLASS_ATOM(cx, Script));
if (!ok)
goto out;
ok = js_Execute(cx, scopeobj, script, caller, JSFRAME_EVAL, rval);
out:
AdjustScriptExecDepth(obj, -1);
return ok;
}
static JSBool
script_exec(JSContext *cx, uintN argc, jsval *vp)
{
return script_exec_sub(cx, JS_THIS_OBJECT(cx, vp), argc, vp + 2, vp);
}
#endif /* JS_HAS_SCRIPT_OBJECT */
static const jsbytecode emptyScriptCode[] = {JSOP_STOP, SRC_NULL};
/* static */ const JSScript JSScript::emptyScriptConst = {
@@ -717,199 +386,8 @@ js_XDRScript(JSXDRState *xdr, JSScript **scriptp, bool needMutableScript,
return JS_FALSE;
}
#if JS_HAS_SCRIPT_OBJECT && JS_HAS_XDR_FREEZE_THAW
/*
* These cannot be exposed to web content, and chrome does not need them, so
* we take them out of the Mozilla client altogether. Fortunately, there is
* no way to serialize a native function (see fun_xdrObject in jsfun.c).
*/
static JSBool
script_freeze(JSContext *cx, uintN argc, jsval *vp)
{
JSObject *obj;
JSXDRState *xdr;
JSScript *script;
JSBool ok, hasMagic;
uint32 len;
void *buf;
JSString *str;
obj = JS_THIS_OBJECT(cx, vp);
if (!JS_InstanceOf(cx, obj, &js_ScriptClass, vp + 2))
return JS_FALSE;
script = (JSScript *) obj->getPrivate();
if (!script)
return JS_TRUE;
/* create new XDR */
xdr = JS_XDRNewMem(cx, JSXDR_ENCODE);
if (!xdr)
return JS_FALSE;
/* write */
ok = js_XDRScript(xdr, &script, false, &hasMagic);
if (!ok)
goto out;
if (!hasMagic) {
*vp = JSVAL_VOID;
goto out;
}
buf = JS_XDRMemGetData(xdr, &len);
if (!buf) {
ok = JS_FALSE;
goto out;
}
JS_ASSERT((jsword)buf % sizeof(jschar) == 0);
len /= sizeof(jschar);
#if IS_BIG_ENDIAN
{
jschar *chars;
uint32 i;
/* Swap bytes in Unichars to keep frozen strings machine-independent. */
chars = (jschar *)buf;
for (i = 0; i < len; i++)
chars[i] = JSXDR_SWAB16(chars[i]);
}
#endif
str = JS_NewUCStringCopyN(cx, (jschar *)buf, len);
if (!str) {
ok = JS_FALSE;
goto out;
}
*vp = STRING_TO_JSVAL(str);
out:
JS_XDRDestroy(xdr);
return ok;
}
static JSBool
script_thaw(JSContext *cx, uintN argc, jsval *vp)
{
JSObject *obj;
JSXDRState *xdr;
JSString *str;
void *buf;
size_t len;
JSScript *script, *oldscript;
JSBool ok, hasMagic;
jsint execDepth;
obj = JS_THIS_OBJECT(cx, vp);
if (!JS_InstanceOf(cx, obj, &js_ScriptClass, vp + 2))
return JS_FALSE;
if (argc == 0)
return JS_TRUE;
str = js_ValueToString(cx, vp[2]);
if (!str)
return JS_FALSE;
vp[2] = STRING_TO_JSVAL(str);
/* create new XDR */
xdr = JS_XDRNewMem(cx, JSXDR_DECODE);
if (!xdr)
return JS_FALSE;
const jschar *cs;
str->getCharsAndLength(cs, len);
buf = const_cast<jschar *>(cs);
#if IS_BIG_ENDIAN
{
jschar *from, *to;
uint32 i;
/* Swap bytes in Unichars to keep frozen strings machine-independent. */
from = (jschar *)buf;
to = (jschar *) cx->malloc(len * sizeof(jschar));
if (!to) {
JS_XDRDestroy(xdr);
return JS_FALSE;
}
for (i = 0; i < len; i++)
to[i] = JSXDR_SWAB16(from[i]);
buf = (char *)to;
}
#endif
len *= sizeof(jschar);
JS_XDRMemSetData(xdr, buf, len);
/* XXXbe should magic mismatch be error, or false return value? */
ok = js_XDRScript(xdr, &script, true, &hasMagic);
if (!ok)
goto out;
if (!hasMagic) {
*vp = JSVAL_FALSE;
goto out;
}
JS_LOCK_OBJ(cx, obj);
execDepth = GetScriptExecDepth(obj);
/*
* execDepth must be 0 to allow compilation here, otherwise the JSScript
* struct can be released while running.
*/
if (execDepth > 0) {
JS_UNLOCK_OBJ(cx, obj);
JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
JSMSG_COMPILE_EXECED_SCRIPT);
goto out;
}
/* Swap script for obj's old script, if any. */
oldscript = (JSScript *) obj->getPrivate();
obj->setPrivate(script);
JS_UNLOCK_OBJ(cx, obj);
if (oldscript)
js_DestroyScript(cx, oldscript);
script->u.object = obj;
js_CallNewScriptHook(cx, script, NULL);
out:
/*
* We reset the buffer to be NULL so that it doesn't free the chars
* memory owned by str (vp[2]).
*/
JS_XDRMemSetData(xdr, NULL, 0);
JS_XDRDestroy(xdr);
#if IS_BIG_ENDIAN
cx->free(buf);
#endif
*vp = JSVAL_TRUE;
return ok;
}
static const char js_thaw_str[] = "thaw";
#endif /* JS_HAS_SCRIPT_OBJECT && JS_HAS_XDR_FREEZE_THAW */
#endif /* JS_HAS_XDR */
#if JS_HAS_SCRIPT_OBJECT
static JSFunctionSpec script_methods[] = {
#if JS_HAS_TOSOURCE
JS_FN(js_toSource_str, script_toSource, 0,0),
#endif
JS_FN(js_toString_str, script_toString, 0,0),
JS_FN("compile", script_compile, 2,0),
JS_FN("exec", script_exec, 1,0),
#if JS_HAS_XDR_FREEZE_THAW
JS_FN("freeze", script_freeze, 0,0),
JS_FN(js_thaw_str, script_thaw, 1,0),
#endif /* JS_HAS_XDR_FREEZE_THAW */
JS_FS_END
};
#endif /* JS_HAS_SCRIPT_OBJECT */
static void
script_finalize(JSContext *cx, JSObject *obj)
{
@@ -918,16 +396,6 @@ script_finalize(JSContext *cx, JSObject *obj)
js_DestroyScript(cx, script);
}
static JSBool
script_call(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
{
#if JS_HAS_SCRIPT_OBJECT
return script_exec_sub(cx, JSVAL_TO_OBJECT(argv[-2]), argc, argv, rval);
#else
return JS_FALSE;
#endif
}
static void
script_trace(JSTracer *trc, JSObject *obj)
{
@@ -936,81 +404,16 @@ script_trace(JSTracer *trc, JSObject *obj)
js_TraceScript(trc, script);
}
#if !JS_HAS_SCRIPT_OBJECT
#define JSProto_Script JSProto_Object
#endif
JS_FRIEND_DATA(JSClass) js_ScriptClass = {
js_Script_str,
JSClass js_ScriptClass = {
"Script",
JSCLASS_HAS_PRIVATE | JSCLASS_HAS_RESERVED_SLOTS(JSSCRIPT_RESERVED_SLOTS) |
JSCLASS_MARK_IS_TRACE | JSCLASS_HAS_CACHED_PROTO(JSProto_Script),
JSCLASS_MARK_IS_TRACE | JSCLASS_HAS_CACHED_PROTO(JSProto_Object),
JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub,
JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, script_finalize,
NULL, NULL, script_call, NULL,/*XXXbe xdr*/
NULL, NULL, NULL, NULL,/*XXXbe xdr*/
NULL, NULL, JS_CLASS_TRACE(script_trace), NULL
};
#if JS_HAS_SCRIPT_OBJECT
static JSBool
Script(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
{
/* If not constructing, replace obj with a new Script object. */
if (!JS_IsConstructing(cx)) {
obj = js_NewObject(cx, &js_ScriptClass, NULL, NULL);
if (!obj)
return JS_FALSE;
/*
* script_compile_sub does not use rval to root its temporaries so we
* can use it to root obj.
*/
*rval = OBJECT_TO_JSVAL(obj);
}
if (!JS_SetReservedSlot(cx, obj, 0, INT_TO_JSVAL(0)))
return JS_FALSE;
return script_compile_sub(cx, obj, argc, argv, rval);
}
#if JS_HAS_SCRIPT_OBJECT && JS_HAS_XDR_FREEZE_THAW
static JSBool
script_static_thaw(JSContext *cx, uintN argc, jsval *vp)
{
JSObject *obj;
obj = js_NewObject(cx, &js_ScriptClass, NULL, NULL);
if (!obj)
return JS_FALSE;
vp[1] = OBJECT_TO_JSVAL(obj);
if (!script_thaw(cx, argc, vp))
return JS_FALSE;
*vp = OBJECT_TO_JSVAL(obj);
return JS_TRUE;
}
static JSFunctionSpec script_static_methods[] = {
JS_FN(js_thaw_str, script_static_thaw, 1,0),
JS_FS_END
};
#else /* !JS_HAS_SCRIPT_OBJECT || !JS_HAS_XDR_FREEZE_THAW */
#define script_static_methods NULL
#endif /* !JS_HAS_SCRIPT_OBJECT || !JS_HAS_XDR_FREEZE_THAW */
JSObject *
js_InitScriptClass(JSContext *cx, JSObject *obj)
{
return JS_InitClass(cx, obj, NULL, &js_ScriptClass, Script, 1,
NULL, script_methods, NULL, script_static_methods);
}
#endif /* JS_HAS_SCRIPT_OBJECT */
/*
* Shared script filename management.
*/
@@ -1616,7 +1019,7 @@ js_NewScriptFromCG(JSContext *cx, JSCodeGenerator *cg)
goto bad;
}
script->nslots = script->nfixed + cg->maxStackDepth;
script->staticLevel = cg->staticLevel;
script->staticLevel = uint16(cg->staticLevel);
script->principals = cg->compiler->principals;
if (script->principals)
JSPRINCIPALS_HOLD(cx, script->principals);

View file

@@ -1029,7 +1029,8 @@ js_String_p_charCodeAt_int_int(JSString* str, jsint i)
return 0;
return str->chars()[i];
}
JS_DEFINE_CALLINFO_2(extern, INT32, js_String_p_charCodeAt_int_int, STRING, INT32, 1, 1)
JS_DEFINE_CALLINFO_2(extern, INT32, js_String_p_charCodeAt_int_int, STRING, INT32, 1,
nanojit::ACC_NONE)
int32 FASTCALL
js_String_p_charCodeAt_double_int(JSString* str, double d)
@@ -1039,7 +1040,8 @@ js_String_p_charCodeAt_double_int(JSString* str, double d)
return 0;
return str->chars()[jsuint(d)];
}
JS_DEFINE_CALLINFO_2(extern, INT32, js_String_p_charCodeAt_double_int, STRING, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(extern, INT32, js_String_p_charCodeAt_double_int, STRING, DOUBLE, 1,
nanojit::ACC_NONE)
jsdouble FASTCALL
js_String_p_charCodeAt0(JSString* str)
@@ -1060,7 +1062,7 @@ js_String_p_charCodeAt0_int(JSString* str)
return 0;
return str->chars()[0];
}
JS_DEFINE_CALLINFO_1(extern, INT32, js_String_p_charCodeAt0_int, STRING, 1, 1)
JS_DEFINE_CALLINFO_1(extern, INT32, js_String_p_charCodeAt0_int, STRING, 1, nanojit::ACC_NONE)
#endif
jsint
@@ -1785,7 +1787,7 @@ FindReplaceLength(JSContext *cx, ReplaceData &rdata, size_t *sizep)
/* Push lambda and its 'this' parameter. */
jsval *sp = invokevp;
*sp++ = OBJECT_TO_JSVAL(lambda);
*sp++ = OBJECT_TO_JSVAL(OBJ_GET_PARENT(cx, lambda));
*sp++ = OBJECT_TO_JSVAL(lambda->getParent());
/* Push $&, $1, $2, ... */
if (!PushRegExpSubstr(cx, cx->regExpStatics.lastMatch, sp))
@@ -2543,14 +2545,19 @@ js_String_getelem(JSContext* cx, JSString* str, int32 i)
#endif
JS_DEFINE_TRCINFO_1(js_str_toString,
(2, (extern, STRING_RETRY, String_p_toString, CONTEXT, THIS, 1, 1)))
(2, (extern, STRING_RETRY, String_p_toString, CONTEXT, THIS, 1,
nanojit::ACC_NONE)))
JS_DEFINE_TRCINFO_1(str_charAt,
(3, (extern, STRING_RETRY, js_String_getelem, CONTEXT, THIS_STRING, INT32, 1, 1)))
(3, (extern, STRING_RETRY, js_String_getelem, CONTEXT, THIS_STRING, INT32, 1,
nanojit::ACC_NONE)))
JS_DEFINE_TRCINFO_2(str_charCodeAt,
(1, (extern, DOUBLE, js_String_p_charCodeAt0, THIS_STRING, 1, 1)),
(2, (extern, DOUBLE, js_String_p_charCodeAt, THIS_STRING, DOUBLE, 1, 1)))
(1, (extern, DOUBLE, js_String_p_charCodeAt0, THIS_STRING, 1,
nanojit::ACC_NONE)),
(2, (extern, DOUBLE, js_String_p_charCodeAt, THIS_STRING, DOUBLE, 1,
nanojit::ACC_NONE)))
JS_DEFINE_TRCINFO_1(str_concat,
(3, (extern, STRING_RETRY, js_ConcatStrings, CONTEXT, THIS_STRING, STRING, 1, 1)))
(3, (extern, STRING_RETRY, js_ConcatStrings, CONTEXT, THIS_STRING, STRING, 1,
nanojit::ACC_NONE)))
#define GENERIC JSFUN_GENERIC_NATIVE
#define PRIMITIVE JSFUN_THISP_PRIMITIVE
@@ -2952,7 +2959,8 @@ js_String_tn(JSContext* cx, JSObject* proto, JSString* str)
JS_ASSERT(JS_ON_TRACE(cx));
return js_NewObjectWithClassProto(cx, &js_StringClass, proto, STRING_TO_JSVAL(str));
}
JS_DEFINE_CALLINFO_3(extern, OBJECT, js_String_tn, CONTEXT, CALLEE_PROTOTYPE, STRING, 0, 0)
JS_DEFINE_CALLINFO_3(extern, OBJECT, js_String_tn, CONTEXT, CALLEE_PROTOTYPE, STRING, 0,
nanojit::ACC_STORE_ANY)
#endif /* !JS_TRACER */
@@ -3009,7 +3017,7 @@ String_fromCharCode(JSContext* cx, int32 i)
#endif
JS_DEFINE_TRCINFO_1(str_fromCharCode,
(2, (static, STRING_RETRY, String_fromCharCode, CONTEXT, INT32, 1, 1)))
(2, (static, STRING_RETRY, String_fromCharCode, CONTEXT, INT32, 1, nanojit::ACC_NONE)))
static JSFunctionSpec string_static_methods[] = {
JS_TN("fromCharCode", str_fromCharCode, 1, 0, &str_fromCharCode_trcinfo),
@@ -3430,7 +3438,7 @@ js_EqualStrings(JSString *str1, JSString *str2)
return JS_TRUE;
}
JS_DEFINE_CALLINFO_2(extern, BOOL, js_EqualStrings, STRING, STRING, 1, 1)
JS_DEFINE_CALLINFO_2(extern, BOOL, js_EqualStrings, STRING, STRING, 1, nanojit::ACC_NONE)
int32 JS_FASTCALL
js_CompareStrings(JSString *str1, JSString *str2)
@@ -3456,7 +3464,7 @@ js_CompareStrings(JSString *str1, JSString *str2)
}
return (intN)(l1 - l2);
}
JS_DEFINE_CALLINFO_2(extern, INT32, js_CompareStrings, STRING, STRING, 1, 1)
JS_DEFINE_CALLINFO_2(extern, INT32, js_CompareStrings, STRING, STRING, 1, nanojit::ACC_NONE)
size_t
js_strlen(const jschar *s)

View file

@@ -171,6 +171,10 @@ template <class T, size_t N> inline T *ArrayEnd(T (&arr)[N]) { return arr + N; }
/* Useful for implementing containers that assert non-reentrancy */
class ReentrancyGuard
{
/* ReentrancyGuard is not copyable. */
ReentrancyGuard(const ReentrancyGuard &);
void operator=(const ReentrancyGuard &);
#ifdef DEBUG
bool &entered;
#endif

File diff suppressed because it is too large

View file

@ -1109,8 +1109,7 @@ class TraceRecorder
nanojit::LIns* writeBack(nanojit::LIns* i, nanojit::LIns* base, ptrdiff_t offset,
bool demote);
JS_REQUIRES_STACK void set(jsval* p, nanojit::LIns* l, bool initializing = false,
bool demote = true);
JS_REQUIRES_STACK void set(jsval* p, nanojit::LIns* l, bool demote = true);
nanojit::LIns* getFromTracker(jsval* p);
JS_REQUIRES_STACK nanojit::LIns* get(jsval* p);
JS_REQUIRES_STACK nanojit::LIns* attemptImport(jsval* p);
@@ -1294,9 +1293,10 @@ class TraceRecorder
JS_REQUIRES_STACK nanojit::LIns* box_jsval(jsval v, nanojit::LIns* v_ins);
JS_REQUIRES_STACK nanojit::LIns* unbox_jsval(jsval v, nanojit::LIns* v_ins, VMSideExit* exit);
JS_REQUIRES_STACK bool guardClass(JSObject* obj, nanojit::LIns* obj_ins, JSClass* clasp,
VMSideExit* exit, nanojit::LOpcode loadOp = nanojit::LIR_ldp);
VMSideExit* exit,
nanojit::AccSet accSet = nanojit::ACC_LOAD_ANY);
bool guardConstClass(JSObject* obj, nanojit::LIns* obj_ins, JSClass* clasp, VMSideExit* exit) {
return guardClass(obj, obj_ins, clasp, exit, nanojit::LIR_ldcp);
return guardClass(obj, obj_ins, clasp, exit, nanojit::ACC_READONLY);
}
JS_REQUIRES_STACK bool guardDenseArray(JSObject* obj, nanojit::LIns* obj_ins,
ExitType exitType = MISMATCH_EXIT);
@@ -1363,8 +1363,8 @@ class TraceRecorder
JS_REQUIRES_STACK AbortableRecordingStatus endLoop(VMSideExit* exit);
JS_REQUIRES_STACK void joinEdgesToEntry(TreeFragment* peer_root);
JS_REQUIRES_STACK void adjustCallerTypes(TreeFragment* f);
JS_REQUIRES_STACK void prepareTreeCall(TreeFragment* inner, nanojit::LIns*& inner_sp_ins);
JS_REQUIRES_STACK void emitTreeCall(TreeFragment* inner, VMSideExit* exit, nanojit::LIns* inner_sp_ins);
JS_REQUIRES_STACK void prepareTreeCall(TreeFragment* inner);
JS_REQUIRES_STACK void emitTreeCall(TreeFragment* inner, VMSideExit* exit);
JS_REQUIRES_STACK void determineGlobalTypes(TraceType* typeMap);
JS_REQUIRES_STACK VMSideExit* downSnapshot(FrameInfo* downFrame);
JS_REQUIRES_STACK TreeFragment* findNestedCompatiblePeer(TreeFragment* f);
@@ -1518,6 +1518,9 @@ SetMaxCodeCacheBytes(JSContext* cx, uint32 bytes);
extern bool
NativeToValue(JSContext* cx, jsval& v, TraceType type, double* slot);
extern bool
InCustomIterNextTryRegion(jsbytecode *pc);
#ifdef MOZ_TRACEVIS
extern JS_FRIEND_API(bool)

View file

@@ -1,5 +1,5 @@
/* -*- Mode: c++; c-basic-offset: 4; tab-width: 40; indent-tabs-mode: nil -*- */
/* vim: set ts=40 sw=4 et tw=78: */
/* vim: set ts=40 sw=4 et tw=99: */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
@@ -255,7 +255,7 @@ TypedArray::obj_lookupProperty(JSContext *cx, JSObject *obj, jsid id,
return true;
}
JSObject *proto = STOBJ_GET_PROTO(obj);
JSObject *proto = obj->getProto();
if (!proto) {
*objp = NULL;
*propp = NULL;
@@ -339,7 +339,7 @@ js_TypedArray_uint8_clamp_double(const double x)
return y;
}
JS_DEFINE_CALLINFO_1(extern, INT32, js_TypedArray_uint8_clamp_double, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_1(extern, INT32, js_TypedArray_uint8_clamp_double, DOUBLE, 1, nanojit::ACC_NONE)
struct uint8_clamped {
@@ -401,7 +401,7 @@ struct uint8_clamped {
}
inline uint8_clamped& operator= (const jsdouble x) {
val = js_TypedArray_uint8_clamp_double(x);
val = uint8(js_TypedArray_uint8_clamp_double(x));
return *this;
}
@@ -483,7 +483,7 @@ class TypedArrayTemplate
JSProperty *prop;
JSScopeProperty *sprop;
JSObject *proto = STOBJ_GET_PROTO(obj);
JSObject *proto = obj->getProto();
if (!proto) {
*vp = JSVAL_VOID;
return true;
@@ -1346,31 +1346,31 @@ TypedArrayConstruct(JSContext *cx, jsint atype, uintN argc, jsval *argv, jsval *
{
switch (atype) {
case TypedArray::TYPE_INT8:
return Int8Array::class_constructor(cx, cx->globalObject, argc, argv, rv);
return !!Int8Array::class_constructor(cx, cx->globalObject, argc, argv, rv);
case TypedArray::TYPE_UINT8:
return Uint8Array::class_constructor(cx, cx->globalObject, argc, argv, rv);
return !!Uint8Array::class_constructor(cx, cx->globalObject, argc, argv, rv);
case TypedArray::TYPE_INT16:
return Int16Array::class_constructor(cx, cx->globalObject, argc, argv, rv);
return !!Int16Array::class_constructor(cx, cx->globalObject, argc, argv, rv);
case TypedArray::TYPE_UINT16:
return Uint16Array::class_constructor(cx, cx->globalObject, argc, argv, rv);
return !!Uint16Array::class_constructor(cx, cx->globalObject, argc, argv, rv);
case TypedArray::TYPE_INT32:
return Int32Array::class_constructor(cx, cx->globalObject, argc, argv, rv);
return !!Int32Array::class_constructor(cx, cx->globalObject, argc, argv, rv);
case TypedArray::TYPE_UINT32:
return Uint32Array::class_constructor(cx, cx->globalObject, argc, argv, rv);
return !!Uint32Array::class_constructor(cx, cx->globalObject, argc, argv, rv);
case TypedArray::TYPE_FLOAT32:
return Float32Array::class_constructor(cx, cx->globalObject, argc, argv, rv);
return !!Float32Array::class_constructor(cx, cx->globalObject, argc, argv, rv);
case TypedArray::TYPE_FLOAT64:
return Float64Array::class_constructor(cx, cx->globalObject, argc, argv, rv);
return !!Float64Array::class_constructor(cx, cx->globalObject, argc, argv, rv);
case TypedArray::TYPE_UINT8_CLAMPED:
return Uint8ClampedArray::class_constructor(cx, cx->globalObject, argc, argv, rv);
return !!Uint8ClampedArray::class_constructor(cx, cx->globalObject, argc, argv, rv);
default:
JS_NOT_REACHED("shouldn't have gotten here");

View file

@@ -1,5 +1,5 @@
/* -*- Mode: c++; c-basic-offset: 4; tab-width: 40; indent-tabs-mode: nil -*- */
/* vim: set ts=40 sw=4 et tw=78: */
/* vim: set ts=40 sw=4 et tw=99: */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*

View file

@@ -88,9 +88,7 @@
#define JS_HAS_OBJ_WATCHPOINT 0 /* has o.watch and o.unwatch */
#define JS_HAS_EVAL_THIS_SCOPE 0 /* Math.eval is same as with (Math) */
#define JS_HAS_SHARP_VARS 0 /* has #n=, #n# for object literals */
#define JS_HAS_SCRIPT_OBJECT 0 /* has (new Script("x++")).exec() */
#define JS_HAS_XDR 0 /* has XDR API and internal support */
#define JS_HAS_XDR_FREEZE_THAW 0 /* has XDR freeze/thaw script methods */
#define JS_HAS_TOSOURCE 0 /* has Object/Array toSource method */
#define JS_HAS_DEBUGGER_KEYWORD 0 /* has hook for debugger keyword */
#define JS_HAS_CATCH_GUARD 0 /* has exception handling catch guard */
@@ -120,9 +118,7 @@
#define JS_HAS_OBJ_WATCHPOINT 1 /* has o.watch and o.unwatch */
#define JS_HAS_EVAL_THIS_SCOPE 1 /* Math.eval is same as with (Math) */
#define JS_HAS_SHARP_VARS 1 /* has #n=, #n# for object literals */
#define JS_HAS_SCRIPT_OBJECT 1 /* has (new Script("x++")).exec() */
#define JS_HAS_XDR 1 /* has XDR API and internal support */
#define JS_HAS_XDR_FREEZE_THAW 0 /* has XDR freeze/thaw script methods */
#define JS_HAS_TOSOURCE 1 /* has Object/Array toSource method */
#define JS_HAS_DEBUGGER_KEYWORD 1 /* has hook for debugger keyword */
#define JS_HAS_CATCH_GUARD 1 /* has exception handling catch guard */
@@ -148,9 +144,7 @@
#define JS_HAS_OBJ_WATCHPOINT 1 /* has o.watch and o.unwatch */
#define JS_HAS_EVAL_THIS_SCOPE 1 /* Math.eval is same as with (Math) */
#define JS_HAS_SHARP_VARS 1 /* has #n=, #n# for object literals */
#define JS_HAS_SCRIPT_OBJECT 1 /* has (new Script("x++")).exec() */
#define JS_HAS_XDR 1 /* has XDR API and internal support */
#define JS_HAS_XDR_FREEZE_THAW 0 /* has XDR freeze/thaw script methods */
#define JS_HAS_TOSOURCE 1 /* has Object/Array toSource method */
#define JS_HAS_DEBUGGER_KEYWORD 1 /* has hook for debugger keyword */
#define JS_HAS_CATCH_GUARD 1 /* has exception handling catch guard */
@@ -176,9 +170,7 @@
#define JS_HAS_OBJ_WATCHPOINT 1 /* has o.watch and o.unwatch */
#define JS_HAS_EVAL_THIS_SCOPE 1 /* Math.eval is same as with (Math) */
#define JS_HAS_SHARP_VARS 1 /* has #n=, #n# for object literals */
#define JS_HAS_SCRIPT_OBJECT 0 /* has (new Script("x++")).exec() */
#define JS_HAS_XDR 1 /* has XDR API and internal support */
#define JS_HAS_XDR_FREEZE_THAW 0 /* has XDR freeze/thaw script methods */
#define JS_HAS_TOSOURCE 1 /* has Object/Array toSource method */
#define JS_HAS_DEBUGGER_KEYWORD 1 /* has hook for debugger keyword */
#define JS_HAS_CATCH_GUARD 1 /* has exception handling catch guard */
@@ -204,9 +196,7 @@
#define JS_HAS_OBJ_WATCHPOINT 1 /* has o.watch and o.unwatch */
#define JS_HAS_EVAL_THIS_SCOPE 1 /* Math.eval is same as with (Math) */
#define JS_HAS_SHARP_VARS 1 /* has #n=, #n# for object literals */
#define JS_HAS_SCRIPT_OBJECT 0 /* has (new Script("x++")).exec() */
#define JS_HAS_XDR 1 /* has XDR API and internal support */
#define JS_HAS_XDR_FREEZE_THAW 0 /* has XDR freeze/thaw script methods */
#define JS_HAS_TOSOURCE 1 /* has Object/Array toSource method */
#define JS_HAS_DEBUGGER_KEYWORD 1 /* has hook for debugger keyword */
#define JS_HAS_CATCH_GUARD 1 /* has exception handling catch guard */

View file

@@ -205,7 +205,7 @@ JS_XDRFindClassById(JSXDRState *xdr, uint32 id);
* before deserialization of bytecode. If the saved version does not match
* the current version, abort deserialization and invalidate the file.
*/
#define JSXDR_BYTECODE_VERSION (0xb973c0de - 61)
#define JSXDR_BYTECODE_VERSION (0xb973c0de - 62)
/*
* Library-private functions.

View file

@@ -7492,8 +7492,8 @@ js_GetFunctionNamespace(JSContext *cx, jsval *vp)
* refer to this instance in scripts. When used to qualify method
* names, its prefix and uri references are copied to the QName.
*/
OBJ_CLEAR_PROTO(cx, obj);
OBJ_CLEAR_PARENT(cx, obj);
obj->clearProto();
obj->clearParent();
JS_LOCK_GC(rt);
if (!rt->functionNamespaceObject)
@@ -7532,7 +7532,7 @@ js_GetDefaultXMLNamespace(JSContext *cx, jsval *vp)
fp = js_GetTopStackFrame(cx);
obj = NULL;
for (tmp = fp->scopeChain; tmp; tmp = OBJ_GET_PARENT(cx, tmp)) {
for (tmp = fp->scopeChain; tmp; tmp = tmp->getParent()) {
JSClass *clasp = OBJ_GET_CLASS(cx, tmp);
if (clasp == &js_BlockClass || clasp == &js_WithClass)
continue;
@@ -7711,8 +7711,8 @@ js_GetAnyName(JSContext *cx, jsval *vp)
ok = JS_FALSE;
break;
}
JS_ASSERT(!OBJ_GET_PROTO(cx, obj));
JS_ASSERT(!OBJ_GET_PARENT(cx, obj));
JS_ASSERT(!obj->getProto());
JS_ASSERT(!obj->getParent());
} while (0);
js_LeaveLocalRootScopeWithResult(cx, OBJECT_TO_JSVAL(obj));
@@ -7766,7 +7766,7 @@ js_FindXMLProperty(JSContext *cx, jsval nameval, JSObject **objp, jsid *idp)
/* Skip any With object that can wrap XML. */
target = obj;
while (OBJ_GET_CLASS(cx, target) == &js_WithClass) {
proto = OBJ_GET_PROTO(cx, target);
proto = target->getProto();
if (!proto)
break;
target = proto;
@@ -7795,7 +7795,7 @@ js_FindXMLProperty(JSContext *cx, jsval nameval, JSObject **objp, jsid *idp)
return JS_TRUE;
}
}
} while ((obj = OBJ_GET_PARENT(cx, obj)) != NULL);
} while ((obj = obj->getParent()) != NULL);
printable = js_ValueToPrintableString(cx, OBJECT_TO_JSVAL(nameobj));
if (printable) {
@@ -7832,7 +7832,7 @@ GetXMLFunction(JSContext *cx, JSObject *obj, jsid id, jsval *vp)
ok = JS_TRUE;
goto out;
}
target = OBJ_GET_PROTO(cx, target);
target = target->getProto();
if (target == NULL)
break;
tvr.u.object = target;

View file

@@ -151,7 +151,7 @@ enum ReturnType {
#endif
#define CI(name, args) \
{(uintptr_t) (&name), args, /*_cse*/0, /*_fold*/0, nanojit::ABI_CDECL \
{(uintptr_t) (&name), args, nanojit::ABI_CDECL, /*isPure*/0, ACC_STORE_ANY \
DEBUG_ONLY_NAME(name)}
#define FN(name, args) \
@@ -1064,23 +1064,15 @@ FragmentAssembler::assembleFragment(LirTokenStream &in, bool implicitBegin, cons
break;
#if NJ_EXPANDED_LOADSTORE_SUPPORTED
case LIR_ldzb:
case LIR_ldzs:
case LIR_ldsb:
case LIR_ldss:
case LIR_ldcsb:
case LIR_ldcss:
case LIR_ld32f:
case LIR_ldc32f:
#endif
case LIR_ldzb:
case LIR_ldzs:
case LIR_ld:
case LIR_ldc:
CASE64(LIR_ldq:)
CASE64(LIR_ldqc:)
case LIR_ldf:
case LIR_ldfc:
case LIR_ldcb:
case LIR_ldcs:
ins = assemble_load();
break;
@@ -1334,6 +1326,10 @@ const CallInfo ci_N_IQF = CI(f_N_IQF, argMask(I32, 1, 3) |
// - LIR_file/LIR_line (#ifdef VTUNE only)
// - LIR_fmod (not implemented in NJ backends)
//
// Other limitations:
// - Loads always use accSet==ACC_LOAD_ANY
// - Stores always use accSet==ACC_STORE_ANY
//
void
FragmentAssembler::assembleRandomFragment(int nIns)
{
@@ -1459,34 +1455,23 @@ FragmentAssembler::assembleRandomFragment(int nIns)
I_loads.push_back(LIR_ld); // weight LIR_ld more heavily
I_loads.push_back(LIR_ld);
I_loads.push_back(LIR_ld);
I_loads.push_back(LIR_ldc);
I_loads.push_back(LIR_ldcb);
I_loads.push_back(LIR_ldcs);
#if NJ_EXPANDED_LOADSTORE_SUPPORTED
I_loads.push_back(LIR_ldzb);
I_loads.push_back(LIR_ldzs);
#if NJ_EXPANDED_LOADSTORE_SUPPORTED
I_loads.push_back(LIR_ldsb);
I_loads.push_back(LIR_ldss);
I_loads.push_back(LIR_ldcsb);
I_loads.push_back(LIR_ldcss);
#endif
#ifdef NANOJIT_64BIT
vector<LOpcode> Q_loads;
Q_loads.push_back(LIR_ldq); // weight LIR_ld more heavily
Q_loads.push_back(LIR_ldq);
Q_loads.push_back(LIR_ldqc);
#endif
vector<LOpcode> F_loads;
F_loads.push_back(LIR_ldf); // weight LIR_ldf more heavily
F_loads.push_back(LIR_ldf);
F_loads.push_back(LIR_ldfc);
#if NJ_EXPANDED_LOADSTORE_SUPPORTED
// this loads a 32-bit float and expands to 64-bit float
F_loads.push_back(LIR_ld32f); // weight LIR_ld32f more heavily
F_loads.push_back(LIR_ld32f);
F_loads.push_back(LIR_ldc32f);
// this loads a 32-bit float and expands it to 64-bit float
F_loads.push_back(LIR_ld32f);
#endif
enum LInsClass {
@@ -1990,7 +1975,7 @@ Lirasm::Lirasm(bool verbose) :
#endif
// Populate the mOpMap table.
#define OP___(op, number, repKind, retType) \
#define OP___(op, number, repKind, retType, isCse) \
mOpMap[#op] = LIR_##op;
#include "nanojit/LIRopcode.tbl"
#undef OP___
@@ -2024,13 +2009,13 @@ Lirasm::lookupFunction(const string &name, CallInfo *&ci)
// The ABI, arg types and ret type will be overridden by the caller.
if (func->second.mReturnType == RT_FLOAT) {
CallInfo target = {(uintptr_t) func->second.rfloat,
0, 0, 0, ABI_FASTCALL
0, ABI_FASTCALL, /*isPure*/0, ACC_STORE_ANY
verbose_only(, func->first.c_str()) };
*ci = target;
} else {
CallInfo target = {(uintptr_t) func->second.rint,
0, 0, 0, ABI_FASTCALL
0, ABI_FASTCALL, /*isPure*/0, ACC_STORE_ANY
verbose_only(, func->first.c_str()) };
*ci = target;
}

View file

@@ -1,5 +1,5 @@
base = alloc 512
five = int 5
sti five base 256
x = ldcs base 256
x = ldzs base 256
ret x

View file

@@ -1 +1 @@
3fa60904a28ec82b54d7e8a9e9fac6c29e0a5324
4adbf1bbb16cf4751b46a49a4f9c474c0ab0a3b9

View file

@@ -417,23 +417,23 @@ namespace nanojit
// optimize the LIR_alloc case by indexing off FP, thus saving the use of
// a GpReg.
//
Register Assembler::getBaseReg(LIns *i, int &d, RegisterMask allow)
Register Assembler::getBaseReg(LInsp base, int &d, RegisterMask allow)
{
#if !PEDANTIC
if (i->isop(LIR_alloc)) {
if (base->isop(LIR_alloc)) {
// The value of a LIR_alloc is a pointer to its stack memory,
// which is always relative to FP. So we can just return FP if we
// also adjust 'd' (and can do so in a valid manner). Or, in the
// PEDANTIC case, we can just assign a register as normal;
// findRegFor() will allocate the stack memory for LIR_alloc if
// necessary.
d += findMemFor(i);
d += findMemFor(base);
return FP;
}
#else
(void) d;
#endif
return findRegFor(i, allow);
return findRegFor(base, allow);
}
// Like findRegFor2(), but used for stores where the base value has the
@ -1384,12 +1384,7 @@ namespace nanojit
case LIR_ldzs:
case LIR_ldsb:
case LIR_ldss:
case LIR_ldcsb:
case LIR_ldcss:
case LIR_ld:
case LIR_ldc:
case LIR_ldcb:
case LIR_ldcs:
{
countlir_ld();
asm_load32(ins);
@@ -1397,11 +1392,8 @@ namespace nanojit
}
case LIR_ld32f:
case LIR_ldc32f:
case LIR_ldf:
case LIR_ldfc:
CASE64(LIR_ldq:)
CASE64(LIR_ldqc:)
{
countlir_ldq();
asm_load64(ins);
@@ -2086,20 +2078,20 @@ namespace nanojit
}
/**
* move regs around so the SavedRegs contains the highest priority regs.
* Move regs around so the SavedRegs contains the highest priority regs.
*/
void Assembler::evictScratchRegs()
void Assembler::evictScratchRegsExcept(RegisterMask ignore)
{
// find the top GpRegs that are candidates to put in SavedRegs
// Find the top GpRegs that are candidates to put in SavedRegs.
// tosave is a binary heap stored in an array. the root is tosave[0],
// 'tosave' is a binary heap stored in an array. The root is tosave[0],
// left child is at i+1, right child is at i+2.
Register tosave[LastReg-FirstReg+1];
int len=0;
RegAlloc *regs = &_allocator;
for (Register r = FirstReg; r <= LastReg; r = nextreg(r)) {
if (rmask(r) & GpRegs) {
if (rmask(r) & GpRegs & ~ignore) {
LIns *ins = regs->getActive(r);
if (ins) {
if (canRemat(ins)) {
@@ -2121,8 +2113,8 @@ namespace nanojit
}
}
// now primap has the live exprs in priority order.
// allocate each of the top priority exprs to a SavedReg
// Now primap has the live exprs in priority order.
// Allocate each of the top priority exprs to a SavedReg.
RegisterMask allow = SavedRegs;
while (allow && len > 0) {
@ -2156,7 +2148,7 @@ namespace nanojit
}
// now evict everything else.
evictSomeActiveRegs(~SavedRegs);
evictSomeActiveRegs(~(SavedRegs | ignore));
}
void Assembler::evictAllActiveRegs()

View file

@@ -337,7 +337,7 @@ namespace nanojit
void registerResetAll();
void evictAllActiveRegs();
void evictSomeActiveRegs(RegisterMask regs);
void evictScratchRegs();
void evictScratchRegsExcept(RegisterMask ignore);
void intersectRegisterState(RegAlloc& saved);
void unionRegisterState(RegAlloc& saved);
void assignSaved(RegAlloc &saved, RegisterMask skip);
@@ -368,7 +368,7 @@ namespace nanojit
verbose_only(, size_t &nBytes));
bool canRemat(LIns*);
bool isKnownReg(Register r) {
bool deprecated_isKnownReg(Register r) {
return r != deprecated_UnknownReg;
}

View file

@@ -45,7 +45,7 @@ namespace nanojit
#ifdef FEATURE_NANOJIT
const uint8_t repKinds[] = {
#define OP___(op, number, repKind, retType) \
#define OP___(op, number, repKind, retType, isCse) \
LRK_##repKind,
#include "LIRopcode.tbl"
#undef OP___
@@ -53,18 +53,26 @@ namespace nanojit
};
const LTy retTypes[] = {
#define OP___(op, number, repKind, retType) \
#define OP___(op, number, repKind, retType, isCse) \
LTy_##retType,
#include "LIRopcode.tbl"
#undef OP___
LTy_Void
};
const int8_t isCses[] = {
#define OP___(op, number, repKind, retType, isCse) \
isCse,
#include "LIRopcode.tbl"
#undef OP___
0
};
// LIR verbose specific
#ifdef NJ_VERBOSE
const char* lirNames[] = {
#define OP___(op, number, repKind, retType) \
#define OP___(op, number, repKind, retType, isCse) \
#op,
#include "LIRopcode.tbl"
#undef OP___
@@ -201,12 +209,17 @@ namespace nanojit
return startOfRoom;
}
LInsp LirBufWriter::insStore(LOpcode op, LInsp val, LInsp base, int32_t d)
LInsp LirBufWriter::insStore(LOpcode op, LInsp val, LInsp base, int32_t d, AccSet accSet)
{
LInsSti* insSti = (LInsSti*)_buf->makeRoom(sizeof(LInsSti));
LIns* ins = insSti->getLIns();
ins->initLInsSti(op, val, base, d);
return ins;
if (isS16(d)) {
LInsSti* insSti = (LInsSti*)_buf->makeRoom(sizeof(LInsSti));
LIns* ins = insSti->getLIns();
ins->initLInsSti(op, val, base, d, accSet);
return ins;
} else {
// If the displacement is more than 16 bits, put it in a separate instruction.
return insStore(op, val, ins2(LIR_addp, base, insImmWord(d)), 0, accSet);
}
}
LInsp LirBufWriter::ins0(LOpcode op)
@@ -241,12 +254,19 @@ namespace nanojit
return ins;
}
LInsp LirBufWriter::insLoad(LOpcode op, LInsp base, int32_t d)
LInsp LirBufWriter::insLoad(LOpcode op, LInsp base, int32_t d, AccSet accSet)
{
LInsLd* insLd = (LInsLd*)_buf->makeRoom(sizeof(LInsLd));
LIns* ins = insLd->getLIns();
ins->initLInsLd(op, base, d);
return ins;
if (isS16(d)) {
LInsLd* insLd = (LInsLd*)_buf->makeRoom(sizeof(LInsLd));
LIns* ins = insLd->getLIns();
ins->initLInsLd(op, base, d, accSet);
return ins;
} else {
// If the displacement is more than 16 bits, put it in a separate instruction.
// Note that CseFilter::insLoad() also does this, so this will
// only occur if CseFilter has been removed from the pipeline.
return insLoad(op, ins2(LIR_addp, base, insImmWord(d)), 0, accSet);
}
}
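// Illustrative sketch (not part of this patch): when the displacement
// fails isS16(), eg. d = 0x12340, the writer above emits the equivalent
// of
//
//    addr = addp base, 0x12340   ; displacement folded into the address
//    val  = ld addr[0]           ; load with a zero displacement
//
// so the 16-bit displacement field of LInsLd/LInsSti always suffices.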
LInsp LirBufWriter::insGuard(LOpcode op, LInsp c, GuardRecord *gr)
@@ -334,7 +354,7 @@ namespace nanojit
{
static const uint8_t insSizes[] = {
// LIR_start is treated specially -- see below.
#define OP___(op, number, repKind, retType) \
#define OP___(op, number, repKind, retType, isCse) \
((number) == LIR_start ? 0 : sizeof(LIns##repKind)),
#include "LIRopcode.tbl"
#undef OP___
@@ -666,17 +686,12 @@ namespace nanojit
oprnd1 = t;
break;
default:
if (v >= LIR_lt && v <= LIR_uge) {
NanoStaticAssert((LIR_lt ^ 1) == LIR_gt);
NanoStaticAssert((LIR_le ^ 1) == LIR_ge);
NanoStaticAssert((LIR_ult ^ 1) == LIR_ugt);
NanoStaticAssert((LIR_ule ^ 1) == LIR_uge);
if (isICmpOpcode(v)) {
// move const to rhs, swap the operator
LIns *t = oprnd2;
oprnd2 = oprnd1;
oprnd1 = t;
v = LOpcode(v^1);
v = invertICmpOpcode(v);
}
break;
}
@@ -810,11 +825,10 @@ namespace nanojit
}
}
else {
NanoStaticAssert((LIR_xt ^ 1) == LIR_xf);
while (c->isop(LIR_eq) && c->oprnd1()->isCmp() &&
c->oprnd2()->isconstval(0)) {
// xt(eq(cmp,0)) => xf(cmp) or xf(eq(cmp,0)) => xt(cmp)
v = LOpcode(v^1);
v = invertCondGuardOpcode(v);
c = c->oprnd1();
}
}
@@ -827,7 +841,7 @@ namespace nanojit
if (oprnd1->isconst() && oprnd2->isconst()) {
int32_t c1 = oprnd1->imm32();
int32_t c2 = oprnd2->imm32();
double d;
double d = 0.0;
switch (op) {
case LIR_addxov: d = double(c1) + double(c2); break;
@@ -883,7 +897,7 @@ namespace nanojit
case LIR_jf:
while (c->isop(LIR_eq) && c->oprnd1()->isCmp() && c->oprnd2()->isconstval(0)) {
// jt(eq(cmp,0)) => jf(cmp) or jf(eq(cmp,0)) => jt(cmp)
v = LOpcode(v ^ 1);
v = invertCondJmpOpcode(v);
c = c->oprnd1();
}
break;
@@ -893,7 +907,7 @@ namespace nanojit
return out->insBranch(v, c, t);
}
LIns* ExprFilter::insLoad(LOpcode op, LIns* base, int32_t off) {
LIns* ExprFilter::insLoad(LOpcode op, LIns* base, int32_t off, AccSet accSet) {
if (base->isconstp() && !isS8(off)) {
// if the effective address is constant, then transform:
// ld const[bigconst] => ld (const+bigconst)[0]
@ -901,9 +915,9 @@ namespace nanojit
// under the assumption that we're more likely to CSE-match the
// constant base address if we dont const-fold small offsets.
uintptr_t p = (uintptr_t)base->constvalp() + off;
return out->insLoad(op, insImmPtr((void*)p), 0);
return out->insLoad(op, insImmPtr((void*)p), 0, accSet);
}
return out->insLoad(op, base, off);
return out->insLoad(op, base, off, accSet);
}
LIns* LirWriter::ins_eq0(LIns* oprnd1)
@@ -934,7 +948,7 @@ namespace nanojit
#endif
}
LIns* LirWriter::insStorei(LIns* value, LIns* base, int32_t d)
LIns* LirWriter::insStorei(LIns* value, LIns* base, int32_t d, AccSet accSet)
{
// Determine which kind of store should be used for 'value' based on
// its type.
@@ -948,7 +962,7 @@ namespace nanojit
case LTy_Void: NanoAssert(0); break;
default: NanoAssert(0); break;
}
return insStore(op, value, base, d);
return insStore(op, value, base, d, accSet);
}
#if NJ_SOFTFLOAT_SUPPORTED
@@ -1573,21 +1587,13 @@ namespace nanojit
break;
case LIR_ld:
case LIR_ldc:
CASE64(LIR_ldq:)
CASE64(LIR_ldqc:)
case LIR_ldf:
case LIR_ldfc:
case LIR_ldzb:
case LIR_ldzs:
case LIR_ldcb:
case LIR_ldcs:
case LIR_ldsb:
case LIR_ldss:
case LIR_ldcsb:
case LIR_ldcss:
case LIR_ld32f:
case LIR_ldc32f:
case LIR_ret:
CASE64(LIR_qret:)
case LIR_fret:
@@ -1753,6 +1759,26 @@ namespace nanojit
}
}
char* LirNameMap::formatAccSet(LInsp ins, bool isLoad, char* buf) {
AccSet accSet = ins->accSet();
int i = 0;
if ((isLoad && accSet == ACC_LOAD_ANY) ||
(!isLoad && accSet == ACC_STORE_ANY))
{
// boring, don't bother with a suffix
} else {
buf[i++] = '.';
if (accSet & ACC_READONLY) { buf[i++] = 'r'; accSet &= ~ACC_READONLY; }
if (accSet & ACC_STACK) { buf[i++] = 's'; accSet &= ~ACC_STACK; }
if (accSet & ACC_OTHER) { buf[i++] = 'o'; accSet &= ~ACC_OTHER; }
// This assertion will fail if we add a new accSet value but
// forget to handle it here.
NanoAssert(accSet == 0);
}
buf[i] = 0;
return buf;
}
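// For example (illustrative, using the regions defined in LIR.h): a
// load whose AccSet is ACC_READONLY prints as "ld.r ...", one marked
// ACC_STACK|ACC_OTHER as "ld.so ...", while the boring ACC_LOAD_ANY
// loads and ACC_STORE_ANY stores get no suffix at all.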
void LirNameMap::copyName(LInsp i, const char *s, int suffix) {
char s2[200];
if (VMPI_isdigit(s[VMPI_strlen(s)-1])) {
@@ -2012,37 +2038,35 @@ namespace nanojit
break;
case LIR_ld:
case LIR_ldc:
CASE64(LIR_ldq:)
CASE64(LIR_ldqc:)
case LIR_ldf:
case LIR_ldfc:
case LIR_ldzb:
case LIR_ldzs:
case LIR_ldcb:
case LIR_ldcs:
case LIR_ldsb:
case LIR_ldss:
case LIR_ldcsb:
case LIR_ldcss:
case LIR_ld32f:
case LIR_ldc32f:
VMPI_sprintf(s, "%s = %s %s[%d]", formatRef(i), lirNames[op],
case LIR_ld32f: {
char b[32];
VMPI_sprintf(s, "%s = %s%s %s[%d]", formatRef(i), lirNames[op],
formatAccSet(i, /*isLoad*/true, b),
formatRef(i->oprnd1()),
i->disp());
break;
}
case LIR_sti:
CASE64(LIR_stqi:)
case LIR_stfi:
case LIR_stb:
case LIR_sts:
case LIR_st32f:
VMPI_sprintf(s, "%s %s[%d] = %s", lirNames[op],
case LIR_st32f: {
char b[32];
VMPI_sprintf(s, "%s%s %s[%d] = %s", lirNames[op],
formatAccSet(i, /*isLoad*/false, b),
formatRef(i->oprnd2()),
i->disp(),
formatRef(i->oprnd1()));
break;
}
default:
NanoAssertMsgf(0, "Can't handle opcode %s\n", lirNames[op]);
@@ -2150,7 +2174,7 @@ namespace nanojit
LIns* CseFilter::ins3(LOpcode v, LInsp a, LInsp b, LInsp c)
{
NanoAssert(isCmovOpcode(v));
NanoAssert(isCseOpcode(v));
uint32_t k;
LInsp ins = exprs->find3(v, a, b, c, k);
if (ins)
@@ -2161,18 +2185,27 @@ namespace nanojit
return exprs->add(LIns3, ins, k);
}
LIns* CseFilter::insLoad(LOpcode v, LInsp base, int32_t disp)
LIns* CseFilter::insLoad(LOpcode v, LInsp base, int32_t disp, AccSet accSet)
{
if (isCseOpcode(v)) {
uint32_t k;
LInsp ins = exprs->findLoad(v, base, disp, k);
if (ins)
return ins;
ins = out->insLoad(v, base, disp);
NanoAssert(ins->opcode() == v && ins->oprnd1() == base && ins->disp() == disp);
return exprs->add(LInsLoad, ins, k);
if (isS16(disp)) {
// XXX: This condition is overly strict. Bug 517910 will make it better.
if (accSet == ACC_READONLY) {
uint32_t k;
LInsp ins = exprs->findLoad(v, base, disp, k);
if (ins)
return ins;
ins = out->insLoad(v, base, disp, accSet);
NanoAssert(ins->opcode() == v && ins->oprnd1() == base && ins->disp() == disp);
return exprs->add(LInsLoad, ins, k);
}
return out->insLoad(v, base, disp, accSet);
} else {
// If the displacement is more than 16 bits, put it in a separate
// instruction. LirBufWriter also does this; we do it here as
// well because CseFilter relies on LirBufWriter not changing
// code.
return insLoad(v, ins2(LIR_addp, base, insImmWord(disp)), 0, accSet);
}
return out->insLoad(v, base, disp);
}
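// Illustrative consequence (not from the patch, using the verbose-mode
// syntax above): in a fragment such as
//
//    x1 = ld.r obj[0]        ; ACC_READONLY load
//    sti.o other[8] = v      ; store to some writable region
//    x2 = ld.r obj[0]
//
// the second load CSEs to x1, because a READONLY location is never
// stored to within the fragment; a load marked ACC_OTHER or
// ACC_LOAD_ANY in the same position would be re-issued.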
LInsp CseFilter::insGuard(LOpcode v, LInsp c, GuardRecord *gr)
@@ -2223,7 +2256,8 @@ namespace nanojit
LInsp CseFilter::insCall(const CallInfo *ci, LInsp args[])
{
if (ci->_cse) {
if (ci->_isPure) {
NanoAssert(ci->_storeAccSet == ACC_NONE);
uint32_t k;
uint32_t argc = ci->count_args();
LInsp ins = exprs->findCall(ci, argc, args, k);
@ -2236,7 +2270,7 @@ namespace nanojit
return out->insCall(ci, args);
}
LInsp LoadFilter::insLoad(LOpcode v, LInsp base, int32_t disp)
LInsp LoadFilter::insLoad(LOpcode v, LInsp base, int32_t disp, AccSet accSet)
{
if (base != sp && base != rp)
{
@ -2255,7 +2289,7 @@ namespace nanojit
LInsp ins = exprs->findLoad(v, base, disp, k);
if (ins)
return ins;
ins = out->insLoad(v, base, disp);
ins = out->insLoad(v, base, disp, accSet);
return exprs->add(LInsLoad, ins, k);
}
default:
@ -2263,7 +2297,7 @@ namespace nanojit
break;
}
}
return out->insLoad(v, base, disp);
return out->insLoad(v, base, disp, accSet);
}
void LoadFilter::clear(LInsp p)
@@ -2272,15 +2306,15 @@ namespace nanojit
exprs->clear();
}
LInsp LoadFilter::insStore(LOpcode op, LInsp v, LInsp b, int32_t d)
LInsp LoadFilter::insStore(LOpcode op, LInsp v, LInsp b, int32_t d, AccSet accSet)
{
clear(b);
return out->insStore(op, v, b, d);
return out->insStore(op, v, b, d, accSet);
}
LInsp LoadFilter::insCall(const CallInfo *ci, LInsp args[])
{
if (!ci->_cse)
if (!ci->_isPure)
exprs->clear();
return out->insCall(ci, args);
}
@@ -2314,7 +2348,7 @@ namespace nanojit
#define SF_CALLINFO(name, typesig) \
static const CallInfo name##_ci = \
{ (intptr_t)&name, typesig, /*cse*/1, /*fold*/1, ABI_FASTCALL verbose_only(, #name) }
{ (intptr_t)&name, typesig, ABI_FASTCALL, /*isPure*/1, ACC_NONE verbose_only(, #name) }
SF_CALLINFO(i2f, SIG_F_I);
SF_CALLINFO(u2f, SIG_F_U);
@@ -2392,7 +2426,7 @@ namespace nanojit
LIns* SoftFloatFilter::ins2(LOpcode op, LIns *a, LIns *b) {
const CallInfo *ci = softFloatOps.opmap[op];
if (ci) {
if ((op >= LIR_feq && op <= LIR_fge))
if (isFCmpOpcode(op))
return fcmp(ci, a, b);
return fcall2(ci, a, b);
}
@@ -2508,6 +2542,8 @@ namespace nanojit
void ValidateWriter::typeCheckArgs(LOpcode op, int nArgs, LTy formals[], LIns* args[])
{
NanoAssert(nArgs >= 0);
// Type-check the arguments.
for (int i = 0; i < nArgs; i++) {
LTy formal = formals[i];
@@ -2538,6 +2574,13 @@ namespace nanojit
lirNames[op], lirNames[arg->opcode()], shouldBeDesc);
}
void ValidateWriter::errorAccSetShould(const char* what, AccSet accSet, const char* shouldDesc)
{
NanoAssertMsgf(0,
"LIR AccSet error (%s): '%s' AccSet is %d; it should %s",
_whereInPipeline, what, accSet, shouldDesc);
}
void ValidateWriter::checkLInsIsACondOrConst(LOpcode op, int argN, LIns* ins)
{
// We could introduce a LTy_B32 type in the type system but that's a
@@ -2564,29 +2607,27 @@ namespace nanojit
: LirWriter(out), _whereInPipeline(stageName)
{}
LIns* ValidateWriter::insLoad(LOpcode op, LIns* base, int32_t d)
LIns* ValidateWriter::insLoad(LOpcode op, LIns* base, int32_t d, AccSet accSet)
{
if (accSet == ACC_NONE)
errorAccSetShould(lirNames[op], accSet, "not equal ACC_NONE");
if (accSet & ~ACC_LOAD_ANY)
errorAccSetShould(lirNames[op], accSet, "not contain bits that aren't in ACC_LOAD_ANY");
int nArgs = 1;
LTy formals[1] = { LTy_Ptr };
LIns* args[1] = { base };
switch (op) {
case LIR_ld:
case LIR_ldc:
case LIR_ldf:
case LIR_ldfc:
case LIR_ldzb:
case LIR_ldzs:
case LIR_ldcb:
case LIR_ldcs:
case LIR_ldsb:
case LIR_ldss:
case LIR_ldcsb:
case LIR_ldcss:
case LIR_ld32f:
case LIR_ldc32f:
CASE64(LIR_ldq:)
CASE64(LIR_ldqc:)
break;
default:
NanoAssert(0);
@@ -2594,11 +2635,17 @@ namespace nanojit
typeCheckArgs(op, nArgs, formals, args);
return out->insLoad(op, base, d);
return out->insLoad(op, base, d, accSet);
}
LIns* ValidateWriter::insStore(LOpcode op, LIns* value, LIns* base, int32_t d)
LIns* ValidateWriter::insStore(LOpcode op, LIns* value, LIns* base, int32_t d, AccSet accSet)
{
if (accSet == ACC_NONE)
errorAccSetShould(lirNames[op], accSet, "not equal ACC_NONE");
if (accSet & ~ACC_STORE_ANY)
errorAccSetShould(lirNames[op], accSet, "not contain bits that aren't in ACC_STORE_ANY");
int nArgs = 2;
LTy formals[2] = { LTy_Void, LTy_Ptr }; // LTy_Void is overwritten shortly
LIns* args[2] = { value, base };
@@ -2869,6 +2916,13 @@ namespace nanojit
LOpcode op = getCallOpcode(ci);
if (ci->_isPure && ci->_storeAccSet != ACC_NONE)
errorAccSetShould(ci->_name, ci->_storeAccSet, "equal ACC_NONE for pure functions");
if (ci->_storeAccSet & ~ACC_STORE_ANY)
errorAccSetShould(lirNames[op], ci->_storeAccSet,
"not contain bits that aren't in ACC_STORE_ANY");
// This loop iterates over the args from right-to-left (because
// arg() and get_sizes() use right-to-left order), but puts the
// results into formals[] and args[] in left-to-right order so
@@ -2894,7 +2948,7 @@ namespace nanojit
LIns* ValidateWriter::insGuard(LOpcode op, LIns *cond, GuardRecord *gr)
{
int nArgs;
int nArgs = -1; // init to shut compilers up
LTy formals[1];
LIns* args[1];
@@ -2951,7 +3005,7 @@ namespace nanojit
LIns* ValidateWriter::insBranch(LOpcode op, LIns* cond, LIns* to)
{
int nArgs = 0;
int nArgs = -1; // init to shut compilers up
LTy formals[1];
LIns* args[1];

View file

@@ -48,7 +48,7 @@ namespace nanojit
: unsigned
#endif
{
#define OP___(op, number, repKind, retType) \
#define OP___(op, number, repKind, retType, isCse) \
LIR_##op = (number),
#include "LIRopcode.tbl"
LIR_sentinel,
@@ -62,7 +62,6 @@ namespace nanojit
// pointer op aliases
LIR_ldp = PTR_SIZE(LIR_ld, LIR_ldq),
LIR_ldcp = PTR_SIZE(LIR_ldc, LIR_ldqc),
LIR_stpi = PTR_SIZE(LIR_sti, LIR_stqi),
LIR_piadd = PTR_SIZE(LIR_add, LIR_qiadd),
LIR_piand = PTR_SIZE(LIR_and, LIR_qiand),
@@ -89,6 +88,54 @@ namespace nanojit
LIR_pret = PTR_SIZE(LIR_ret, LIR_qret)
};
// 32-bit integer comparisons must be contiguous, as must 64-bit integer
// comparisons and 64-bit float comparisons.
NanoStaticAssert(LIR_eq + 1 == LIR_lt &&
LIR_eq + 2 == LIR_gt &&
LIR_eq + 3 == LIR_le &&
LIR_eq + 4 == LIR_ge &&
LIR_eq + 5 == LIR_ult &&
LIR_eq + 6 == LIR_ugt &&
LIR_eq + 7 == LIR_ule &&
LIR_eq + 8 == LIR_uge);
#ifdef NANOJIT_64BIT
NanoStaticAssert(LIR_qeq + 1 == LIR_qlt &&
LIR_qeq + 2 == LIR_qgt &&
LIR_qeq + 3 == LIR_qle &&
LIR_qeq + 4 == LIR_qge &&
LIR_qeq + 5 == LIR_qult &&
LIR_qeq + 6 == LIR_qugt &&
LIR_qeq + 7 == LIR_qule &&
LIR_qeq + 8 == LIR_quge);
#endif
NanoStaticAssert(LIR_feq + 1 == LIR_flt &&
LIR_feq + 2 == LIR_fgt &&
LIR_feq + 3 == LIR_fle &&
LIR_feq + 4 == LIR_fge);
// Various opcodes must be changeable to their opposite with op^1
// (although we use invertXyz() when possible, ie. outside static
// assertions).
NanoStaticAssert((LIR_jt^1) == LIR_jf && (LIR_jf^1) == LIR_jt);
NanoStaticAssert((LIR_xt^1) == LIR_xf && (LIR_xf^1) == LIR_xt);
NanoStaticAssert((LIR_lt^1) == LIR_gt && (LIR_gt^1) == LIR_lt);
NanoStaticAssert((LIR_le^1) == LIR_ge && (LIR_ge^1) == LIR_le);
NanoStaticAssert((LIR_ult^1) == LIR_ugt && (LIR_ugt^1) == LIR_ult);
NanoStaticAssert((LIR_ule^1) == LIR_uge && (LIR_uge^1) == LIR_ule);
#ifdef NANOJIT_64BIT
NanoStaticAssert((LIR_qlt^1) == LIR_qgt && (LIR_qgt^1) == LIR_qlt);
NanoStaticAssert((LIR_qle^1) == LIR_qge && (LIR_qge^1) == LIR_qle);
NanoStaticAssert((LIR_qult^1) == LIR_qugt && (LIR_qugt^1) == LIR_qult);
NanoStaticAssert((LIR_qule^1) == LIR_quge && (LIR_quge^1) == LIR_qule);
#endif
NanoStaticAssert((LIR_flt^1) == LIR_fgt && (LIR_fgt^1) == LIR_flt);
NanoStaticAssert((LIR_fle^1) == LIR_fge && (LIR_fge^1) == LIR_fle);
struct GuardRecord;
struct SideExit;
@@ -122,13 +169,132 @@ namespace nanojit
CALL_INDIRECT = 0
};
//-----------------------------------------------------------------------
// Aliasing
// --------
// *Aliasing* occurs when a single memory location can be accessed through
// multiple names. For example, consider this code:
//
// ld a[0]
// sti b[0]
// ld a[0]
//
// In general, it's possible that a[0] and b[0] may refer to the same
// memory location. This means, for example, that you cannot safely
// perform CSE on the two loads. However, if you know that 'a' cannot be
// an alias of 'b' (ie. the two loads do not alias with the store) then
// you can safely perform CSE.
//
// Access regions
// --------------
// Doing alias analysis precisely is difficult. But it turns out that
// keeping track of aliasing at a very coarse level is enough to help with
// many optimisations. So we conceptually divide the memory that is
// accessible from LIR into a small number of "access regions". An access
// region may be non-contiguous. No two access regions can overlap. The
// union of all access regions covers all memory accessible from LIR.
//
// In general a (static) load or store may be executed more than once, and
// thus may access multiple regions; however, in practice almost all
// loads and stores will obviously access only a single region. A
// function called from LIR may load and/or store multiple access regions
// (even if executed only once).
//
// If two loads/stores/calls are known to not access the same region(s),
// then they do not alias.
//
// The access regions used are as follows:
//
// - READONLY: all memory that is read-only, ie. never stored to.
// A load from a READONLY region will never alias with any stores.
//
// - STACK: the stack. Stack loads/stores can usually be easily
// identified because they use SP as the stack pointer.
//
// - OTHER: all other regions of memory.
//
// It makes sense to add new access regions when doing so will help with
// one or more optimisations.
//
// One subtlety is that the meanings of the access region markings only
// apply to the LIR fragment that they are in. For example, if a memory
// location M is read-only in a particular LIR fragment, all loads
// involving M in that fragment can be safely marked READONLY, even if M
// is modified elsewhere. This is safe because a LIR fragment is the
// unit of analysis in which the markings are used. In other words, alias
// region markings are only used for intra-fragment optimisations.
//
// Access region sets and instruction markings
// -------------------------------------------
// The LIR generator must mark each load/store with an "access region
// set", which is a set of one or more access regions. This indicates
// which parts of LIR-accessible memory the load/store may touch.
//
// The LIR generator must also mark each function called from LIR with an
// access region set for memory stored to by the function. (We could also
// have a marking for memory loads, but there's no need at the moment.)
// These markings apply to the function itself, not the call site (ie.
// they're not context-sensitive).
//
// These load/store/call markings MUST BE ACCURATE -- if they are wrong
// then invalid optimisations might occur that change the meaning of the
// code. However, they can safely be imprecise (ie. conservative), in the
// following ways:
//
// - A load that accesses a READONLY region can be safely marked instead
// as loading from OTHER. In other words, it's safe to underestimate
// the size of the READONLY region. (This would also apply to the load
// set of a function, if we recorded that.)
//
// - A load/store can safely be marked as accessing regions that it
// doesn't, so long as the regions it does access are also included (one
// exception: marking a store with READONLY is nonsense and will cause
// assertions).
//
// In other words, a load/store can be marked with an access region set
// that is a superset of its actual access region set. Taking this to
// its logical conclusion, any load can be safely marked with ACC_LOAD_ANY and
// any store can be safely marked with ACC_STORE_ANY (and the latter also
// holds for the store set of a function).
//
// Such imprecision is safe but may reduce optimisation opportunities.
//-----------------------------------------------------------------------
// An access region set is represented as a bitset. Nb: this restricts us
// to at most eight access regions for the moment.
typedef uint8_t AccSet;
// The access regions. Note that because of the bitset representation
// these constants are also valid (singleton) AccSet values. If you add
// new ones please update ACC_ALL_WRITABLE and LirNameMap::formatAccSet().
//
static const AccSet ACC_READONLY = 1 << 0; // 0000_0001b
static const AccSet ACC_STACK = 1 << 1; // 0000_0010b
static const AccSet ACC_OTHER = 1 << 2; // 0000_0100b
// Some common (non-singleton) access region sets. ACC_NONE does not make
// sense for loads or stores, which must access at least one region; it
// only makes sense for calls.
//
// A convention that's worth using: use ACC_LOAD_ANY/ACC_STORE_ANY for
// cases that you're unsure about or haven't considered carefully. Use
// ACC_ALL/ACC_ALL_WRITABLE for cases that you have considered carefully.
// That way it's easy to tell which ones have been considered and which
// haven't.
static const AccSet ACC_NONE = 0x0;
static const AccSet ACC_ALL_WRITABLE = ACC_STACK | ACC_OTHER;
static const AccSet ACC_ALL = ACC_READONLY | ACC_ALL_WRITABLE;
static const AccSet ACC_LOAD_ANY = ACC_ALL; // synonym
static const AccSet ACC_STORE_ANY = ACC_ALL_WRITABLE; // synonym
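// For illustration only (a sketch, not code from this file): a LIR
// generator holding a LirWriter* 'lir', a stack-pointer instruction 'sp'
// and an arbitrary heap pointer 'obj' might annotate its accesses so:
//
//     LIns* local = lir->insLoad(LIR_ld, sp, 8, ACC_STACK);      // stack slot
//     LIns* field = lir->insLoad(LIR_ld, obj, 4, ACC_OTHER);     // heap field
//     lir->insStore(LIR_sti, field, sp, 12, ACC_STACK);          // cannot alias 'field'
//     LIns* dunno = lir->insLoad(LIR_ld, obj, 0, ACC_LOAD_ANY);  // unsure, so pessimistic
//
// The STACK-marked store can never invalidate the OTHER-marked load, so
// an optimiser may CSE or reorder across it; the ACC_LOAD_ANY load
// conservatively blocks such transformations.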
struct CallInfo
{
uintptr_t _address;
uint32_t _argtypes:27; // 9 3-bit fields indicating arg type, by ARGSIZE above (including ret type): a1 a2 a3 a4 a5 ret
uint8_t _cse:1; // true if no side effects
uint8_t _fold:1; // true if no side effects
AbiKind _abi:3;
uint8_t _isPure:1; // _isPure=1 means no side-effects, result only depends on args
AccSet _storeAccSet; // access regions stored by the function
verbose_only ( const char* _name; )
uint32_t _count_args(uint32_t mask) const;
@@ -171,14 +337,12 @@ namespace nanojit
uint32_t index;
};
// Array holding the 'isCse' field from LIRopcode.tbl.
extern const int8_t isCses[]; // cannot be uint8_t, some values are negative
inline bool isCseOpcode(LOpcode op) {
return
#if defined NANOJIT_64BIT
(op >= LIR_quad && op <= LIR_quge) ||
#else
(op >= LIR_i2f && op <= LIR_float) || // XXX: yuk; use a table (bug 542932)
#endif
(op >= LIR_int && op <= LIR_uge);
NanoAssert(isCses[op] != -1); // see LIRopcode.tbl to understand this
return isCses[op] == 1;
}
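// For reference, isCses[] can be generated directly from the new isCse
// column in LIRopcode.tbl; a sketch of the generator (the real definition
// lives in LIR.cpp) looks like this:
//
//     const int8_t isCses[] = {
//     #define OP___(op, number, repKind, retType, isCse) \
//         isCse,
//     #include "LIRopcode.tbl"
//     #undef OP___
//     };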
inline bool isRetOpcode(LOpcode op) {
return
@@ -194,6 +358,53 @@ namespace nanojit
#endif
op == LIR_cmov;
}
inline bool isICmpOpcode(LOpcode op) {
return LIR_eq <= op && op <= LIR_uge;
}
inline bool isSICmpOpcode(LOpcode op) {
return LIR_eq <= op && op <= LIR_ge;
}
inline bool isUICmpOpcode(LOpcode op) {
return LIR_eq == op || (LIR_ult <= op && op <= LIR_uge);
}
#ifdef NANOJIT_64BIT
inline bool isQCmpOpcode(LOpcode op) {
return LIR_qeq <= op && op <= LIR_quge;
}
inline bool isSQCmpOpcode(LOpcode op) {
return LIR_qeq <= op && op <= LIR_qge;
}
inline bool isUQCmpOpcode(LOpcode op) {
return LIR_qeq == op || (LIR_qult <= op && op <= LIR_quge);
}
#endif
inline bool isFCmpOpcode(LOpcode op) {
return LIR_feq <= op && op <= LIR_fge;
}
inline LOpcode invertCondJmpOpcode(LOpcode op) {
NanoAssert(op == LIR_jt || op == LIR_jf);
return LOpcode(op ^ 1);
}
inline LOpcode invertCondGuardOpcode(LOpcode op) {
NanoAssert(op == LIR_xt || op == LIR_xf);
return LOpcode(op ^ 1);
}
inline LOpcode invertICmpOpcode(LOpcode op) {
NanoAssert(isICmpOpcode(op));
return LOpcode(op ^ 1);
}
#ifdef NANOJIT_64BIT
inline LOpcode invertQCmpOpcode(LOpcode op) {
NanoAssert(isQCmpOpcode(op));
return LOpcode(op ^ 1);
}
#endif
inline LOpcode invertFCmpOpcode(LOpcode op) {
NanoAssert(isFCmpOpcode(op));
return LOpcode(op ^ 1);
}
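// For illustration, the kind of static assertion that the comments in
// LIRopcode.tbl refer to might look like the following (a sketch;
// NanoStaticAssert is assumed to be nanojit's compile-time assert macro):
//
//     NanoStaticAssert((LIR_jt ^ 1) == LIR_jf);
//     NanoStaticAssert((LIR_xt ^ 1) == LIR_xf);
//     NanoStaticAssert((LIR_lt ^ 1) == LIR_gt && (LIR_le ^ 1) == LIR_ge);
//     NanoStaticAssert((LIR_flt ^ 1) == LIR_fgt && (LIR_fle ^ 1) == LIR_fge);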
inline LOpcode getCallOpcode(const CallInfo* ci) {
LOpcode op = LIR_pcall;
switch (ci->returnType()) {
@@ -386,8 +597,8 @@ namespace nanojit
inline void initLInsOp1(LOpcode opcode, LIns* oprnd1);
inline void initLInsOp2(LOpcode opcode, LIns* oprnd1, LIns* oprnd2);
inline void initLInsOp3(LOpcode opcode, LIns* oprnd1, LIns* oprnd2, LIns* oprnd3);
inline void initLInsLd(LOpcode opcode, LIns* val, int32_t d);
inline void initLInsSti(LOpcode opcode, LIns* val, LIns* base, int32_t d);
inline void initLInsLd(LOpcode opcode, LIns* val, int32_t d, AccSet accSet);
inline void initLInsSti(LOpcode opcode, LIns* val, LIns* base, int32_t d, AccSet accSet);
inline void initLInsSk(LIns* prevLIns);
// Nb: args[] must be allocated and initialised before being passed in;
// initLInsC() just copies the pointer into the LInsC.
@@ -470,8 +681,9 @@ namespace nanojit
// For guards.
inline GuardRecord* record() const;
// Displacement for LInsLd/LInsSti
// For loads/stores.
inline int32_t disp() const;
inline int32_t accSet() const;
// For LInsSk.
inline LIns* prevLIns() const;
@@ -560,37 +772,33 @@ namespace nanojit
}
// LIns predicates.
bool isCse() const {
return isCseOpcode(opcode()) || (isCall() && callInfo()->_cse);
bool isop(LOpcode o) const {
return opcode() == o;
}
bool isRet() const {
return isRetOpcode(opcode());
}
bool isLive() const {
LOpcode op = opcode();
return
return isop(LIR_live) ||
#if defined NANOJIT_64BIT
op == LIR_qlive ||
isop(LIR_qlive) ||
#endif
op == LIR_live || op == LIR_flive;
}
bool isop(LOpcode o) const {
return opcode() == o;
isop(LIR_flive);
}
bool isCmp() const {
LOpcode op = opcode();
return (op >= LIR_eq && op <= LIR_uge) ||
return isICmpOpcode(op) ||
#if defined NANOJIT_64BIT
(op >= LIR_qeq && op <= LIR_quge) ||
isQCmpOpcode(op) ||
#endif
(op >= LIR_feq && op <= LIR_fge);
isFCmpOpcode(op);
}
bool isCall() const {
return
return isop(LIR_icall) ||
#if defined NANOJIT_64BIT
isop(LIR_qcall) ||
isop(LIR_qcall) ||
#endif
isop(LIR_icall) || isop(LIR_fcall);
isop(LIR_fcall);
}
bool isCmov() const {
return isCmovOpcode(opcode());
@@ -681,10 +889,10 @@ namespace nanojit
// affect the control flow.
bool isStmt() {
NanoAssert(!isop(LIR_start) && !isop(LIR_skip));
// All instructions with Void retType are statements. And some
// calls are statements too.
// All instructions with Void retType are statements, as are calls
// to impure functions.
if (isCall())
return !isCse();
return !callInfo()->_isPure;
else
return isVoid();
}
@@ -771,7 +979,12 @@ namespace nanojit
private:
friend class LIns;
int32_t disp;
// Nb: the LIR writer pipeline handles things if a displacement
// exceeds 16 bits. This is rare, but does happen occasionally. We
// could go to 24 bits but then it would happen so rarely that the
// handler code would be difficult to test and thus untrustworthy.
int16_t disp;
AccSet accSet;
LIns* oprnd_1;
@@ -787,7 +1000,8 @@ namespace nanojit
private:
friend class LIns;
int32_t disp;
int16_t disp;
AccSet accSet;
LIns* oprnd_2;
@@ -944,21 +1158,23 @@ namespace nanojit
toLInsOp3()->oprnd_3 = oprnd3;
NanoAssert(isLInsOp3());
}
void LIns::initLInsLd(LOpcode opcode, LIns* val, int32_t d) {
void LIns::initLInsLd(LOpcode opcode, LIns* val, int32_t d, AccSet accSet) {
clearReg();
clearArIndex();
lastWord.opcode = opcode;
toLInsLd()->oprnd_1 = val;
toLInsLd()->disp = d;
toLInsLd()->accSet = accSet;
NanoAssert(isLInsLd());
}
void LIns::initLInsSti(LOpcode opcode, LIns* val, LIns* base, int32_t d) {
void LIns::initLInsSti(LOpcode opcode, LIns* val, LIns* base, int32_t d, AccSet accSet) {
clearReg();
clearArIndex();
lastWord.opcode = opcode;
toLInsSti()->oprnd_1 = val;
toLInsSti()->oprnd_2 = base;
toLInsSti()->disp = d;
toLInsSti()->accSet = accSet;
NanoAssert(isLInsSti());
}
void LIns::initLInsSk(LIns* prevLIns) {
@@ -1077,6 +1293,15 @@ namespace nanojit
}
}
int32_t LIns::accSet() const {
if (isLInsSti()) {
return toLInsSti()->accSet;
} else {
NanoAssert(isLInsLd());
return toLInsLd()->accSet;
}
}
LIns* LIns::prevLIns() const {
NanoAssert(isLInsSk());
return toLInsSk()->prevLIns;
@@ -1189,11 +1414,11 @@ namespace nanojit
virtual LInsp insImmf(double d) {
return out->insImmf(d);
}
virtual LInsp insLoad(LOpcode op, LIns* base, int32_t d) {
return out->insLoad(op, base, d);
virtual LInsp insLoad(LOpcode op, LIns* base, int32_t d, AccSet accSet) {
return out->insLoad(op, base, d, accSet);
}
virtual LInsp insStore(LOpcode op, LIns* value, LIns* base, int32_t d) {
return out->insStore(op, value, base, d);
virtual LInsp insStore(LOpcode op, LIns* value, LIns* base, int32_t d, AccSet accSet) {
return out->insStore(op, value, base, d, accSet);
}
// args[] is in reverse order, ie. args[0] holds the rightmost arg.
virtual LInsp insCall(const CallInfo *call, LInsp args[]) {
@@ -1211,24 +1436,41 @@ namespace nanojit
// Inserts a conditional to execute and branches to execute if
// the condition is true and false respectively.
LIns* ins_choose(LIns* cond, LIns* iftrue, LIns* iffalse, bool use_cmov);
LIns* ins_choose(LIns* cond, LIns* iftrue, LIns* iffalse, bool use_cmov);
// Inserts an integer comparison to 0
LIns* ins_eq0(LIns* oprnd1);
LIns* ins_eq0(LIns* oprnd1);
// Inserts a pointer comparison to 0
LIns* ins_peq0(LIns* oprnd1);
LIns* ins_peq0(LIns* oprnd1);
// Inserts a binary operation where the second operand is an
// integer immediate.
LIns* ins2i(LOpcode op, LIns *oprnd1, int32_t);
LIns* ins2i(LOpcode op, LIns *oprnd1, int32_t);
#if NJ_SOFTFLOAT_SUPPORTED
LIns* qjoin(LInsp lo, LInsp hi);
LIns* qjoin(LInsp lo, LInsp hi);
#endif
LIns* insImmPtr(const void *ptr);
LIns* insImmWord(intptr_t ptr);
LIns* insImmPtr(const void *ptr);
LIns* insImmWord(intptr_t ptr);
// Sign or zero extend integers to native integers. On 32-bit this is a no-op.
LIns* ins_i2p(LIns* intIns);
LIns* ins_u2p(LIns* uintIns);
// choose LIR_sti or LIR_stqi based on size of value
LIns* insStorei(LIns* value, LIns* base, int32_t d);
LIns* ins_i2p(LIns* intIns);
LIns* ins_u2p(LIns* uintIns);
// Chooses LIR_sti or LIR_stqi based on size of value.
LIns* insStorei(LIns* value, LIns* base, int32_t d, AccSet accSet);
// Insert a load/store with the most pessimistic region access info, which is always safe.
LIns* insLoad(LOpcode op, LIns* base, int32_t d) {
return insLoad(op, base, d, ACC_LOAD_ANY);
}
LIns* insStore(LOpcode op, LIns* value, LIns* base, int32_t d) {
return insStore(op, value, base, d, ACC_STORE_ANY);
}
LIns* insStorei(LIns* value, LIns* base, int32_t d) {
return insStorei(value, base, d, ACC_STORE_ANY);
}
};
@@ -1303,6 +1545,7 @@ namespace nanojit
void addName(LInsp i, const char *s);
void copyName(LInsp i, const char *s, int suffix);
char* formatAccSet(LInsp ins, bool isLoad, char* buf);
const char *formatRef(LIns *ref);
const char *formatIns(LInsp i);
void formatGuard(LInsp i, char *buf);
@@ -1390,11 +1633,11 @@ namespace nanojit
LIns* insParam(int32_t i, int32_t kind) {
return add(out->insParam(i, kind));
}
LIns* insLoad(LOpcode v, LInsp base, int32_t disp) {
return add(out->insLoad(v, base, disp));
LIns* insLoad(LOpcode v, LInsp base, int32_t disp, AccSet accSet) {
return add(out->insLoad(v, base, disp, accSet));
}
LIns* insStore(LOpcode op, LInsp v, LInsp b, int32_t d) {
return add(out->insStore(op, v, b, d));
LIns* insStore(LOpcode op, LInsp v, LInsp b, int32_t d, AccSet accSet) {
return add(out->insStore(op, v, b, d, accSet));
}
LIns* insAlloc(int32_t size) {
return add(out->insAlloc(size));
@@ -1424,7 +1667,7 @@ namespace nanojit
LIns* insGuard(LOpcode, LIns *cond, GuardRecord *);
LIns* insGuardXov(LOpcode, LIns* a, LIns* b, GuardRecord *);
LIns* insBranch(LOpcode, LIns *cond, LIns *target);
LIns* insLoad(LOpcode op, LInsp base, int32_t off);
LIns* insLoad(LOpcode op, LInsp base, int32_t off, AccSet accSet);
};
enum LInsHashKind {
@@ -1528,7 +1771,7 @@ namespace nanojit
LIns* ins1(LOpcode v, LInsp);
LIns* ins2(LOpcode v, LInsp, LInsp);
LIns* ins3(LOpcode v, LInsp, LInsp, LInsp);
LIns* insLoad(LOpcode op, LInsp cond, int32_t d);
LIns* insLoad(LOpcode op, LInsp cond, int32_t d, AccSet accSet);
LIns* insCall(const CallInfo *call, LInsp args[]);
LIns* insGuard(LOpcode op, LInsp cond, GuardRecord *gr);
LIns* insGuardXov(LOpcode op, LInsp a, LInsp b, GuardRecord *gr);
@@ -1586,8 +1829,8 @@ namespace nanojit
}
// LirWriter interface
LInsp insLoad(LOpcode op, LInsp base, int32_t disp);
LInsp insStore(LOpcode op, LInsp o1, LInsp o2, int32_t disp);
LInsp insLoad(LOpcode op, LInsp base, int32_t disp, AccSet accSet);
LInsp insStore(LOpcode op, LInsp o1, LInsp o2, int32_t disp, AccSet accSet);
LInsp ins0(LOpcode op);
LInsp ins1(LOpcode op, LInsp o1);
LInsp ins2(LOpcode op, LInsp o1, LInsp o2);
@@ -1694,8 +1937,8 @@ namespace nanojit
}
LInsp ins0(LOpcode);
LInsp insLoad(LOpcode, LInsp base, int32_t disp);
LInsp insStore(LOpcode op, LInsp v, LInsp b, int32_t d);
LInsp insLoad(LOpcode op, LInsp base, int32_t disp, AccSet accSet);
LInsp insStore(LOpcode op, LInsp value, LInsp base, int32_t disp, AccSet accSet);
LInsp insCall(const CallInfo *call, LInsp args[]);
};
@@ -1748,14 +1991,15 @@ namespace nanojit
void typeCheckArgs(LOpcode op, int nArgs, LTy formals[], LIns* args[]);
void errorStructureShouldBe(LOpcode op, const char* argDesc, int argN, LIns* arg,
const char* shouldBeDesc);
void errorAccSetShould(const char* what, AccSet accSet, const char* shouldDesc);
void checkLInsHasOpcode(LOpcode op, int argN, LIns* ins, LOpcode op2);
void checkLInsIsACondOrConst(LOpcode op, int argN, LIns* ins);
void checkLInsIsNull(LOpcode op, int argN, LIns* ins);
public:
ValidateWriter(LirWriter* out, const char* stageName);
LIns* insLoad(LOpcode op, LIns* base, int32_t d);
LIns* insStore(LOpcode op, LIns* value, LIns* base, int32_t d);
LIns* insLoad(LOpcode op, LIns* base, int32_t d, AccSet accSet);
LIns* insStore(LOpcode op, LIns* value, LIns* base, int32_t d, AccSet accSet);
LIns* ins0(LOpcode v);
LIns* ins1(LOpcode v, LIns* a);
LIns* ins2(LOpcode v, LIns* a, LIns* b);

View file

@@ -40,18 +40,21 @@
/*
* Definitions of LIR opcodes. If you need to allocate an opcode, look
* for a name beginning with "__" and claim it.
* for one defined using OP_UN() and claim it.
*
* Includers must define an OPxyz macro of the following form:
*
* #define OPxyz(op, val, repKind, retType) ...
* #define OPxyz(op, number, repKind, retType, isCse) ...
*
* Selected arguments can then be used within the macro expansions.
* - op Bytecode name, token-pasted after "LIR_" to form an LOpcode.
* - val Bytecode value, which is the LOpcode enumerator value.
* - number Bytecode number, used as the LOpcode enum value.
* - repKind Indicates how the instruction is represented in memory; XYZ
* corresponds to LInsXYZ and LRK_XYZ.
* - retType Type (LTy) of the value returned by the instruction.
* - isCse 0 if the opcode can never be CSE'd safely, 1 if it always
* can, -1 if things are more complicated -- in which case
* isCseOpcode() shouldn't be called on this opcode.
*
* This file is best viewed with 128 columns:
12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678
@@ -63,239 +66,232 @@
* the following:
*
* OP___: for opcodes supported on all platforms.
* OP_UN: for opcodes not yet used on any platform.
* OP_32: for opcodes supported only on 32-bit platforms.
* OP_64: for opcodes supported only on 64-bit platforms.
* OP_SF: for opcodes supported only on SoftFloat platforms.
* OP_86: for opcodes supported only on i386/X64.
*/
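/* For illustration, a minimal includer that builds the LOpcode enum from
 * this table might look like the following (a sketch; the real includer
 * lives in LIR.h):
 *
 *     enum LOpcode {
 *     #define OP___(op, number, repKind, retType, isCse) \
 *         LIR_##op = number,
 *     #include "LIRopcode.tbl"
 *     #undef OP___
 *     };
 */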
#define OP_UN(n) OP___(__##n, n, None, Void, -1)
#ifdef NANOJIT_64BIT
# define OP_32(a, b, c, d) OP___(__##b, b, None, Void)
# define OP_64 OP___
# define OP_32(a, b, c, d, e) OP_UN(b)
# define OP_64 OP___
#else
# define OP_32 OP___
# define OP_64(a, b, c, d) OP___(__##b, b, None, Void)
# define OP_32 OP___
# define OP_64(a, b, c, d, e) OP_UN(b)
#endif
#if NJ_SOFTFLOAT_SUPPORTED
# define OP_SF OP___
# define OP_SF OP___
#else
# define OP_SF(a, b, c, d) OP___(__##b, b, None, Void)
# define OP_SF(a, b, c, d, e) OP_UN(b)
#endif
#if defined NANOJIT_IA32 || defined NANOJIT_X64
# define OP_86 OP___
# define OP_86 OP___
#else
# define OP_86(a, b, c, d) OP___(__##b, b, None, Void)
# define OP_86(a, b, c, d, e) OP_UN(b)
#endif
/* special operations */
OP___(start, 0, Op0, Void) // start of a fragment
OP___(regfence, 1, Op0, Void) // register fence, no register allocation is allowed across this meta instruction
OP___(skip, 2, Sk, Void) // used to link code chunks
/* non-pure operations */
OP___(ldsb, 3, Ld, I32) // 8-bit integer load, sign-extend to 32-bit
OP___(ldss, 4, Ld, I32) // 16-bit integer load, sign-extend to 32-bit
OP___(ldzb, 5, Ld, I32) // 8-bit integer load, zero extend to 32-bit
OP___(ldzs, 6, Ld, I32) // 16-bit integer load, zero extend to 32-bit
OP_32(iaddp, 7, Op2, I32) // 32-bit integer addition for temporary pointer calculations
OP_32(iparam, 8, P, I32) // load a parameter (32-bit register or stack location)
OP___(stb, 9, Sti, Void) // 8-bit integer store
OP___(ld, 10, Ld, I32) // 32-bit integer load
OP_32(ialloc, 11, I, I32) // allocate some stack space (result is a 32-bit address)
OP___(sti, 12, Sti, Void) // 32-bit integer store
OP___(ret, 13, Op1, Void) // return a 32-bit integer
OP___(live, 14, Op1, Void) // extend live range of a 32-bit integer
OP___(flive, 15, Op1, Void) // extend live range of a 64-bit float
OP___(icall, 16, C, I32) // subroutine call returning a 32-bit value
OP___(sts, 17, Sti, Void) // 16-bit integer store
// Special operations.
OP___(start, 0, Op0, Void, 0) // start of a fragment
OP___(regfence, 1, Op0, Void, 0) // register fence, no register allocation is allowed across this meta instruction
OP___(skip, 2, Sk, Void, 0) // used to link code chunks
/* guards */
OP___(x, 18, Op2, Void) // exit always
// Impure operations.
OP___(ldsb, 3, Ld, I32, -1) // 8-bit integer load, sign-extend to 32-bit
OP___(ldss, 4, Ld, I32, -1) // 16-bit integer load, sign-extend to 32-bit
OP___(ldzb, 5, Ld, I32, -1) // 8-bit integer load, zero-extend to 32-bit
OP___(ldzs, 6, Ld, I32, -1) // 16-bit integer load, zero-extend to 32-bit
OP_32(iaddp, 7, Op2, I32, 0) // 32-bit integer addition for temporary pointer calculations
OP_32(iparam, 8, P, I32, 0) // load a parameter (32-bit register or stack location)
OP___(stb, 9, Sti, Void, 0) // 8-bit integer store
OP___(ld, 10, Ld, I32, -1) // 32-bit integer load
OP_32(ialloc, 11, I, I32, 0) // allocate some stack space (result is a 32-bit address)
OP___(sti, 12, Sti, Void, 0) // 32-bit integer store
OP___(ret, 13, Op1, Void, 0) // return a 32-bit integer
OP___(live, 14, Op1, Void, 0) // extend live range of a 32-bit integer
OP___(flive, 15, Op1, Void, 0) // extend live range of a 64-bit float
OP___(icall, 16, C, I32, -1) // subroutine call returning a 32-bit value
OP___(sts, 17, Sti, Void, 0) // 16-bit integer store
/* branches */
OP___(j, 19, Op2, Void) // jump always
OP___(jt, 20, Op2, Void) // jump if true
OP___(jf, 21, Op2, Void) // jump if false
OP___(label, 22, Op0, Void) // a jump target (no machine code is emitted for this)
OP___(jtbl, 23, Jtbl, Void) // jump to address in table
OP___(x, 18, Op2, Void, 0) // exit always
/* operators */
// Branches. 'jt' and 'jf' must be adjacent so that (op ^ 1) gives the
// opposite one. Static assertions in LIR.h check this requirement.
OP___(j, 19, Op2, Void, 0) // jump always
OP___(jt, 20, Op2, Void, 0) // jump if true
OP___(jf, 21, Op2, Void, 0) // jump if false
OP___(label, 22, Op0, Void, 0) // a jump target (no machine code is emitted for this)
OP___(jtbl, 23, Jtbl, Void, 0) // jump to address in table
/*
* NB: Opcodes LIR_int through LIR_uge must remain continuous to aid in
* common-subexpression-elimination detection code.
*/
OP___(int, 24, I, I32) // constant 32-bit integer
OP___(cmov, 25, Op3, I32) // conditional move
OP___(int, 24, I, I32, 1) // constant 32-bit integer
OP___(cmov, 25, Op3, I32, 1) // conditional move
// LIR_callh is a hack that's only used on 32-bit platforms that use SoftFloat.
// Its operand is always a LIR_icall, but one that specifies a function that
// returns a 64-bit float. It indicates that the 64-bit float return value is
// being returned via two 32-bit integer registers. The result is always used
// as the second operand of a LIR_qjoin.
OP_SF(callh, 26, Op1, I32)
OP_SF(callh, 26, Op1, I32, 1)
// feq through fge must only be used on float arguments. They return integers.
// For all except feq, (op ^ 1) is the op which flips the
// left and right sides of the comparison, so (lt ^ 1) == gt, ie. the operator
// "<" xored with 1 gives ">". Similarly, (op ^ 3) is the complement of
// op, so (lt ^ 3) == ge, ie. the complement of the operator "<" is ">=",
// obtained by xoring with 3. NB: These opcodes must remain continuous so
// that comparison-opcode detection works correctly.
OP___(feq, 27, Op2, I32) // floating-point equality
OP___(flt, 28, Op2, I32) // floating-point less-than
OP___(fgt, 29, Op2, I32) // floating-point greater-than
OP___(fle, 30, Op2, I32) // floating-point less-than-or-equal
OP___(fge, 31, Op2, I32) // floating-point greater-than-or-equal
// 64-bit float comparisons. Their order must be preserved so that, except for
// 'feq', (op ^ 1) gives the opposite one (eg. flt ^ 1 == fgt). They must also
// remain contiguous so that opcode range checking works correctly.
// Static assertions in LIR.h check these requirements.
OP___(feq, 27, Op2, I32, 1) // floating-point equality
OP___(flt, 28, Op2, I32, 1) // floating-point less-than
OP___(fgt, 29, Op2, I32, 1) // floating-point greater-than
OP___(fle, 30, Op2, I32, 1) // floating-point less-than-or-equal
OP___(fge, 31, Op2, I32, 1) // floating-point greater-than-or-equal
OP___(ldcb, 32, Ld, I32) // non-volatile 8-bit integer load, zero-extended to 32-bit
OP___(ldcs, 33, Ld, I32) // non-volatile 16-bit integer load, zero-extended to 32-bit
OP___(ldc, 34, Ld, I32) // non-volatile 32-bit integer load, zero-extended to 32-bit
OP_UN(32)
OP_UN(33)
OP_UN(34)
OP___(neg, 35, Op1, I32) // 32-bit integer negation
OP___(add, 36, Op2, I32) // 32-bit integer addition
OP___(sub, 37, Op2, I32) // 32-bit integer subtraction
OP___(mul, 38, Op2, I32) // 32-bit integer multiplication
OP_86(div, 39, Op2, I32) // 32-bit integer division
OP___(neg, 35, Op1, I32, 1) // 32-bit integer negation
OP___(add, 36, Op2, I32, 1) // 32-bit integer addition
OP___(sub, 37, Op2, I32, 1) // 32-bit integer subtraction
OP___(mul, 38, Op2, I32, 1) // 32-bit integer multiplication
OP_86(div, 39, Op2, I32, 1) // 32-bit integer division
// LIR_mod is a hack. It's only used on i386/X64. The operand is the result
// of a LIR_div because on i386/X64 div and mod results are computed by the
// same instruction.
OP_86(mod, 40, Op1, I32) // 32-bit integer modulus
OP_86(mod, 40, Op1, I32, 1) // 32-bit integer modulus
OP___(and, 41, Op2, I32) // 32-bit bitwise AND
OP___(or, 42, Op2, I32) // 32-bit bitwise OR
OP___(xor, 43, Op2, I32) // 32-bit bitwise XOR
OP___(not, 44, Op1, I32) // 32-bit bitwise NOT
OP___(lsh, 45, Op2, I32) // 32-bit left shift
OP___(rsh, 46, Op2, I32) // 32-bit right shift with sign-extend (>>)
OP___(ush, 47, Op2, I32) // 32-bit unsigned right shift (>>>)
OP___(and, 41, Op2, I32, 1) // 32-bit bitwise AND
OP___(or, 42, Op2, I32, 1) // 32-bit bitwise OR
OP___(xor, 43, Op2, I32, 1) // 32-bit bitwise XOR
OP___(not, 44, Op1, I32, 1) // 32-bit bitwise NOT
OP___(lsh, 45, Op2, I32, 1) // 32-bit left shift
OP___(rsh, 46, Op2, I32, 1) // 32-bit right shift with sign-extend (>>)
OP___(ush, 47, Op2, I32, 1) // 32-bit unsigned right shift (>>>)
// Conditional guards, op^1 to complement. Only things that are
// isCond() can be passed to these.
OP___(xt, 48, Op2, Void) // exit if true (0x30 0011 0000)
OP___(xf, 49, Op2, Void) // exit if false (0x31 0011 0001)
// Conditional guards. 'xt' and 'xf' must be adjacent so that (op ^ 1) gives
// the opposite one. Static assertions in LIR.h check this requirement.
OP___(xt, 48, Op2, Void, 1) // exit if true (0x30 0011 0000)
OP___(xf, 49, Op2, Void, 1) // exit if false (0x31 0011 0001)
OP_SF(qlo, 50, Op1, I32) // get the low 32 bits of a 64-bit value
OP_SF(qhi, 51, Op1, I32) // get the high 32 bits of a 64-bit value
OP_SF(qlo, 50, Op1, I32, 1) // get the low 32 bits of a 64-bit value
OP_SF(qhi, 51, Op1, I32, 1) // get the high 32 bits of a 64-bit value
OP___(ldcsb, 52, Ld, I32) // non-volatile 8-bit integer load, sign-extended to 32-bit
OP___(ldcss, 53, Ld, I32) // non-volatile 16-bit integer load, sign-extended to 32-bit
OP_UN(52)
OP_UN(53)
OP___(addxov, 54, Op3, I32) // 32-bit integer addition; exit if overflow occurred, result is valid on either path
OP___(subxov, 55, Op3, I32) // 32-bit integer subtraction; exit if overflow occurred, result is valid on either path
OP___(mulxov, 56, Op3, I32) // 32-bit integer multiplication; exit if overflow occurred, result is valid on either path
OP___(addxov, 54, Op3, I32, 1) // 32-bit integer addition; exit if overflow occurred, result is valid on either path
OP___(subxov, 55, Op3, I32, 1) // 32-bit integer subtraction; exit if overflow occurred, result is valid on either path
OP___(mulxov, 56, Op3, I32, 1) // 32-bit integer multiplication; exit if overflow occurred, result is valid on either path
// Integer (32-bit) relational operators. (op ^ 1) is the op which flips the
// left and right sides of the comparison, so (lt ^ 1) == gt, or the operator
// "<" is xored with 1 to get ">". 'u' prefix indicates the unsigned integer
// variant.
// NB: These opcodes must remain continuous so that comparison-opcode detection
// works correctly.
OP___(eq, 57, Op2, I32) // integer equality
OP___(lt, 58, Op2, I32) // signed integer less-than (0x38 0011 1000)
OP___(gt, 59, Op2, I32) // signed integer greater-than (0x39 0011 1001)
OP___(le, 60, Op2, I32) // signed integer less-than-or-equal (0x3A 0011 1010)
OP___(ge, 61, Op2, I32) // signed integer greater-than-or-equal (0x3B 0011 1011)
OP___(ult, 62, Op2, I32) // unsigned integer less-than (0x3C 0011 1100)
OP___(ugt, 63, Op2, I32) // unsigned integer greater-than (0x3D 0011 1101)
OP___(ule, 64, Op2, I32) // unsigned integer less-than-or-equal (0x3E 0011 1110)
OP___(uge, 65, Op2, I32) // unsigned integer greater-than-or-equal (0x3F 0011 1111)
// 32-bit integer comparisons. Their order must be preserved so that, except
// for 'eq', (op ^ 1) gives the opposite one (eg. lt ^ 1 == gt). They must
// also remain contiguous so that opcode range checking works correctly.
// Static assertions in LIR.h check these requirements.
OP___(eq, 57, Op2, I32, 1) // integer equality
OP___(lt, 58, Op2, I32, 1) // signed integer less-than (0x38 0011 1000)
OP___(gt, 59, Op2, I32, 1) // signed integer greater-than (0x39 0011 1001)
OP___(le, 60, Op2, I32, 1) // signed integer less-than-or-equal (0x3A 0011 1010)
OP___(ge, 61, Op2, I32, 1) // signed integer greater-than-or-equal (0x3B 0011 1011)
OP___(ult, 62, Op2, I32, 1) // unsigned integer less-than (0x3C 0011 1100)
OP___(ugt, 63, Op2, I32, 1) // unsigned integer greater-than (0x3D 0011 1101)
OP___(ule, 64, Op2, I32, 1) // unsigned integer less-than-or-equal (0x3E 0011 1110)
OP___(uge, 65, Op2, I32, 1) // unsigned integer greater-than-or-equal (0x3F 0011 1111)
OP___(file, 66, Op1, Void) // source filename for debug symbols
OP___(line, 67, Op1, Void) // source line number for debug symbols
OP___(file, 66, Op1, Void, 0) // source filename for debug symbols
OP___(line, 67, Op1, Void, 0) // source line number for debug symbols
OP___(xbarrier, 68, Op2, Void) // memory barrier; doesn't exit, but flushes all values to the stack
OP___(xtbl, 69, Op2, Void) // exit via indirect jump
OP___(xbarrier, 68, Op2, Void, 0) // memory barrier; doesn't exit, but flushes all values to the stack
OP___(xtbl, 69, Op2, Void, 0) // exit via indirect jump
OP_64(qlive, 70, Op1, Void) // extend live range of a 64-bit integer
OP_64(qlive, 70, Op1, Void, 0) // extend live range of a 64-bit integer
OP_64(qaddp, 71, Op2, I64) // 64-bit integer addition for temp pointer calculations
OP_64(qparam, 72, P, I64) // load a parameter (64bit register or stack location)
OP_64(qaddp, 71, Op2, I64, 0) // 64-bit integer addition for temp pointer calculations
OP_64(qparam, 72, P, I64, 0) // load a parameter (64bit register or stack location)
OP___(ldf, 73, Ld, F64) // 64-bit float load
OP_64(ldq, 74, Ld, I64) // 64-bit integer load
OP___(ldf, 73, Ld, F64, -1) // 64-bit float load
OP_64(ldq, 74, Ld, I64, -1) // 64-bit integer load
OP_64(qalloc, 75, I, I64) // allocate some stack space (result is a 64-bit address)
OP_64(qalloc, 75, I, I64, 0) // allocate some stack space (result is a 64-bit address)
OP_64(stqi, 76, Sti, Void) // 64-bit integer store
OP_64(stqi, 76, Sti, Void, 0) // 64-bit integer store
OP___(st32f, 77, Sti, Void) // store 64-bit float as a 32-bit float (dropping precision)
OP___(ld32f, 78, Ld, F64) // load 32-bit float and widen to 64-bit float
OP___(st32f, 77, Sti, Void, 0) // store 64-bit float as a 32-bit float (dropping precision)
OP___(ld32f, 78, Ld, F64, -1) // load 32-bit float and widen to 64-bit float
OP___(fcall, 79, C, F64) // subroutine call returning 64-bit float value
OP_64(qcall, 80, C, I64) // subroutine call returning 64-bit integer value
OP___(fcall, 79, C, F64, -1) // subroutine call returning 64-bit float value
OP_64(qcall, 80, C, I64, -1) // subroutine call returning 64-bit integer value
OP___(stfi, 81, Sti, Void) // 64-bit float store
OP___(stfi, 81, Sti, Void, 0) // 64-bit float store
OP___(fret, 82, Op1, Void) // return a 64-bit float
OP_64(qret, 83, Op1, Void) // return a 64-bit integer
OP___(fret, 82, Op1, Void, 0) // return a 64-bit float
OP_64(qret, 83, Op1, Void, 0) // return a 64-bit integer
OP___(__84, 84, None, Void)
OP___(__85, 85, None, Void)
OP___(__86, 86, None, Void)
OP___(__87, 87, None, Void)
OP_UN(84)
OP_UN(85)
OP_UN(86)
OP_UN(87)
// All opcodes below this marker are subject to CSE.
OP_64(quad, 88, N64, I64, 1) // 64-bit integer constant value
OP_64(qcmov, 89, Op3, I64, 1) // 64-bit conditional move
OP_64(quad, 88, N64, I64) // 64-bit integer constant value
OP_64(qcmov, 89, Op3, I64) // 64-bit conditional move
OP_64(i2q, 90, Op1, I64, 1) // sign-extend i32 to i64
OP_64(u2q, 91, Op1, I64, 1) // zero-extend u32 to u64
OP___(i2f, 92, Op1, F64, 1) // convert a signed 32-bit integer to a float
OP___(u2f, 93, Op1, F64, 1) // convert an unsigned 32-bit integer to a float
OP___(f2i, 94, Op1, I32, 1) // f2i conversion, no exception raised, platform rounding rules.
OP_64(i2q, 90, Op1, I64) // sign-extend i32 to i64
OP_64(u2q, 91, Op1, I64) // zero-extend u32 to u64
OP___(i2f, 92, Op1, F64) // convert a signed 32-bit integer to a float
OP___(u2f, 93, Op1, F64) // convert an unsigned 32-bit integer to a float
OP___(f2i, 94, Op1, I32) // f2i conversion, no exception raised, platform rounding rules.
OP_UN(95)
OP_UN(96)
OP_UN(97)
OP_UN(98)
OP___(__95, 95, None, Void)
OP___(__96, 96, None, Void)
OP___(ldfc, 97, Ld, F64) // non-volatile 64-bit float load
OP_64(ldqc, 98, Ld, I64) // non-volatile 64-bit integer load
OP___(fneg, 99, Op1, F64) // floating-point negation
OP___(fadd, 100, Op2, F64) // floating-point addition
OP___(fsub, 101, Op2, F64) // floating-point subtraction
OP___(fmul, 102, Op2, F64) // floating-point multiplication
OP___(fdiv, 103, Op2, F64) // floating-point division
// LIR_fmod is just a place-holder opcode, eg. the back-ends cannot generate
OP___(fneg, 99, Op1, F64, 1) // floating-point negation
OP___(fadd, 100, Op2, F64, 1) // floating-point addition
OP___(fsub, 101, Op2, F64, 1) // floating-point subtraction
OP___(fmul, 102, Op2, F64, 1) // floating-point multiplication
OP___(fdiv, 103, Op2, F64, 1) // floating-point division
// LIR_fmod is just a place-holder opcode, ie. the back-ends cannot generate
// code for it. It's used in TraceMonkey briefly but is always demoted to a
// LIR_mod or converted to a function call before Nanojit has to do anything
// serious with it.
OP___(fmod, 104, Op2, F64) // floating-point modulus
OP___(fmod, 104, Op2, F64, 1) // floating-point modulus
OP_64(qiand, 105, Op2, I64) // 64-bit bitwise AND
OP_64(qior, 106, Op2, I64) // 64-bit bitwise OR
OP_64(qxor, 107, Op2, I64) // 64-bit bitwise XOR
OP___(__108, 108, None, Void)
OP_64(qilsh, 109, Op2, I64) // 64-bit left shift; 2nd operand is a 32-bit integer
OP_64(qirsh, 110, Op2, I64) // 64-bit signed right shift; 2nd operand is a 32-bit integer
OP_64(qursh, 111, Op2, I64) // 64-bit unsigned right shift; 2nd operand is a 32-bit integer
OP_64(qiadd, 112, Op2, I64) // 64-bit bitwise ADD
OP_64(qiand, 105, Op2, I64, 1) // 64-bit bitwise AND
OP_64(qior, 106, Op2, I64, 1) // 64-bit bitwise OR
OP_64(qxor, 107, Op2, I64, 1) // 64-bit bitwise XOR
OP_UN(108)
OP_64(qilsh, 109, Op2, I64, 1) // 64-bit left shift; 2nd operand is a 32-bit integer
OP_64(qirsh, 110, Op2, I64, 1) // 64-bit signed right shift; 2nd operand is a 32-bit integer
OP_64(qursh, 111, Op2, I64, 1) // 64-bit unsigned right shift; 2nd operand is a 32-bit integer
OP_64(qiadd, 112, Op2, I64, 1) // 64-bit bitwise ADD
OP___(ldc32f, 113, Ld, F64) // non-volatile load 32-bit float and widen to 64-bit float
OP_SF(qjoin, 114, Op2, F64) // join two 32-bit values (1st arg is low bits, 2nd is high)
OP_64(q2i, 115, Op1, I32) // truncate i64 to i32
OP___(__116, 116, None, Void)
OP___(__117, 117, None, Void)
OP___(float, 118, N64, F64) // 64-bit float constant value
OP_UN(113)
// Integer (64-bit) relational operators.
// NB: These opcodes must remain continuous so that comparison-opcode detection
// works correctly.
OP_64(qeq, 119, Op2, I32) // integer equality
OP_64(qlt, 120, Op2, I32) // signed integer less-than (0x78 0111 1000)
OP_64(qgt, 121, Op2, I32) // signed integer greater-than (0x79 0111 1001)
OP_64(qle, 122, Op2, I32) // signed integer less-than-or-equal (0x7A 0111 1010)
OP_64(qge, 123, Op2, I32) // signed integer greater-than-or-equal (0x7B 0111 1011)
OP_64(qult, 124, Op2, I32) // unsigned integer less-than (0x7C 0111 1100)
OP_64(qugt, 125, Op2, I32) // unsigned integer greater-than (0x7D 0111 1101)
OP_64(qule, 126, Op2, I32) // unsigned integer less-than-or-equal (0x7E 0111 1110)
OP_64(quge, 127, Op2, I32) // unsigned integer greater-than-or-equal (0x7F 0111 1111)
OP_SF(qjoin, 114, Op2, F64, 1) // join two 32-bit values (1st arg is low bits, 2nd is high)
OP_64(q2i, 115, Op1, I32, 1) // truncate i64 to i32
OP_UN(116)
OP_UN(117)
OP___(float, 118, N64, F64, 1) // 64-bit float constant value
// 64-bit integer comparisons. Their order must be preserved so that, except
// for 'qeq', (op ^ 1) gives the opposite one (eg. qlt ^ 1 == qgt). They must
// also remain contiguous so that opcode range checking works correctly.
// Static assertions in LIR.h check these requirements.
OP_64(qeq, 119, Op2, I32, 1) // integer equality
OP_64(qlt, 120, Op2, I32, 1) // signed integer less-than (0x78 0111 1000)
OP_64(qgt, 121, Op2, I32, 1) // signed integer greater-than (0x79 0111 1001)
OP_64(qle, 122, Op2, I32, 1) // signed integer less-than-or-equal (0x7A 0111 1010)
OP_64(qge, 123, Op2, I32, 1) // signed integer greater-than-or-equal (0x7B 0111 1011)
OP_64(qult, 124, Op2, I32, 1) // unsigned integer less-than (0x7C 0111 1100)
OP_64(qugt, 125, Op2, I32, 1) // unsigned integer greater-than (0x7D 0111 1101)
OP_64(qule, 126, Op2, I32, 1) // unsigned integer less-than-or-equal (0x7E 0111 1110)
OP_64(quge, 127, Op2, I32, 1) // unsigned integer greater-than-or-equal (0x7F 0111 1111)
#undef OP_UN
#undef OP_32
#undef OP_64
#undef OP_SF

View file

@@ -634,7 +634,7 @@ Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd)
if (_config.arm_vfp) {
fp_reg = findRegFor(arg, FpRegs);
NanoAssert(isKnownReg(fp_reg));
NanoAssert(deprecated_isKnownReg(fp_reg));
}
#ifdef NJ_ARM_EABI
@@ -722,7 +722,7 @@ Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd)
void
Assembler::asm_regarg(ArgSize sz, LInsp p, Register r)
{
NanoAssert(isKnownReg(r));
NanoAssert(deprecated_isKnownReg(r));
if (sz & ARGSIZE_MASK_INT)
{
// arg goes in specific register
@@ -765,7 +765,7 @@ Assembler::asm_stkarg(LInsp arg, int stkd)
bool isF64 = arg->isF64();
Register rr;
if (arg->isUsed() && (rr = arg->deprecated_getReg(), isKnownReg(rr))) {
if (arg->isUsed() && (rr = arg->deprecated_getReg(), deprecated_isKnownReg(rr))) {
// The argument resides somewhere in registers, so we simply need to
// push it onto the stack.
if (!_config.arm_vfp || !isF64) {
@@ -826,14 +826,14 @@ Assembler::asm_call(LInsp ins)
* sequence we'd get would be something like:
* MOV {R0-R3},params [from below]
* BL function [from below]
* MOV {R0-R3},spilled data [from evictScratchRegs()]
* MOV {R0-R3},spilled data [from evictScratchRegsExcept()]
* MOV Dx,{R0,R1} [from here]
* which is clearly broken.
*
* This is not a problem for non-floating point calls, because the
* restoring of spilled data into R0 is done via a call to
* deprecated_prepResultReg(R0) in the other branch of this if-then-else,
* meaning that evictScratchRegs() will not modify R0. However,
* meaning that evictScratchRegsExcept() will not modify R0. However,
* deprecated_prepResultReg is not aware of the concept of using a register pair
* (R0,R1) for the result of a single operation, so it can only be
* used here with the ultimate VFP register, and not R0/R1, which
@@ -846,7 +846,7 @@ Assembler::asm_call(LInsp ins)
// Do this after we've handled the call result, so we don't
// force the call result to be spilled unnecessarily.
evictScratchRegs();
evictScratchRegsExcept(0);
const CallInfo* call = ins->callInfo();
ArgSize sizes[MAXARGS];
@@ -872,7 +872,7 @@ Assembler::asm_call(LInsp ins)
NanoAssert(ins->opcode() == LIR_fcall);
if (!isKnownReg(rr)) {
if (!deprecated_isKnownReg(rr)) {
int d = deprecated_disp(ins);
NanoAssert(d != 0);
deprecated_freeRsrcOf(ins, false);
@@ -1326,8 +1326,7 @@ Assembler::asm_load64(LInsp ins)
switch (ins->opcode()) {
case LIR_ldf:
case LIR_ldfc:
if (_config.arm_vfp && isKnownReg(rr)) {
if (_config.arm_vfp && deprecated_isKnownReg(rr)) {
// VFP is enabled and the result will go into a register.
NanoAssert(IsFpReg(rr));
@@ -1341,7 +1340,7 @@ Assembler::asm_load64(LInsp ins)
// Either VFP is not available or the result needs to go into memory;
// in either case, VFP instructions are not required. Note that the
// result will never be loaded into registers if VFP is not available.
NanoAssert(!isKnownReg(rr));
NanoAssert(!deprecated_isKnownReg(rr));
NanoAssert(d != 0);
// Check that the offset is 8-byte (64-bit) aligned.
@@ -1353,9 +1352,8 @@ Assembler::asm_load64(LInsp ins)
return;
case LIR_ld32f:
case LIR_ldc32f:
if (_config.arm_vfp) {
if (isKnownReg(rr)) {
if (deprecated_isKnownReg(rr)) {
NanoAssert(IsFpReg(rr));
FCVTDS(rr, S14);
} else {
@@ -1415,8 +1413,8 @@ Assembler::asm_store64(LOpcode op, LInsp value, int dr, LInsp base)
Register rv = findRegFor(value, FpRegs);
NanoAssert(isKnownReg(rb));
NanoAssert(isKnownReg(rv));
NanoAssert(deprecated_isKnownReg(rb));
NanoAssert(deprecated_isKnownReg(rv));
Register baseReg = rb;
intptr_t baseOffset = dr;
@@ -1464,8 +1462,8 @@ Assembler::asm_store64(LOpcode op, LInsp value, int dr, LInsp base)
Register rv = findRegFor(value, FpRegs);
NanoAssert(isKnownReg(rb));
NanoAssert(isKnownReg(rv));
NanoAssert(deprecated_isKnownReg(rb));
NanoAssert(deprecated_isKnownReg(rv));
Register baseReg = rb;
intptr_t baseOffset = dr;
@@ -1535,8 +1533,7 @@ Assembler::asm_quad(LInsp ins)
deprecated_freeRsrcOf(ins, false);
if (_config.arm_vfp && isKnownReg(rr))
{
if (_config.arm_vfp && deprecated_isKnownReg(rr)) {
asm_spill(rr, d, false, true);
underrunProtect(4*4);
@@ -2086,7 +2083,7 @@ Assembler::asm_i2f(LInsp ins)
Register srcr = findRegFor(ins->oprnd1(), GpRegs);
// todo: support int value in memory, as per x86
NanoAssert(isKnownReg(srcr));
NanoAssert(deprecated_isKnownReg(srcr));
FSITOD(rr, S14);
FMSR(S14, srcr);
@@ -2099,7 +2096,7 @@ Assembler::asm_u2f(LInsp ins)
Register sr = findRegFor(ins->oprnd1(), GpRegs);
// todo: support int value in memory, as per x86
NanoAssert(isKnownReg(sr));
NanoAssert(deprecated_isKnownReg(sr));
FUITOD(rr, S14);
FMSR(S14, sr);
@@ -2135,8 +2132,6 @@ Assembler::asm_fop(LInsp ins)
LInsp rhs = ins->oprnd2();
LOpcode op = ins->opcode();
NanoAssert(op >= LIR_fadd && op <= LIR_fdiv);
// rr = ra OP rb
Register rr = deprecated_prepResultReg(ins, FpRegs);
@@ -2163,7 +2158,7 @@ Assembler::asm_fcmp(LInsp ins)
LInsp rhs = ins->oprnd2();
LOpcode op = ins->opcode();
NanoAssert(op >= LIR_feq && op <= LIR_fge);
NanoAssert(isFCmpOpcode(op));
Register ra, rb;
findRegFor2(FpRegs, lhs, ra, FpRegs, rhs, rb);
@@ -2183,7 +2178,7 @@ Assembler::asm_branch(bool branchOnFalse, LInsp cond, NIns* targ)
{
LOpcode condop = cond->opcode();
NanoAssert(cond->isCmp());
NanoAssert(_config.arm_vfp || ((condop < LIR_feq) || (condop > LIR_fge)));
NanoAssert(_config.arm_vfp || !isFCmpOpcode(condop));
// The old "never" condition code has special meaning on newer ARM cores,
// so use "always" as a sensible default code.
@@ -2360,8 +2355,8 @@ Assembler::asm_arith(LInsp ins)
: lhs->deprecated_getReg() );
// Don't re-use the registers we've already allocated.
NanoAssert(isKnownReg(rr));
NanoAssert(isKnownReg(ra));
NanoAssert(deprecated_isKnownReg(rr));
NanoAssert(deprecated_isKnownReg(ra));
allow &= ~rmask(rr);
allow &= ~rmask(ra);
@@ -2384,7 +2379,7 @@ Assembler::asm_arith(LInsp ins)
Register rs = deprecated_prepResultReg(ins, allow);
int d = findMemFor(lhs) + rhs->imm32();
NanoAssert(isKnownReg(rs));
NanoAssert(deprecated_isKnownReg(rs));
asm_add_imm(rs, FP, d);
}
@@ -2420,11 +2415,11 @@ Assembler::asm_arith(LInsp ins)
rb = ra;
} else {
rb = asm_binop_rhs_reg(ins);
if (!isKnownReg(rb))
if (!deprecated_isKnownReg(rb))
rb = findRegFor(rhs, allow);
allow &= ~rmask(rb);
}
NanoAssert(isKnownReg(rb));
NanoAssert(deprecated_isKnownReg(rb));
const Register SBZ = (Register)0;
switch (op)
@@ -2538,7 +2533,7 @@ Assembler::asm_neg_not(LInsp ins)
Register ra = ( !lhs->isInReg()
? findSpecificRegFor(lhs, rr)
: lhs->deprecated_getReg() );
NanoAssert(isKnownReg(ra));
NanoAssert(deprecated_isKnownReg(ra));
if (op == LIR_not)
MVN(rr, ra);
@@ -2558,7 +2553,6 @@ Assembler::asm_load32(LInsp ins)
switch (op) {
case LIR_ldzb:
case LIR_ldcb:
if (isU12(-d) || isU12(d)) {
LDRB(rr, ra, d);
} else {
@@ -2567,7 +2561,6 @@ Assembler::asm_load32(LInsp ins)
}
return;
case LIR_ldzs:
case LIR_ldcs:
// Some ARM machines require 2-byte alignment here.
// Similar to the ldcb/ldzb case, but the max offset is smaller.
if (isU8(-d) || isU8(d)) {
@@ -2578,7 +2571,6 @@ Assembler::asm_load32(LInsp ins)
}
return;
case LIR_ld:
case LIR_ldc:
// Some ARM machines require 4-byte alignment here.
if (isU12(-d) || isU12(d)) {
LDR(rr, ra, d);
@@ -2588,7 +2580,6 @@ Assembler::asm_load32(LInsp ins)
}
return;
case LIR_ldsb:
case LIR_ldcsb:
if (isU8(-d) || isU8(d)) {
LDRSB(rr, ra, d);
} else {
@@ -2597,7 +2588,6 @@ Assembler::asm_load32(LInsp ins)
}
return;
case LIR_ldss:
case LIR_ldcss:
if (isU8(-d) || isU8(d)) {
LDRSH(rr, ra, d);
} else {

View file

@@ -121,7 +121,7 @@ typedef enum {
FirstReg = R0,
LastReg = D6,
deprecated_UnknownReg = 32,
deprecated_UnknownReg = 32, // XXX: remove eventually, see bug 538924
S14 = 24
} Register;

View file

@@ -391,7 +391,7 @@ namespace nanojit
void Assembler::asm_regarg(ArgSize sz, LInsp p, Register r)
{
NanoAssert(isKnownReg(r));
NanoAssert(deprecated_isKnownReg(r));
if (sz & ARGSIZE_MASK_INT) {
// arg goes in specific register
if (p->isconst())
@@ -427,7 +427,7 @@ namespace nanojit
{
bool isF64 = arg->isF64();
Register rr;
if (arg->isUsed() && (rr = arg->deprecated_getReg(), isKnownReg(rr))) {
if (arg->isUsed() && (rr = arg->deprecated_getReg(), deprecated_isKnownReg(rr))) {
// The argument resides somewhere in registers, so we simply need to
// push it onto the stack.
if (!cpu_has_fpu || !isF64) {
@@ -542,7 +542,7 @@ namespace nanojit
Register ft = registerAllocTmp(FpRegs & ~(rmask(fr))); // allocate temporary register for constant
// todo: support int value in memory, as per x86
NanoAssert(isKnownReg(v));
NanoAssert(deprecated_isKnownReg(v));
// mtc1 $v,$ft
// bgez $v,1f
@@ -603,8 +603,6 @@ namespace nanojit
LInsp rhs = ins->oprnd2();
LOpcode op = ins->opcode();
NanoAssert(op >= LIR_fadd && op <= LIR_fdiv);
// rr = ra OP rb
Register rr = deprecated_prepResultReg(ins, FpRegs);
@@ -644,7 +642,7 @@ namespace nanojit
deprecated_freeRsrcOf(ins, false);
if (cpu_has_fpu && isKnownReg(rr)) {
if (cpu_has_fpu && deprecated_isKnownReg(rr)) {
asm_spill(rr, d, false, true);
asm_li_d(rr, ins->imm64_1(), ins->imm64_0());
}
@@ -670,8 +668,6 @@ namespace nanojit
void Assembler::asm_load64(LIns *ins)
{
NanoAssert(!ins->isop(LIR_ldq) && !ins->isop(LIR_ldqc));
NanoAssert(ins->isF64());
LIns* base = ins->oprnd1();
@@ -684,7 +680,7 @@ namespace nanojit
NanoAssert(IsGpReg(rbase));
deprecated_freeRsrcOf(ins, false);
if (cpu_has_fpu && isKnownReg(rd)) {
if (cpu_has_fpu && deprecated_isKnownReg(rd)) {
NanoAssert(IsFpReg(rd));
asm_ldst64 (false, rd, dr, rbase);
}
@@ -692,7 +688,7 @@ namespace nanojit
// Either FPU is not available or the result needs to go into memory;
// in either case, FPU instructions are not required. Note that the
// result will never be loaded into registers if FPU is not available.
NanoAssert(!isKnownReg(rd));
NanoAssert(!deprecated_isKnownReg(rd));
NanoAssert(ds != 0);
NanoAssert(isS16(dr) && isS16(dr+4));
@@ -881,23 +877,18 @@ namespace nanojit
Register rbase = getBaseReg(base, d, GpRegs);
switch (op) {
case LIR_ldcb:
case LIR_ldzb: // 8-bit integer load, zero-extend to 32-bit
asm_ldst(OP_LBU, rres, d, rbase);
break;
case LIR_ldcs:
case LIR_ldzs: // 16-bit integer load, zero-extend to 32-bit
asm_ldst(OP_LHU, rres, d, rbase);
break;
case LIR_ldcsb:
case LIR_ldsb: // 8-bit integer load, sign-extend to 32-bit
asm_ldst(OP_LB, rres, d, rbase);
break;
case LIR_ldcss:
case LIR_ldss: // 16-bit integer load, sign-extend to 32-bit
asm_ldst(OP_LH, rres, d, rbase);
break;
case LIR_ldc:
case LIR_ld: // 32-bit integer load
asm_ldst(OP_LW, rres, d, rbase);
break;
@@ -951,8 +942,8 @@ namespace nanojit
Register rb;
// Don't re-use the registers we've already allocated.
NanoAssert(isKnownReg(rr));
NanoAssert(isKnownReg(ra));
NanoAssert(deprecated_isKnownReg(rr));
NanoAssert(deprecated_isKnownReg(ra));
allow &= ~rmask(rr);
allow &= ~rmask(ra);
@@ -1020,7 +1011,7 @@ namespace nanojit
// general case, put rhs in register
rb = (rhs == lhs) ? ra : findRegFor(rhs, allow);
NanoAssert(isKnownReg(rb));
NanoAssert(deprecated_isKnownReg(rb));
switch (op) {
case LIR_add:
@@ -1114,8 +1105,7 @@ namespace nanojit
if (value->isconstq())
asm_store_imm64(value, dr, rbase);
else if (!cpu_has_fpu ||
value->isop(LIR_ldq) || value->isop(LIR_ldqc)) {
else if (!cpu_has_fpu || value->isop(LIR_ldq)) {
int ds = findMemFor(value);
@@ -1175,7 +1165,7 @@ namespace nanojit
void Assembler::asm_cmp(LOpcode condop, LIns *a, LIns *b, Register cr)
{
RegisterMask allow = condop >= LIR_feq && condop <= LIR_fge ? FpRegs : GpRegs;
RegisterMask allow = isFCmpOpcode(condop) ? FpRegs : GpRegs;
Register ra = findRegFor(a, allow);
Register rb = (b==a) ? ra : findRegFor(b, allow & ~rmask(ra));
@@ -1244,7 +1234,7 @@ namespace nanojit
LOpcode condop = cond->opcode();
NanoAssert(cond->isCond());
bool inrange;
RegisterMask allow = condop >= LIR_feq && condop <= LIR_fge ? FpRegs : GpRegs;
RegisterMask allow = isFCmpOpcode(condop) ? FpRegs : GpRegs;
LIns *a = cond->oprnd1();
LIns *b = cond->oprnd2();
Register ra = findRegFor(a, allow);
@@ -1319,7 +1309,7 @@ namespace nanojit
}
NIns *patch = NULL;
if (cpu_has_fpu && (condop >= LIR_feq && condop <= LIR_fge)) {
if (cpu_has_fpu && isFCmpOpcode(condop)) {
// c.xx.d $ra,$rb
// bc1x btarg
switch (condop) {
@@ -1568,7 +1558,7 @@ namespace nanojit
// Do this after we've handled the call result, so we don't
// force the call result to be spilled unnecessarily.
evictScratchRegs();
evictScratchRegsExcept(0);
const CallInfo* call = ins->callInfo();
ArgSize sizes[MAXARGS];

View file

@@ -88,7 +88,7 @@ namespace nanojit
// Wellknown register names used by code generator
FirstReg = ZERO,
LastReg = F31,
deprecated_UnknownReg = 127
deprecated_UnknownReg = 127 // XXX: remove eventually, see bug 538924
} Register;

View file

@@ -139,7 +139,6 @@ namespace nanojit
switch(ins->opcode()) {
case LIR_ldzb:
case LIR_ldcb:
if (isS16(d)) {
LBZ(rr, d, ra);
} else {
@@ -148,7 +147,6 @@ namespace nanojit
}
return;
case LIR_ldzs:
case LIR_ldcs:
// these are expected to be 2 or 4-byte aligned
if (isS16(d)) {
LHZ(rr, d, ra);
@@ -158,7 +156,6 @@ namespace nanojit
}
return;
case LIR_ld:
case LIR_ldc:
// these are expected to be 4-byte aligned
if (isS16(d)) {
LWZ(rr, d, ra);
@@ -169,8 +166,6 @@ namespace nanojit
return;
case LIR_ldsb:
case LIR_ldss:
case LIR_ldcsb:
case LIR_ldcss:
NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
return;
default:
@@ -213,13 +208,10 @@ namespace nanojit
switch (ins->opcode()) {
case LIR_ldf:
case LIR_ldfc:
CASE64(LIR_ldq:)
CASE64(LIR_ldqc:)
// handled by mainline code below for now
break;
case LIR_ld32f:
case LIR_ldc32f:
NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
return;
default:
@@ -230,7 +222,7 @@ namespace nanojit
LIns* base = ins->oprnd1();
#ifdef NANOJIT_64BIT
Register rr = ins->deprecated_getReg();
if (isKnownReg(rr) && (rmask(rr) & FpRegs)) {
if (deprecated_isKnownReg(rr) && (rmask(rr) & FpRegs)) {
// FPR already assigned, fine, use it
deprecated_freeRsrcOf(ins, false);
} else {
@@ -540,19 +532,19 @@ namespace nanojit
}
void Assembler::asm_cmp(LOpcode condop, LIns *a, LIns *b, ConditionRegister cr) {
RegisterMask allow = condop >= LIR_feq && condop <= LIR_fge ? FpRegs : GpRegs;
RegisterMask allow = isFCmpOpcode(condop) ? FpRegs : GpRegs;
Register ra = findRegFor(a, allow);
#if !PEDANTIC
if (b->isconst()) {
int32_t d = b->imm32();
if (isS16(d)) {
if (condop >= LIR_eq && condop <= LIR_ge) {
if (isSICmpOpcode(condop)) {
CMPWI(cr, ra, d);
return;
}
#if defined NANOJIT_64BIT
if (condop >= LIR_qeq && condop <= LIR_qge) {
if (isSQCmpOpcode(condop)) {
CMPDI(cr, ra, d);
TODO(cmpdi);
return;
@@ -560,12 +552,12 @@ namespace nanojit
#endif
}
if (isU16(d)) {
if ((condop == LIR_eq || condop >= LIR_ult && condop <= LIR_uge)) {
if (isUICmpOpcode(condop)) {
CMPLWI(cr, ra, d);
return;
}
#if defined NANOJIT_64BIT
if ((condop == LIR_qeq || condop >= LIR_qult && condop <= LIR_quge)) {
if (isUQCmpOpcode(condop)) {
CMPLDI(cr, ra, d);
TODO(cmpldi);
return;
@@ -577,21 +569,21 @@ namespace nanojit
// general case
Register rb = b==a ? ra : findRegFor(b, allow & ~rmask(ra));
if (condop >= LIR_eq && condop <= LIR_ge) {
if (isSICmpOpcode(condop)) {
CMPW(cr, ra, rb);
}
else if (condop >= LIR_ult && condop <= LIR_uge) {
else if (isUICmpOpcode(condop)) {
CMPLW(cr, ra, rb);
}
#if defined NANOJIT_64BIT
else if (condop >= LIR_qeq && condop <= LIR_qge) {
else if (isSQCmpOpcode(condop)) {
CMPD(cr, ra, rb);
}
else if (condop >= LIR_qult && condop <= LIR_quge) {
else if (isUQCmpOpcode(condop)) {
CMPLD(cr, ra, rb);
}
#endif
else if (condop >= LIR_feq && condop <= LIR_fge) {
else if (isFCmpOpcode(condop)) {
// set the lt/gt bit for fle/fge. We don't do this for
// int/uint because in those cases we can invert the branch condition.
// for float, we can't because of unordered comparisons
@@ -688,7 +680,7 @@ namespace nanojit
// Do this after we've handled the call result, so we don't
// force the call result to be spilled unnecessarily.
evictScratchRegs();
evictScratchRegsExcept(0);
const CallInfo* call = ins->callInfo();
ArgSize sizes[MAXARGS];
@@ -796,7 +788,7 @@ namespace nanojit
else if (sz == ARGSIZE_F) {
if (p->isUsed()) {
Register rp = p->deprecated_getReg();
if (!isKnownReg(rp) || !IsFpReg(rp)) {
if (!deprecated_isKnownReg(rp) || !IsFpReg(rp)) {
// load it into the arg reg
int d = findMemFor(p);
LFD(r, d, FP);
@@ -1039,7 +1031,7 @@ namespace nanojit
void Assembler::asm_quad(LIns *ins) {
#ifdef NANOJIT_64BIT
Register r = ins->deprecated_getReg();
if (isKnownReg(r) && (rmask(r) & FpRegs)) {
if (deprecated_isKnownReg(r) && (rmask(r) & FpRegs)) {
// FPR already assigned, fine, use it
deprecated_freeRsrcOf(ins, false);
} else {

View file

@@ -162,7 +162,7 @@ namespace nanojit
Rlr = 8,
Rctr = 9,
deprecated_UnknownReg = 127,
deprecated_UnknownReg = 127, // XXX: remove eventually, see bug 538924
FirstReg = R0,
LastReg = F31
};

View file

@@ -159,7 +159,7 @@ namespace nanojit
// Do this after we've handled the call result, so we don't
// force the call result to be spilled unnecessarily.
evictScratchRegs();
evictScratchRegsExcept(0);
const CallInfo* call = ins->callInfo();
@@ -330,11 +330,9 @@ namespace nanojit
{
switch (ins->opcode()) {
case LIR_ldf:
case LIR_ldfc:
// handled by mainline code below for now
break;
case LIR_ld32f:
case LIR_ldc32f:
NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
return;
default:
@@ -398,7 +396,7 @@ namespace nanojit
return;
}
if (value->isop(LIR_ldf) || value->isop(LIR_ldfc))
if (value->isop(LIR_ldf))
{
// value is 64bit struct or int64_t, or maybe a double.
// it may be live in an FPU reg. Either way, don't
@@ -458,7 +456,7 @@ namespace nanojit
NIns* at = 0;
LOpcode condop = cond->opcode();
NanoAssert(cond->isCmp());
if (condop >= LIR_feq && condop <= LIR_fge)
if (isFCmpOpcode(condop))
{
return asm_fbranch(branchOnFalse, cond, targ);
}
@@ -574,7 +572,7 @@ namespace nanojit
Register r = deprecated_prepResultReg(ins, AllowableFlagRegs);
underrunProtect(8);
LOpcode condop = ins->opcode();
NanoAssert(condop >= LIR_feq && condop <= LIR_fge);
NanoAssert(isFCmpOpcode(condop));
if (condop == LIR_feq)
MOVFEI(1, 0, 0, 0, r);
else if (condop == LIR_fle)
@@ -737,21 +735,16 @@ namespace nanojit
Register ra = getBaseReg(base, d, GpRegs);
switch(op) {
case LIR_ldzb:
case LIR_ldcb:
LDUB32(ra, d, rr);
break;
case LIR_ldzs:
case LIR_ldcs:
LDUH32(ra, d, rr);
break;
case LIR_ld:
case LIR_ldc:
LDSW32(ra, d, rr);
break;
case LIR_ldsb:
case LIR_ldss:
case LIR_ldcsb:
case LIR_ldcss:
NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
return;
default:
@@ -932,7 +925,7 @@ namespace nanojit
{
NIns *at = 0;
LOpcode condop = cond->opcode();
NanoAssert(condop >= LIR_feq && condop <= LIR_fge);
NanoAssert(isFCmpOpcode(condop));
underrunProtect(32);
intptr_t tt = ((intptr_t)targ - (intptr_t)_nIns + 8) >> 2;
// !targ means that it needs patch.

View file

@@ -166,7 +166,7 @@ namespace nanojit
FirstReg = 0,
LastReg = 29,
deprecated_UnknownReg = 30
deprecated_UnknownReg = 30 // XXX: remove eventually, see bug 538924
}
Register;

View file

@@ -620,63 +620,35 @@ namespace nanojit
}
}
// register allocation for 2-address style ops of the form R = R (op) B
void Assembler::regalloc_binary(LIns *ins, RegisterMask allow, Register &rr, Register &ra, Register &rb) {
#ifdef _DEBUG
RegisterMask originalAllow = allow;
#endif
LIns *a = ins->oprnd1();
LIns *b = ins->oprnd2();
if (a != b) {
rb = findRegFor(b, allow);
allow &= ~rmask(rb);
}
rr = deprecated_prepResultReg(ins, allow);
// if this is last use of a in reg, we can re-use result reg
if (!a->isInReg()) {
ra = findSpecificRegForUnallocated(a, rr);
} else if (!(allow & rmask(a->getReg()))) {
// 'a' already has a register assigned, but it's not valid.
// To make sure floating point operations stay in FPU registers
// as much as possible, make sure that only a few opcodes are
// reserving GPRs.
NanoAssert(a->isop(LIR_quad) || a->isop(LIR_float) ||
a->isop(LIR_ldf) || a->isop(LIR_ldfc) ||
a->isop(LIR_ldq) || a->isop(LIR_ldqc) ||
a->isop(LIR_ld32f) || a->isop(LIR_ldc32f) ||
a->isop(LIR_u2f) || a->isop(LIR_fcall));
allow &= ~rmask(rr);
ra = findRegFor(a, allow);
} else {
ra = a->getReg();
}
if (a == b) {
rb = ra;
}
NanoAssert(originalAllow & rmask(rr));
NanoAssert(originalAllow & rmask(ra));
NanoAssert(originalAllow & rmask(rb));
}
void Assembler::asm_qbinop(LIns *ins) {
asm_arith(ins);
}
void Assembler::asm_shift(LIns *ins) {
// shift require rcx for shift count
// Shift requires rcx for shift count.
LIns *a = ins->oprnd1();
LIns *b = ins->oprnd2();
if (b->isconst()) {
asm_shift_imm(ins);
return;
}
Register rr, ra;
if (b != ins->oprnd1()) {
if (a != b) {
findSpecificRegFor(b, RCX);
regalloc_unary(ins, GpRegs & ~rmask(RCX), rr, ra);
beginOp1Regs(ins, GpRegs & ~rmask(RCX), rr, ra);
} else {
// a == b means both must be in RCX
regalloc_unary(ins, rmask(RCX), rr, ra);
// Nb: this is just like beginOp1Regs() except that it asserts
// that ra is in GpRegs instead of rmask(RCX) -- this is
// necessary for the a==b case because 'a' might not be in RCX
// (which is ok, the MR(rr, ra) below will move it into RCX).
rr = prepareResultReg(ins, rmask(RCX));
// If 'a' isn't in a register, it can be clobbered by 'ins'.
ra = a->isInReg() ? a->getReg() : rr;
NanoAssert(rmask(ra) & GpRegs);
}
switch (ins->opcode()) {
default:
TODO(asm_shift);
@@ -689,11 +661,14 @@ namespace nanojit
}
if (rr != ra)
MR(rr, ra);
endOpRegs(ins, rr, ra);
}
void Assembler::asm_shift_imm(LIns *ins) {
Register rr, ra;
regalloc_unary(ins, GpRegs, rr, ra);
beginOp1Regs(ins, GpRegs, rr, ra);
int shift = ins->oprnd2()->imm32() & 63;
switch (ins->opcode()) {
default: TODO(shiftimm);
@@ -706,6 +681,8 @@ namespace nanojit
}
if (rr != ra)
MR(rr, ra);
endOpRegs(ins, rr, ra);
}
static bool isImm32(LIns *ins) {
@ -715,21 +692,22 @@ namespace nanojit
return ins->isconst() ? ins->imm32() : int32_t(ins->imm64());
}
// binary op, integer regs, rhs is int32 const
// Binary op, integer regs, rhs is int32 constant.
void Assembler::asm_arith_imm(LIns *ins) {
LIns *b = ins->oprnd2();
int32_t imm = getImm32(b);
LOpcode op = ins->opcode();
Register rr, ra;
if (op == LIR_mul || op == LIR_mulxov) {
// imul has true 3-addr form, it doesn't clobber ra
rr = deprecated_prepResultReg(ins, GpRegs);
LIns *a = ins->oprnd1();
ra = findRegFor(a, GpRegs);
// Special case: imul-by-imm has true 3-addr form. So we don't
// need the MR(rr, ra) after the IMULI.
beginOp1Regs(ins, GpRegs, rr, ra);
IMULI(rr, ra, imm);
endOpRegs(ins, rr, ra);
return;
}
regalloc_unary(ins, GpRegs, rr, ra);
beginOp1Regs(ins, GpRegs, rr, ra);
if (isS8(imm)) {
switch (ins->opcode()) {
default: TODO(arith_imm8);
@ -765,35 +743,63 @@ namespace nanojit
}
if (rr != ra)
MR(rr, ra);
endOpRegs(ins, rr, ra);
}
void Assembler::asm_div_mod(LIns *ins) {
LIns *div;
if (ins->opcode() == LIR_mod) {
// LIR_mod expects the LIR_div to be near
div = ins->oprnd1();
deprecated_prepResultReg(ins, rmask(RDX));
} else {
div = ins;
evictIfActive(RDX);
}
// Generates code for a LIR_div that doesn't have a subsequent LIR_mod.
void Assembler::asm_div(LIns *div) {
NanoAssert(div->isop(LIR_div));
LIns *a = div->oprnd1();
LIns *b = div->oprnd2();
LIns *lhs = div->oprnd1();
LIns *rhs = div->oprnd2();
evictIfActive(RDX);
prepareResultReg(div, rmask(RAX));
deprecated_prepResultReg(div, rmask(RAX));
Register rb = findRegFor(b, GpRegs & ~(rmask(RAX)|rmask(RDX)));
Register ra = a->isInReg() ? a->getReg() : RAX;
Register rhsReg = findRegFor(rhs, GpRegs & ~(rmask(RAX)|rmask(RDX)));
Register lhsReg = !lhs->isInReg()
? findSpecificRegForUnallocated(lhs, RAX)
: lhs->getReg();
IDIV(rhsReg);
IDIV(rb);
SARI(RDX, 31);
MR(RDX, RAX);
if (RAX != lhsReg)
MR(RAX, lhsReg);
if (RAX != ra)
MR(RAX, ra);
freeResourcesOf(div);
if (!a->isInReg()) {
NanoAssert(ra == RAX);
findSpecificRegForUnallocated(a, RAX);
}
}
// Generates code for a LIR_mod(LIR_div(divL, divR)) sequence.
void Assembler::asm_div_mod(LIns *mod) {
LIns *div = mod->oprnd1();
NanoAssert(mod->isop(LIR_mod));
NanoAssert(div->isop(LIR_div));
LIns *divL = div->oprnd1();
LIns *divR = div->oprnd2();
prepareResultReg(mod, rmask(RDX));
prepareResultReg(div, rmask(RAX));
Register rDivR = findRegFor(divR, GpRegs & ~(rmask(RAX)|rmask(RDX)));
Register rDivL = divL->isInReg() ? divL->getReg() : RAX;
IDIV(rDivR);
SARI(RDX, 31);
MR(RDX, RAX);
if (RAX != rDivL)
MR(RAX, rDivL);
freeResourcesOf(mod);
freeResourcesOf(div);
if (!divL->isInReg()) {
NanoAssert(rDivL == RAX);
findSpecificRegForUnallocated(divL, RAX);
}
}
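
Read bottom-up (nanojit assembles backwards), the emitted sequence is: mov rdx,rax; sar rdx,31; idiv rhs -- i.e. sign-extend the 32-bit dividend into RDX:RAX, then divide, leaving the quotient in RAX (LIR_div) and the remainder in RDX (LIR_mod). A minimal C++ sketch of the values this produces (illustrative only, not nanojit code):

    #include <cstdint>
    #include <cassert>

    // x86 idiv truncates toward zero: quotient -> RAX, remainder -> RDX.
    struct DivMod { int32_t quot, rem; };

    DivMod divmod32(int32_t lhs, int32_t rhs) {
        DivMod r = { lhs / rhs, lhs % rhs };  // C++ '/' and '%' match idiv here
        return r;
    }

    int main() {
        DivMod r = divmod32(-7, 2);
        assert(r.quot == -3 && r.rem == -1);  // truncation toward zero
        return 0;
    }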
// binary op with integer registers
@ -807,9 +813,13 @@ namespace nanojit
asm_shift(ins);
return;
case LIR_mod:
case LIR_div:
asm_div_mod(ins);
return;
case LIR_div:
// Nb: if the div feeds into a mod it will be handled by
// asm_div_mod() rather than here.
asm_div(ins);
return;
default:
break;
}
@ -819,7 +829,7 @@ namespace nanojit
asm_arith_imm(ins);
return;
}
regalloc_binary(ins, GpRegs, rr, ra, rb);
beginOp2Regs(ins, GpRegs, rr, ra, rb);
switch (ins->opcode()) {
default: TODO(asm_arith);
case LIR_or: ORLRR(rr, rb); break;
@ -838,16 +848,15 @@ namespace nanojit
case LIR_qaddp: ADDQRR(rr, rb); break;
}
if (rr != ra)
MR(rr,ra);
MR(rr, ra);
endOpRegs(ins, rr, ra);
}
// binary op with fp registers
// Binary op with fp registers.
void Assembler::asm_fop(LIns *ins) {
// NB, rb is always filled in by regalloc_binary,
// but compilers can't always tell that: init to UnspecifiedReg
// to avoid a warning.
Register rr, ra, rb = UnspecifiedReg;
regalloc_binary(ins, FpRegs, rr, ra, rb);
Register rr, ra, rb = UnspecifiedReg; // init to shut GCC up
beginOp2Regs(ins, FpRegs, rr, ra, rb);
switch (ins->opcode()) {
default: TODO(asm_fop);
case LIR_fdiv: DIVSD(rr, rb); break;
@ -858,28 +867,29 @@ namespace nanojit
if (rr != ra) {
asm_nongp_copy(rr, ra);
}
endOpRegs(ins, rr, ra);
}
void Assembler::asm_neg_not(LIns *ins) {
Register rr, ra;
regalloc_unary(ins, GpRegs, rr, ra);
NanoAssert(IsGpReg(ra));
beginOp1Regs(ins, GpRegs, rr, ra);
if (ins->isop(LIR_not))
NOT(rr);
else
NEG(rr);
if (rr != ra)
MR(rr, ra);
endOpRegs(ins, rr, ra);
}
void Assembler::asm_call(LIns *ins) {
Register retReg = ( ins->isop(LIR_fcall) ? XMM0 : retRegs[0] );
deprecated_prepResultReg(ins, rmask(retReg));
Register rr = ( ins->isop(LIR_fcall) ? XMM0 : retRegs[0] );
prepareResultReg(ins, rmask(rr));
// Do this after we've handled the call result, so we don't
// force the call result to be spilled unnecessarily.
evictScratchRegs();
evictScratchRegsExcept(rmask(rr));
const CallInfo *call = ins->callInfo();
ArgSize sizes[MAXARGS];
@ -906,6 +916,9 @@ namespace nanojit
CALLRAX();
}
// Call this now so that the arg setup can involve 'rr'.
freeResourcesOf(ins);
#ifdef _WIN64
int stk_used = 32; // always reserve 32-byte shadow area
#else
@ -994,14 +1007,15 @@ namespace nanojit
void Assembler::asm_q2i(LIns *ins) {
Register rr, ra;
regalloc_unary(ins, GpRegs, rr, ra);
beginOp1Regs(ins, GpRegs, rr, ra);
NanoAssert(IsGpReg(ra));
MOVLR(rr, ra); // 32bit mov zeros the upper 32bits of the target
endOpRegs(ins, rr, ra);
}
void Assembler::asm_promote(LIns *ins) {
Register rr, ra;
regalloc_unary(ins, GpRegs, rr, ra);
beginOp1Regs(ins, GpRegs, rr, ra);
NanoAssert(IsGpReg(ra));
if (ins->isop(LIR_u2q)) {
MOVLR(rr, ra); // 32bit mov zeros the upper 32bits of the target
@ -1009,38 +1023,44 @@ namespace nanojit
NanoAssert(ins->isop(LIR_i2q));
MOVSXDR(rr, ra); // sign extend 32->64
}
endOpRegs(ins, rr, ra);
}
// the CVTSI2SD instruction only writes to the low 64bits of the target
// The CVTSI2SD instruction only writes to the low 64bits of the target
// XMM register, which hinders register renaming and makes dependence
// chains longer. So we precede with XORPS to clear the target register.
void Assembler::asm_i2f(LIns *ins) {
Register r = deprecated_prepResultReg(ins, FpRegs);
Register b = findRegFor(ins->oprnd1(), GpRegs);
CVTSI2SD(r, b); // cvtsi2sd xmmr, b only writes xmm:0:64
XORPS(r); // xorps xmmr,xmmr to break dependency chains
LIns *a = ins->oprnd1();
NanoAssert(ins->isF64() && a->isI32());
Register rr = prepareResultReg(ins, FpRegs);
Register ra = findRegFor(a, GpRegs);
CVTSI2SD(rr, ra); // cvtsi2sd xmmr, b only writes xmm:0:64
XORPS(rr); // xorps xmmr,xmmr to break dependency chains
freeResourcesOf(ins);
}
void Assembler::asm_u2f(LIns *ins) {
Register r = deprecated_prepResultReg(ins, FpRegs);
Register b = findRegFor(ins->oprnd1(), GpRegs);
NanoAssert(ins->oprnd1()->isI32());
// since oprnd1 value is 32bit, its okay to zero-extend the value without worrying about clobbering.
CVTSQ2SD(r, b); // convert int64 to double
XORPS(r); // xorps xmmr,xmmr to break dependency chains
MOVLR(b, b); // zero extend u32 to int64
LIns *a = ins->oprnd1();
NanoAssert(ins->isF64() && a->isI32());
Register rr = prepareResultReg(ins, FpRegs);
Register ra = findRegFor(a, GpRegs);
// Because oprnd1 is 32bit, it's ok to zero-extend it without worrying about clobbering.
CVTSQ2SD(rr, ra); // convert int64 to double
XORPS(rr); // xorps xmmr,xmmr to break dependency chains
MOVLR(ra, ra); // zero extend u32 to int64
freeResourcesOf(ins);
}
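
There is no unsigned 32-bit-to-double convert on x64, so the code above zero-extends the u32 (MOVLR ra, ra) and uses the signed 64-bit CVTSQ2SD, which is exact for every u32. A small sketch of the identity (illustrative only):

    #include <cstdint>
    #include <cassert>

    double u32ToDouble(uint32_t v) {
        int64_t wide = static_cast<int64_t>(v);  // zero-extend; never negative
        return static_cast<double>(wide);        // exact: any u32 fits in 53 bits
    }

    int main() {
        assert(u32ToDouble(0xFFFFFFFFu) == 4294967295.0);
        return 0;
    }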
void Assembler::asm_f2i(LIns *ins) {
LIns *lhs = ins->oprnd1();
NanoAssert(ins->isI32() && lhs->isF64());
Register r = prepareResultReg(ins, GpRegs);
Register b = findRegFor(lhs, FpRegs);
CVTSD2SI(r, b);
LIns *a = ins->oprnd1();
NanoAssert(ins->isI32() && a->isF64());
Register rr = prepareResultReg(ins, GpRegs);
Register rb = findRegFor(a, FpRegs);
CVTSD2SI(rr, rb);
freeResourcesOf(ins);
}
@ -1052,11 +1072,16 @@ namespace nanojit
NanoAssert((ins->isop(LIR_cmov) && iftrue->isI32() && iffalse->isI32()) ||
(ins->isop(LIR_qcmov) && iftrue->isI64() && iffalse->isI64()));
// this code assumes that neither LD nor MR nor MRcc set any of the condition flags.
// (This is true on Intel, is it true on all architectures?)
const Register rr = deprecated_prepResultReg(ins, GpRegs);
const Register rf = findRegFor(iffalse, GpRegs & ~rmask(rr));
Register rr = prepareResultReg(ins, GpRegs);
Register rf = findRegFor(iffalse, GpRegs & ~rmask(rr));
// If 'iftrue' isn't in a register, it can be clobbered by 'ins'.
Register rt = iftrue->isInReg() ? iftrue->getReg() : rr;
// WARNING: We cannot generate any code that affects the condition
// codes between the MRcc generation here and the asm_cmp() call
// below. See asm_cmp() for more details.
LOpcode condop = cond->opcode();
if (ins->opcode() == LIR_cmov) {
switch (condop) {
@ -1085,7 +1110,15 @@ namespace nanojit
default: NanoAssert(0); break;
}
}
/*const Register rt =*/ findSpecificRegFor(iftrue, rr);
if (rr != rt)
MR(rr, rt);
freeResourcesOf(ins);
if (!iftrue->isInReg()) {
NanoAssert(rt == rr);
findSpecificRegForUnallocated(iftrue, rr);
}
asm_cmp(cond);
}
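
In execution order the lowering above is: compare; mov rr, rt (iftrue); then a conditional move of rf (iffalse) guarded by the *opposite* condition. A toy C++ model for the LIR_eq case (illustrative only; names are ours):

    #include <cstdint>
    #include <cassert>

    int32_t cmovEq(int32_t a, int32_t b, int32_t iftrue, int32_t iffalse) {
        int32_t rr = iftrue;       // MR(rr, rt)
        if (a != b) rr = iffalse;  // MRNE(rr, rf): move when *not* equal
        return rr;
    }

    int main() {
        assert(cmovEq(1, 1, 10, 20) == 10);
        assert(cmovEq(1, 2, 10, 20) == 20);
        return 0;
    }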
@ -1096,10 +1129,10 @@ namespace nanojit
}
NanoAssert(cond->isCmp());
LOpcode condop = cond->opcode();
if (condop >= LIR_feq && condop <= LIR_fge)
if (isFCmpOpcode(condop))
return asm_fbranch(onFalse, cond, target);
// We must ensure there's room for the instr before calculating
// We must ensure there's room for the instruction before calculating
// the offset. And the offset determines the opcode (8bit or 32bit).
if (target && isTargetWithinS8(target)) {
if (onFalse) {
@ -1158,7 +1191,7 @@ namespace nanojit
}
}
}
NIns *patch = _nIns; // addr of instr to patch
NIns *patch = _nIns; // address of instruction to patch
asm_cmp(cond);
return patch;
}
@ -1176,6 +1209,9 @@ namespace nanojit
JO( 8, target);
}
// WARNING: this function cannot generate code that will affect the
// condition codes prior to the generation of the test/cmp. See
// Nativei386.cpp:asm_cmp() for details.
void Assembler::asm_cmp(LIns *cond) {
LIns *b = cond->oprnd2();
if (isImm32(b)) {
@ -1192,10 +1228,10 @@ namespace nanojit
}
LOpcode condop = cond->opcode();
if (LIR_qeq <= condop && condop <= LIR_quge) {
if (isQCmpOpcode(condop)) {
CMPQR(ra, rb);
} else {
NanoAssert(LIR_eq <= condop && condop <= LIR_uge);
NanoAssert(isICmpOpcode(condop));
CMPLR(ra, rb);
}
}
@ -1206,13 +1242,13 @@ namespace nanojit
LIns *b = cond->oprnd2();
Register ra = findRegFor(a, GpRegs);
int32_t imm = getImm32(b);
if (LIR_qeq <= condop && condop <= LIR_quge) {
if (isQCmpOpcode(condop)) {
if (isS8(imm))
CMPQR8(ra, imm);
else
CMPQRI(ra, imm);
} else {
NanoAssert(LIR_eq <= condop && condop <= LIR_uge);
NanoAssert(isICmpOpcode(condop));
if (isS8(imm))
CMPLR8(ra, imm);
else
@ -1220,8 +1256,8 @@ namespace nanojit
}
}
// compiling floating point branches
// discussion in https://bugzilla.mozilla.org/show_bug.cgi?id=443886
// Compiling floating point branches.
// Discussion in https://bugzilla.mozilla.org/show_bug.cgi?id=443886.
//
// fucom/p/pp: c3 c2 c0 jae ja jbe jb je jne
// ucomisd: Z P C !C !C&!Z C|Z C Z !Z
@ -1231,7 +1267,7 @@ namespace nanojit
// less < 0 0 1 T T T
// equal = 1 0 0 T T T
//
// here's the cases, using conditionals:
// Here are the cases, using conditionals:
//
// branch >= > <= < =
// ------ --- --- --- --- ---
@ -1281,7 +1317,7 @@ namespace nanojit
}
patch = _nIns;
}
fcmp(a, b);
asm_fcmp(a, b);
return patch;
}
@ -1292,7 +1328,7 @@ namespace nanojit
if (op == LIR_feq) {
// result = ZF & !PF, must do logic on flags
// r = al|bl|cl|dl, can only use rh without rex prefix
Register r = deprecated_prepResultReg(ins, 1<<RAX|1<<RCX|1<<RDX|1<<RBX);
Register r = prepareResultReg(ins, 1<<RAX|1<<RCX|1<<RDX|1<<RBX);
MOVZX8(r, r); // movzx8 r,rl r[8:63] = 0
X86_AND8R(r); // and rl,rh rl &= rh
X86_SETNP(r); // setnp rh rh = !PF
@ -1305,22 +1341,30 @@ namespace nanojit
op = LIR_fge;
LIns *t = a; a = b; b = t;
}
Register r = deprecated_prepResultReg(ins, GpRegs); // x64 can use any GPR as setcc target
Register r = prepareResultReg(ins, GpRegs); // x64 can use any GPR as setcc target
MOVZX8(r, r);
if (op == LIR_fgt)
SETA(r);
else
SETAE(r);
}
fcmp(a, b);
freeResourcesOf(ins);
asm_fcmp(a, b);
}
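
The LIR_feq case encodes "equal and ordered": after ucomisd, ZF is set for equal *or* unordered, and PF marks the unordered (NaN) case, hence the SETE/SETNP/AND8 combination. A behavioral sketch (illustrative only):

    #include <cassert>
    #include <cmath>

    bool feq(double a, double b) {
        bool zf = !(a < b) && !(a > b);  // ZF after ucomisd: equal or unordered
        bool pf = (a != a) || (b != b);  // PF after ucomisd: unordered (NaN)
        return zf && !pf;                // SETE & SETNP
    }

    int main() {
        assert(feq(1.0, 1.0));
        assert(!feq(NAN, 1.0));          // unordered compares are never equal
        return 0;
    }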
void Assembler::fcmp(LIns *a, LIns *b) {
// WARNING: This function cannot generate any code that will affect the
// condition codes prior to the generation of the ucomisd. See asm_cmp()
// for more details.
void Assembler::asm_fcmp(LIns *a, LIns *b) {
Register ra, rb;
findRegFor2(FpRegs, a, ra, FpRegs, b, rb);
UCOMISD(ra, rb);
}
// WARNING: the code generated by this function must not affect the
// condition codes. See asm_cmp() for details.
void Assembler::asm_restore(LIns *ins, Register r) {
if (ins->isop(LIR_alloc)) {
int d = arDisp(ins);
@ -1336,11 +1380,10 @@ namespace nanojit
}
else {
int d = findMemFor(ins);
if (IsFpReg(r)) {
NanoAssert(ins->isN64());
// load 64bits into XMM. don't know if double or int64, assume double.
if (ins->isF64()) {
NanoAssert(IsFpReg(r));
MOVSDRM(r, d, FP);
} else if (ins->isN64()) {
} else if (ins->isI64()) {
NanoAssert(IsGpReg(r));
MOVQRM(r, d, FP);
} else {
@ -1353,8 +1396,10 @@ namespace nanojit
void Assembler::asm_cond(LIns *ins) {
LOpcode op = ins->opcode();
// unlike x86-32, with a rex prefix we can use any GP register as an 8bit target
Register r = deprecated_prepResultReg(ins, GpRegs);
Register r = prepareResultReg(ins, GpRegs);
// SETcc only sets low 8 bits, so extend
MOVZX8(r, r);
switch (op) {
@ -1379,6 +1424,8 @@ namespace nanojit
case LIR_quge:
case LIR_uge: SETAE(r); break;
}
freeResourcesOf(ins);
asm_cmp(ins);
}
@ -1409,18 +1456,17 @@ namespace nanojit
}
}
void Assembler::regalloc_load(LIns *ins, RegisterMask allow, Register &rr, int32_t &dr, Register &rb) {
// Register setup for load ops. Pairs with endLoadRegs().
void Assembler::beginLoadRegs(LIns *ins, RegisterMask allow, Register &rr, int32_t &dr, Register &rb) {
dr = ins->disp();
LIns *base = ins->oprnd1();
rb = getBaseReg(base, dr, BaseRegs);
if (!ins->isInRegMask(allow)) {
rr = deprecated_prepResultReg(ins, allow & ~rmask(rb));
} else {
// keep already assigned register
rr = ins->getReg();
NanoAssert(allow & rmask(rr));
deprecated_freeRsrcOf(ins, false);
}
rr = prepareResultReg(ins, allow & ~rmask(rb));
}
// Register clean-up for load ops. Pairs with beginLoadRegs().
void Assembler::endLoadRegs(LIns* ins) {
freeResourcesOf(ins);
}
void Assembler::asm_load64(LIns *ins) {
@ -1428,20 +1474,17 @@ namespace nanojit
int32_t dr;
switch (ins->opcode()) {
case LIR_ldq:
case LIR_ldqc:
regalloc_load(ins, GpRegs, rr, dr, rb);
beginLoadRegs(ins, GpRegs, rr, dr, rb);
NanoAssert(IsGpReg(rr));
MOVQRM(rr, dr, rb); // general 64bit load, 32bit const displacement
break;
case LIR_ldf:
case LIR_ldfc:
regalloc_load(ins, FpRegs, rr, dr, rb);
beginLoadRegs(ins, FpRegs, rr, dr, rb);
NanoAssert(IsFpReg(rr));
MOVSDRM(rr, dr, rb); // load 64bits into XMM
break;
case LIR_ld32f:
case LIR_ldc32f:
regalloc_load(ins, FpRegs, rr, dr, rb);
beginLoadRegs(ins, FpRegs, rr, dr, rb);
NanoAssert(IsFpReg(rr));
CVTSS2SD(rr, rr);
MOVSSRM(rr, dr, rb);
@ -1450,40 +1493,36 @@ namespace nanojit
NanoAssertMsg(0, "asm_load64 should never receive this LIR opcode");
break;
}
endLoadRegs(ins);
}
void Assembler::asm_load32(LIns *ins) {
NanoAssert(ins->isI32());
Register r, b;
int32_t d;
regalloc_load(ins, GpRegs, r, d, b);
beginLoadRegs(ins, GpRegs, r, d, b);
LOpcode op = ins->opcode();
switch(op) {
switch (op) {
case LIR_ldzb:
case LIR_ldcb:
MOVZX8M( r, d, b);
break;
case LIR_ldzs:
case LIR_ldcs:
MOVZX16M(r, d, b);
break;
case LIR_ld:
case LIR_ldc:
MOVLRM( r, d, b);
break;
case LIR_ldsb:
case LIR_ldcsb:
MOVSX8M( r, d, b);
break;
case LIR_ldss:
case LIR_ldcss:
MOVSX16M( r, d, b);
break;
default:
NanoAssertMsg(0, "asm_load32 should never receive this LIR opcode");
break;
}
endLoadRegs(ins);
}
void Assembler::asm_store64(LOpcode op, LIns *value, int d, LIns *base) {
@ -1542,8 +1581,6 @@ namespace nanojit
NanoAssertMsg(0, "asm_store32 should never receive this LIR opcode");
break;
}
}
void Assembler::asm_int(LIns *ins) {
@ -1597,11 +1634,11 @@ namespace nanojit
uint32_t a = ins->paramArg();
uint32_t kind = ins->paramKind();
if (kind == 0) {
// ordinary param
// first four or six args always in registers for x86_64 ABI
// Ordinary param. First four or six args always in registers for x86_64 ABI.
if (a < (uint32_t)NumArgRegs) {
// incoming arg in register
deprecated_prepResultReg(ins, rmask(argRegs[a]));
prepareResultReg(ins, rmask(argRegs[a]));
// No code to generate.
} else {
// todo: support stack based args, arg 0 is at [FP+off] where off
// is the # of regs to be pushed in genProlog()
@ -1609,24 +1646,60 @@ namespace nanojit
}
}
else {
// saved param
deprecated_prepResultReg(ins, rmask(savedRegs[a]));
// Saved param.
prepareResultReg(ins, rmask(savedRegs[a]));
// No code to generate.
}
freeResourcesOf(ins);
}
// Register setup for 2-address style unary ops of the form R = (op) R.
// Pairs with endOpRegs().
void Assembler::beginOp1Regs(LIns* ins, RegisterMask allow, Register &rr, Register &ra) {
LIns* a = ins->oprnd1();
rr = prepareResultReg(ins, allow);
// If 'a' isn't in a register, it can be clobbered by 'ins'.
ra = a->isInReg() ? a->getReg() : rr;
NanoAssert(rmask(ra) & allow);
}
// Register setup for 2-address style binary ops of the form R = R (op) B.
// Pairs with endOpRegs().
void Assembler::beginOp2Regs(LIns *ins, RegisterMask allow, Register &rr, Register &ra,
Register &rb) {
LIns *a = ins->oprnd1();
LIns *b = ins->oprnd2();
if (a != b) {
rb = findRegFor(b, allow);
allow &= ~rmask(rb);
}
rr = prepareResultReg(ins, allow);
// If 'a' isn't in a register, it can be clobbered by 'ins'.
ra = a->isInReg() ? a->getReg() : rr;
NanoAssert(rmask(ra) & allow);
if (a == b) {
rb = ra;
}
}
// register allocation for 2-address style unary ops of the form R = (op) R
void Assembler::regalloc_unary(LIns *ins, RegisterMask allow, Register &rr, Register &ra) {
LIns *a = ins->oprnd1();
rr = deprecated_prepResultReg(ins, allow);
// if this is last use of a in reg, we can re-use result reg
// Register clean-up for 2-address style unary ops of the form R = (op) R.
// Pairs with beginOp1Regs() and beginOp2Regs().
void Assembler::endOpRegs(LIns* ins, Register rr, Register ra) {
LIns* a = ins->oprnd1();
// We're finished with 'ins'.
NanoAssert(ins->getReg() == rr);
freeResourcesOf(ins);
// If 'a' isn't in a register yet, that means it's clobbered by 'ins'.
if (!a->isInReg()) {
ra = findSpecificRegForUnallocated(a, rr);
} else {
// 'a' already has a register assigned. Caller must emit a copy
// to rr once instr code is generated. (ie mov rr,ra ; op rr)
ra = a->getReg();
}
NanoAssert(allow & rmask(rr));
NanoAssert(ra == rr);
findSpecificRegForUnallocated(a, ra);
}
}
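
These helpers exist because x86 ALU instructions are two-address: the destination is also the left source. Whenever rr != ra the op must be preceded, in execution order, by a copy, which is why callers emit MR(rr, ra) after the op in this backwards assembler. A toy model of the constraint (illustrative only):

    #include <cassert>

    int twoAddrAdd(int ra, int rb) {
        int rr;
        rr = ra;   // MR(rr, ra): left operand into the result register first
        rr += rb;  // ADDLRR(rr, rb): destination doubles as left source
        return rr;
    }

    int main() {
        assert(twoAddrAdd(3, 4) == 7);
        return 0;
    }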
static const AVMPLUS_ALIGN16(int64_t) negateMask[] = {0x8000000000000000LL,0};
@ -1634,7 +1707,7 @@ namespace nanojit
void Assembler::asm_fneg(LIns *ins) {
Register rr, ra;
if (isS32((uintptr_t)negateMask) || isTargetWithinS32((NIns*)negateMask)) {
regalloc_unary(ins, FpRegs, rr, ra);
beginOp1Regs(ins, FpRegs, rr, ra);
if (isS32((uintptr_t)negateMask)) {
// builtin code is in bottom or top 2GB addr space, use absolute addressing
XORPSA(rr, (int32_t)(uintptr_t)negateMask);
@ -1644,14 +1717,17 @@ namespace nanojit
}
if (ra != rr)
asm_nongp_copy(rr,ra);
endOpRegs(ins, rr, ra);
} else {
// this is just hideous - can't use RIP-relative load, can't use
// This is just hideous - can't use RIP-relative load, can't use
// absolute-address load, and can't move imm64 const to XMM.
// So do it all in a GPR. Hrmph.
rr = deprecated_prepResultReg(ins, GpRegs);
rr = prepareResultReg(ins, GpRegs);
ra = findRegFor(ins->oprnd1(), GpRegs & ~rmask(rr));
XORQRR(rr, ra); // xor rr, ra
asm_quad(rr, negateMask[0], /*canClobberCCs*/true); // mov rr, 0x8000000000000000
freeResourcesOf(ins);
}
}
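
negateMask works because IEEE-754 negation is a flip of the sign bit only; XORPS (or XORQRR in the GPR fallback) toggles that bit and leaves the rest of the pattern alone. A minimal sketch (illustrative only):

    #include <cstdint>
    #include <cstring>
    #include <cassert>

    double negateViaXor(double d) {
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits);
        bits ^= 0x8000000000000000ULL;   // same constant as negateMask[0]
        std::memcpy(&d, &bits, sizeof d);
        return d;
    }

    int main() {
        assert(negateViaXor(1.5) == -1.5);
        assert(negateViaXor(-0.0) == 0.0);
        return 0;
    }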

View file

@ -399,15 +399,18 @@ namespace nanojit
void asm_shift(LIns*);\
void asm_shift_imm(LIns*);\
void asm_arith_imm(LIns*);\
void regalloc_unary(LIns *ins, RegisterMask allow, Register &rr, Register &ra);\
void regalloc_binary(LIns *ins, RegisterMask allow, Register &rr, Register &ra, Register &rb);\
void regalloc_load(LIns *ins, RegisterMask allow, Register &rr, int32_t &d, Register &rb);\
void beginOp1Regs(LIns *ins, RegisterMask allow, Register &rr, Register &ra);\
void beginOp2Regs(LIns *ins, RegisterMask allow, Register &rr, Register &ra, Register &rb);\
void endOpRegs(LIns *ins, Register rr, Register ra);\
void beginLoadRegs(LIns *ins, RegisterMask allow, Register &rr, int32_t &d, Register &rb);\
void endLoadRegs(LIns *ins);\
void dis(NIns *p, int bytes);\
void asm_cmp(LIns*);\
void asm_cmp_imm(LIns*);\
void fcmp(LIns*, LIns*);\
void asm_fcmp(LIns*, LIns*);\
NIns* asm_fbranch(bool, LIns*, NIns*);\
void asm_div_mod(LIns *i);\
void asm_div(LIns *ins);\
void asm_div_mod(LIns *ins);\
int max_stk_used;\
void PUSHR(Register r);\
void POPR(Register r);\

View file

@ -161,13 +161,10 @@ namespace nanojit
void Assembler::asm_call(LInsp ins)
{
Register retReg = ( ins->isop(LIR_fcall) ? FST0 : retRegs[0] );
deprecated_prepResultReg(ins, rmask(retReg));
Register rr = ( ins->isop(LIR_fcall) ? FST0 : retRegs[0] );
prepareResultReg(ins, rmask(rr));
// Do this after we've handled the call result, so we don't
// force the call result to be spilled unnecessarily.
evictScratchRegs();
evictScratchRegsExcept(rmask(rr));
const CallInfo* call = ins->callInfo();
// must be signed, not unsigned
@ -224,15 +221,19 @@ namespace nanojit
CALL(call);
}
else {
// indirect call. x86 Calling conventions don't use EAX as an
// Indirect call. x86 Calling conventions don't use EAX as an
// argument, and do use EAX as a return value. We need a register
// for the address to call, so we use EAX since it will always be
// available
// available.
CALLr(call, EAX);
}
// Make sure fpu stack is empty before call (restoreCallerSaved).
// Call this now so that the arg setup can involve 'rr'.
freeResourcesOf(ins);
// Make sure fpu stack is empty before call.
NanoAssert(_allocator.isFree(FST0));
// Pre-assign registers to the first N 4B args based on the calling convention.
uint32_t n = 0;
@ -247,7 +248,7 @@ namespace nanojit
stkd = 0;
}
for(uint32_t i=0; i < argc; i++)
for (uint32_t i = 0; i < argc; i++)
{
uint32_t j = argc-i-1;
ArgSize sz = sizes[j];
@ -393,13 +394,17 @@ namespace nanojit
} else {
int d = findMemFor(ins);
if (rmask(r) & GpRegs) {
if (ins->isI32()) {
NanoAssert(rmask(r) & GpRegs);
LD(r, d, FP);
} else if (rmask(r) & XmmRegs) {
SSE_LDQ(r, d, FP);
} else {
NanoAssert(rmask(r) & x87Regs);
FLDQ(d, FP);
NanoAssert(ins->isF64());
if (rmask(r) & XmmRegs) {
SSE_LDQ(r, d, FP);
} else {
NanoAssert(rmask(r) & x87Regs);
FLDQ(d, FP);
}
}
}
}
@ -483,87 +488,69 @@ namespace nanojit
LIns* base = ins->oprnd1();
int db = ins->disp();
Register rr = UnspecifiedReg; // init to shut GCC up
bool inReg = ins->isInReg();
if (inReg)
rr = ins->getReg();
Register rb = getBaseReg(base, db, GpRegs);
if (inReg && (rmask(rr) & XmmRegs))
{
deprecated_freeRsrcOf(ins, false);
Register rb = getBaseReg(base, db, GpRegs);
// There are two cases:
// - 'ins' is in FpRegs: load it.
// - otherwise: there's no point loading the value into a register
// because its only use will be to immediately spill it. Instead we
// do a memory-to-memory move from the load address directly to the
// spill slot. (There must be a spill slot assigned.) This is why
// we don't use prepareResultReg() here unlike most other places --
// because it mandates bringing the value into a register.
//
if (ins->isInReg()) {
Register rr = ins->getReg();
asm_spilli(ins, false); // if also in memory in post-state, spill it now
switch (ins->opcode()) {
case LIR_ldf:
case LIR_ldfc:
case LIR_ldf:
if (rmask(rr) & XmmRegs) {
SSE_LDQ(rr, db, rb);
break;
case LIR_ld32f:
case LIR_ldc32f:
} else {
NanoAssert(rmask(rr) & x87Regs);
FLDQ(db, rb);
}
break;
case LIR_ld32f:
if (rmask(rr) & XmmRegs) {
SSE_CVTSS2SD(rr, rr);
SSE_LDSS(rr, db, rb);
SSE_XORPDr(rr,rr);
break;
default:
NanoAssertMsg(0, "asm_load64 should never receive this LIR opcode");
break;
} else {
NanoAssert(rmask(rr) & x87Regs);
FLD32(db, rb);
}
break;
default:
NanoAssert(0);
break;
}
}
else
{
bool inAr = ins->isInAr();
int dr = 0;
if (inAr)
dr = arDisp(ins);
Register rb;
if (base->isop(LIR_alloc)) {
rb = FP;
db += findMemFor(base);
} else {
rb = findRegFor(base, GpRegs);
}
ins->clearReg();
} else {
NanoAssert(ins->isInAr());
int dr = arDisp(ins);
switch (ins->opcode()) {
case LIR_ldf:
case LIR_ldfc:
// don't use an fpu reg to simply load & store the value.
if (inAr)
asm_mmq(FP, dr, rb, db);
deprecated_freeRsrcOf(ins, false);
if (inReg)
{
NanoAssert(rmask(rr)&x87Regs);
_allocator.retire(rr);
FLDQ(db, rb);
}
break;
case LIR_ld32f:
case LIR_ldc32f:
deprecated_freeRsrcOf(ins, false);
if (inReg)
{
NanoAssert(rmask(rr)&x87Regs);
_allocator.retire(rr);
// Be sure to shadow the value onto our local area if there's space for it,
// but don't pop the FP stack, we expect the register to stay valid.
if (inAr)
FSTQ(0, dr, FP);
FLD32(db, rb);
}
else
{
// We need to use fpu to expand 32->64, can't use asm_mmq...
// just load-and-store-with-pop.
NanoAssert(inAr);
FSTPQ(dr, FP);
FLD32(db, rb);
}
break;
default:
NanoAssertMsg(0, "asm_load64 should never receive this LIR opcode");
break;
case LIR_ldf:
// Don't use an fpu reg to simply load & store the value.
asm_mmq(FP, dr, rb, db);
break;
case LIR_ld32f:
// Need to use fpu to expand 32->64.
FSTPQ(dr, FP);
FLD32(db, rb);
break;
default:
NanoAssert(0);
break;
}
}
freeResourcesOf(ins);
}
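
In the spill-only case, asm_mmq() copies the eight bytes between stack slots directly instead of round-tripping through an FPU register. A sketch of such a memory-to-memory move done as two 32-bit transfers -- the GP-register fallback on i386; SSE builds can use a single XMM move (illustrative only, not the nanojit routine):

    #include <cstdint>
    #include <cstring>
    #include <cassert>

    void mmq(void* dst, const void* src) {
        uint32_t lo, hi;  // two GP-register-sized halves
        std::memcpy(&lo, static_cast<const char*>(src) + 0, 4);
        std::memcpy(&hi, static_cast<const char*>(src) + 4, 4);
        std::memcpy(static_cast<char*>(dst) + 0, &lo, 4);
        std::memcpy(static_cast<char*>(dst) + 4, &hi, 4);
    }

    int main() {
        double a = 3.25, b = 0.0;
        mmq(&b, &a);
        assert(b == 3.25);
        return 0;
    }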
void Assembler::asm_store64(LOpcode op, LInsp value, int dr, LInsp base)
@ -593,7 +580,7 @@ namespace nanojit
STi(rb, dr+4, value->imm64_1());
STi(rb, dr, value->imm64_0());
} else if (value->isop(LIR_ldf) || value->isop(LIR_ldfc)) {
} else if (value->isop(LIR_ldf)) {
// value is 64bit struct or int64_t, or maybe a double.
// It may be live in an FPU reg. Either way, don't put it in an
// FPU reg just to load & store it.
@ -653,7 +640,7 @@ namespace nanojit
NanoAssert(cond->isCmp());
// Handle float conditions separately.
if (condop >= LIR_feq && condop <= LIR_fge) {
if (isFCmpOpcode(condop)) {
return asm_fbranch(branchOnFalse, cond, targ);
}
@ -917,7 +904,7 @@ namespace nanojit
Register rr = prepareResultReg(ins, allow);
// If 'lhs' isn't in a register, it can be clobbered by 'ins'.
Register ra = !lhs->isInReg() ? rr : lhs->getReg();
Register ra = lhs->isInReg() ? lhs->getReg() : rr;
if (!isConstRhs) {
if (lhs == rhs)
@ -976,7 +963,7 @@ namespace nanojit
}
}
// This is called when we have a mod(div(divL, divR)) sequence.
// Generates code for a LIR_mod(LIR_div(divL, divR)) sequence.
void Assembler::asm_div_mod(LInsp mod)
{
LInsp div = mod->oprnd1();
@ -992,12 +979,10 @@ namespace nanojit
prepareResultReg(div, rmask(EAX));
Register rDivR = findRegFor(divR, (GpRegs & ~(rmask(EAX)|rmask(EDX))));
Register rDivL = !divL->isInReg() ? EAX : divL->getReg();
Register rDivL = divL->isInReg() ? divL->getReg() : EAX;
DIV(rDivR);
CDQ(); // sign-extend EAX into EDX:EAX
if (EAX != rDivL)
MR(EAX, rDivL);
@ -1028,19 +1013,19 @@ namespace nanojit
//
void Assembler::asm_neg_not(LInsp ins)
{
LOpcode op = ins->opcode();
LIns* lhs = ins->oprnd1();
Register rr = prepareResultReg(ins, GpRegs);
// If 'lhs' isn't in a register, it can be clobbered by 'ins'.
Register ra = !lhs->isInReg() ? rr : lhs->getReg();
Register ra = lhs->isInReg() ? lhs->getReg() : rr;
if (op == LIR_not)
if (ins->isop(LIR_not)) {
NOT(rr);
else
} else {
NanoAssert(ins->isop(LIR_neg));
NEG(rr);
}
if (rr != ra)
MR(rr, ra);
@ -1056,119 +1041,140 @@ namespace nanojit
LOpcode op = ins->opcode();
LIns* base = ins->oprnd1();
int32_t d = ins->disp();
Register rr = deprecated_prepResultReg(ins, GpRegs);
Register rr = prepareResultReg(ins, GpRegs);
if (base->isconst()) {
intptr_t addr = base->imm32();
addr += d;
switch(op) {
switch (op) {
case LIR_ldzb:
case LIR_ldcb:
LD8Zdm(rr, addr);
return;
break;
case LIR_ldsb:
case LIR_ldcsb:
LD8Sdm(rr, addr);
return;
break;
case LIR_ldzs:
case LIR_ldcs:
LD16Zdm(rr, addr);
return;
break;
case LIR_ldss:
case LIR_ldcss:
LD16Sdm(rr, addr);
return;
break;
case LIR_ld:
case LIR_ldc:
LDdm(rr, addr);
return;
break;
default:
NanoAssertMsg(0, "asm_load32 should never receive this LIR opcode");
return;
break;
}
}
/* Search for add(X,Y) */
if (base->opcode() == LIR_piadd) {
int scale = 0;
freeResourcesOf(ins);
} else if (base->opcode() == LIR_piadd) {
// Search for add(X,Y).
LIns *lhs = base->oprnd1();
LIns *rhs = base->oprnd2();
/* See if we can bypass any SHLs, by searching for
* add(X, shl(Y,Z)) -> mov r, [X+Y*Z]
*/
// If we have this:
//
// W = ld (add(X, shl(Y, Z)))[d] , where int(1) <= Z <= int(3)
//
// we assign lhs=X, rhs=Y, scale=Z, and generate this:
//
// mov rW, [rX+rY*(2^rZ)]
//
// Otherwise, we must have this:
//
// W = ld (add(X, Y))[d]
//
// which we treat like this:
//
// W = ld (add(X, shl(Y, 0)))[d]
//
int scale;
if (rhs->opcode() == LIR_pilsh && rhs->oprnd2()->isconst()) {
scale = rhs->oprnd2()->imm32();
if (scale >= 1 && scale <= 3)
rhs = rhs->oprnd1();
else
scale = 0;
} else {
scale = 0;
}
/* Does LHS have a register yet? If not, re-use the result reg.
* @todo -- If LHS is const, we could eliminate a register use.
*/
Register rleft = ( !lhs->isInReg()
? findSpecificRegForUnallocated(lhs, rr)
: lhs->getReg() );
// If 'lhs' isn't in a register, it can be clobbered by 'ins'.
// Likewise for 'rhs', but we try it with 'lhs' first.
Register ra, rb;
// @todo -- If LHS and/or RHS is const, we could eliminate a register use.
if (!lhs->isInReg()) {
ra = rr;
rb = findRegFor(rhs, GpRegs & ~(rmask(ra)));
/* Does RHS have a register yet? If not, try to re-use the result reg. */
Register rright = ( rr != rleft && !rhs->isInReg()
? findSpecificRegForUnallocated(rhs, rr)
: findRegFor(rhs, GpRegs & ~(rmask(rleft))) );
} else {
ra = lhs->getReg();
NanoAssert(ra != rr);
rb = rhs->isInReg() ? findRegFor(rhs, GpRegs & ~(rmask(ra))) : rr;
}
switch(op) {
switch (op) {
case LIR_ldzb:
case LIR_ldcb:
LD8Zsib(rr, d, rleft, rright, scale);
return;
LD8Zsib(rr, d, ra, rb, scale);
break;
case LIR_ldsb:
case LIR_ldcsb:
LD8Ssib(rr, d, rleft, rright, scale);
return;
LD8Ssib(rr, d, ra, rb, scale);
break;
case LIR_ldzs:
case LIR_ldcs:
LD16Zsib(rr, d, rleft, rright, scale);
return;
LD16Zsib(rr, d, ra, rb, scale);
break;
case LIR_ldss:
case LIR_ldcss:
LD16Ssib(rr, d, rleft, rright, scale);
return;
LD16Ssib(rr, d, ra, rb, scale);
break;
case LIR_ld:
case LIR_ldc:
LDsib(rr, d, rleft, rright, scale);
return;
LDsib(rr, d, ra, rb, scale);
break;
default:
NanoAssertMsg(0, "asm_load32 should never receive this LIR opcode");
return;
break;
}
}
Register ra = getBaseReg(base, d, GpRegs);
switch(op) {
case LIR_ldzb:
case LIR_ldcb:
LD8Z(rr, d, ra);
return;
case LIR_ldsb:
case LIR_ldcsb:
LD8S(rr, d, ra);
return;
case LIR_ldzs:
case LIR_ldcs:
LD16Z(rr, d, ra);
return;
case LIR_ldss:
case LIR_ldcss:
LD16S(rr, d, ra);
return;
case LIR_ld:
case LIR_ldc:
LD(rr, d, ra);
return;
default:
NanoAssertMsg(0, "asm_load32 should never receive this LIR opcode");
return;
freeResourcesOf(ins);
if (!lhs->isInReg()) {
NanoAssert(ra == rr);
findSpecificRegForUnallocated(lhs, ra);
} else if (!rhs->isInReg()) {
NanoAssert(rb == rr);
findSpecificRegForUnallocated(rhs, rb);
}
} else {
Register ra = getBaseReg(base, d, GpRegs);
switch (op) {
case LIR_ldzb:
LD8Z(rr, d, ra);
break;
case LIR_ldsb:
LD8S(rr, d, ra);
break;
case LIR_ldzs:
LD16Z(rr, d, ra);
break;
case LIR_ldss:
LD16S(rr, d, ra);
break;
case LIR_ld:
LD(rr, d, ra);
break;
default:
NanoAssertMsg(0, "asm_load32 should never receive this LIR opcode");
break;
}
freeResourcesOf(ins);
if (!base->isop(LIR_alloc) && !base->isInReg()) {
NanoAssert(ra == rr);
findSpecificRegForUnallocated(base, ra);
}
}
}
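
The add(X, shl(Y, Z)) match above folds into one x86 SIB-addressed load, [rX + rY*(2^Z) + d] with Z in 1..3 (scale 0 covers the plain add(X, Y) form). The effective address being computed, as a sketch (illustrative only):

    #include <cstdint>
    #include <cassert>

    uintptr_t effectiveAddr(uintptr_t base, uintptr_t index, int scale, int32_t disp) {
        assert(0 <= scale && scale <= 3);  // SIB scales: *1, *2, *4, *8
        return base + (index << scale) + disp;
    }

    int main() {
        // ld (add(X, shl(Y, 2)))[8]  =>  [rX + rY*4 + 8]
        assert(effectiveAddr(0x1000, 3, 2, 8) == 0x1000 + 3 * 4 + 8);
        return 0;
    }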
@ -1181,50 +1187,68 @@ namespace nanojit
NanoAssert(condval->isCmp());
NanoAssert(ins->isop(LIR_cmov) && iftrue->isI32() && iffalse->isI32());
const Register rr = deprecated_prepResultReg(ins, GpRegs);
Register rr = prepareResultReg(ins, GpRegs);
// this code assumes that neither LD nor MR nor MRcc set any of the condition flags.
// (This is true on Intel, is it true on all architectures?)
const Register iffalsereg = findRegFor(iffalse, GpRegs & ~rmask(rr));
Register rf = findRegFor(iffalse, GpRegs & ~rmask(rr));
// If 'iftrue' isn't in a register, it can be clobbered by 'ins'.
Register rt = iftrue->isInReg() ? iftrue->getReg() : rr;
// WARNING: We cannot generate any code that affects the condition
// codes between the MRcc generation here and the asm_cmp() call
// below. See asm_cmp() for more details.
switch (condval->opcode()) {
// note that these are all opposites...
case LIR_eq: MRNE(rr, iffalsereg); break;
case LIR_lt: MRGE(rr, iffalsereg); break;
case LIR_le: MRG( rr, iffalsereg); break;
case LIR_gt: MRLE(rr, iffalsereg); break;
case LIR_ge: MRL( rr, iffalsereg); break;
case LIR_ult: MRAE(rr, iffalsereg); break;
case LIR_ule: MRA( rr, iffalsereg); break;
case LIR_ugt: MRBE(rr, iffalsereg); break;
case LIR_uge: MRB( rr, iffalsereg); break;
// Note that these are all opposites...
case LIR_eq: MRNE(rr, rf); break;
case LIR_lt: MRGE(rr, rf); break;
case LIR_le: MRG( rr, rf); break;
case LIR_gt: MRLE(rr, rf); break;
case LIR_ge: MRL( rr, rf); break;
case LIR_ult: MRAE(rr, rf); break;
case LIR_ule: MRA( rr, rf); break;
case LIR_ugt: MRBE(rr, rf); break;
case LIR_uge: MRB( rr, rf); break;
default: NanoAssert(0); break;
}
/*const Register iftruereg =*/ findSpecificRegFor(iftrue, rr);
if (rr != rt)
MR(rr, rt);
freeResourcesOf(ins);
if (!iftrue->isInReg()) {
NanoAssert(rt == rr);
findSpecificRegForUnallocated(iftrue, rr);
}
asm_cmp(condval);
}
void Assembler::asm_param(LInsp ins)
{
uint32_t a = ins->paramArg();
uint32_t arg = ins->paramArg();
uint32_t kind = ins->paramKind();
if (kind == 0) {
// ordinary param
AbiKind abi = _thisfrag->lirbuf->abi;
uint32_t abi_regcount = max_abi_regs[abi];
if (a < abi_regcount) {
if (arg < abi_regcount) {
// Incoming arg in register.
deprecated_prepResultReg(ins, rmask(argRegs[a]));
prepareResultReg(ins, rmask(argRegs[arg]));
// No code to generate.
} else {
// Incoming arg is on stack, and EBP points nearby (see genPrologue()).
Register r = deprecated_prepResultReg(ins, GpRegs);
int d = (a - abi_regcount) * sizeof(intptr_t) + 8;
Register r = prepareResultReg(ins, GpRegs);
int d = (arg - abi_regcount) * sizeof(intptr_t) + 8;
LD(r, d, FP);
}
} else {
// Saved param.
prepareResultReg(ins, rmask(savedRegs[arg]));
// No code to generate.
}
else {
// saved param
deprecated_prepResultReg(ins, rmask(savedRegs[a]));
}
freeResourcesOf(ins);
}
void Assembler::asm_int(LInsp ins)
@ -1285,10 +1309,13 @@ namespace nanojit
void Assembler::asm_quad(LInsp ins)
{
NanoAssert(ins->isconstf());
if (ins->isInReg()) {
Register rr = ins->getReg();
NanoAssert(rmask(rr) & FpRegs);
asm_quad(rr, ins->imm64(), ins->imm64f(), /*canClobberCCs*/true);
} else {
// Do nothing, will be rematerialized when necessary.
}
freeResourcesOf(ins);
@ -1362,8 +1389,7 @@ namespace nanojit
// If 'r' is known, then that's the register we have to put 'ins'
// into.
if (sz == ARGSIZE_I || sz == ARGSIZE_U)
{
if (sz == ARGSIZE_I || sz == ARGSIZE_U) {
if (r != UnspecifiedReg) {
if (ins->isconst()) {
// Rematerialize the constant.
@ -1392,9 +1418,8 @@ namespace nanojit
else
asm_pusharg(ins);
}
}
else
{
} else {
NanoAssert(sz == ARGSIZE_F);
asm_farg(ins, stkd);
}
@ -1452,7 +1477,6 @@ namespace nanojit
} else {
FSTPQ(stkd, SP);
//
// 22Jul09 rickr - Enabling the evict causes a 10% slowdown on primes
//
// evict() triggers a very expensive fstpq/fldq pair around the store.
@ -1460,11 +1484,12 @@ namespace nanojit
//
// see https://bugzilla.mozilla.org/show_bug.cgi?id=491084
/* It's possible that the same LIns* with r=FST0 will appear in the argument list more
* than once. In this case FST0 will not have been evicted and the multiple pop
* actions will unbalance the FPU stack. A quick fix is to always evict FST0 manually.
*/
evictIfActive(FST0);
// It's possible that the same LIns* with r=FST0 will appear in the argument list more
// than once. In this case FST0 will not have been evicted and the multiple pop
// actions will unbalance the FPU stack. A quick fix is to always evict FST0 manually.
NanoAssert(r == FST0);
NanoAssert(ins == _allocator.getActive(r));
evict(ins);
}
if (!_config.i386_fixed_esp)
SUBi(ESP, 8);
@ -1483,93 +1508,85 @@ namespace nanojit
RegisterMask allow = XmmRegs;
Register rb = UnspecifiedReg;
if (lhs != rhs) {
rb = findRegFor(rhs,allow);
rb = findRegFor(rhs, allow);
allow &= ~rmask(rb);
}
Register rr = deprecated_prepResultReg(ins, allow);
Register ra;
Register rr = prepareResultReg(ins, allow);
// if this is last use of lhs in reg, we can re-use result reg
// If 'lhs' isn't in a register, it can be clobbered by 'ins'.
Register ra;
if (!lhs->isInReg()) {
ra = findSpecificRegForUnallocated(lhs, rr);
} else if ((rmask(lhs->getReg()) & XmmRegs) == 0) {
// We need this case on AMD64, because it's possible that
// an earlier instruction has done a quadword load and reserved a
// GPR. If so, ask for a new register.
ra = findRegFor(lhs, XmmRegs);
ra = rr;
} else if (!(rmask(lhs->getReg()) & XmmRegs)) {
NanoAssert(lhs->getReg() == FST0);
// We need to evict lhs from x87Regs, which then puts us in
// the same situation as the !isInReg() case.
evict(lhs);
ra = rr;
} else {
// lhs already has a register assigned but maybe not from the allow set
ra = findRegFor(lhs, allow);
ra = lhs->getReg();
NanoAssert(rmask(ra) & XmmRegs);
}
if (lhs == rhs)
rb = ra;
if (op == LIR_fadd)
SSE_ADDSD(rr, rb);
else if (op == LIR_fsub)
SSE_SUBSD(rr, rb);
else if (op == LIR_fmul)
SSE_MULSD(rr, rb);
else //if (op == LIR_fdiv)
SSE_DIVSD(rr, rb);
switch (op) {
case LIR_fadd: SSE_ADDSD(rr, rb); break;
case LIR_fsub: SSE_SUBSD(rr, rb); break;
case LIR_fmul: SSE_MULSD(rr, rb); break;
case LIR_fdiv: SSE_DIVSD(rr, rb); break;
default: NanoAssert(0);
}
if (rr != ra)
SSE_MOVSD(rr, ra);
freeResourcesOf(ins);
if (!lhs->isInReg()) {
NanoAssert(ra == rr);
findSpecificRegForUnallocated(lhs, ra);
}
}
else
{
// we swap lhs/rhs on purpose here, works out better
// if you only have one fpu reg. use divr/subr.
// We swap lhs/rhs on purpose here, it works out better with
// only one fpu reg -- we can use divr/subr.
LIns* rhs = ins->oprnd1();
LIns* lhs = ins->oprnd2();
Register rr = deprecated_prepResultReg(ins, rmask(FST0));
debug_only( Register rr = ) prepareResultReg(ins, rmask(FST0));
NanoAssert(FST0 == rr);
NanoAssert(!lhs->isInReg() || FST0 == lhs->getReg());
if (rhs->isconstq())
{
if (rhs->isconstq()) {
const uint64_t* p = findQuadConstant(rhs->imm64());
// lhs into reg, prefer same reg as result
switch (op) {
case LIR_fadd: FADDdm( (const double*)p); break;
case LIR_fsub: FSUBRdm((const double*)p); break;
case LIR_fmul: FMULdm( (const double*)p); break;
case LIR_fdiv: FDIVRdm((const double*)p); break;
default: NanoAssert(0);
}
// last use of lhs in reg, can reuse rr
// else, lhs already has a different reg assigned
if (!lhs->isInReg())
findSpecificRegForUnallocated(lhs, rr);
NanoAssert(lhs->getReg()==FST0);
// assume that the lhs is in ST(0) and rhs is on stack
if (op == LIR_fadd)
{ FADDdm((const double*)p); }
else if (op == LIR_fsub)
{ FSUBRdm((const double*)p); }
else if (op == LIR_fmul)
{ FMULdm((const double*)p); }
else if (op == LIR_fdiv)
{ FDIVRdm((const double*)p); }
}
else
{
// make sure rhs is in memory
} else {
int db = findMemFor(rhs);
// lhs into reg, prefer same reg as result
// last use of lhs in reg, can reuse rr
// else, lhs already has a different reg assigned
if (!lhs->isInReg())
findSpecificRegForUnallocated(lhs, rr);
NanoAssert(lhs->getReg()==FST0);
// assume that the lhs is in ST(0) and rhs is on stack
if (op == LIR_fadd)
{ FADD(db, FP); }
else if (op == LIR_fsub)
{ FSUBR(db, FP); }
else if (op == LIR_fmul)
{ FMUL(db, FP); }
else if (op == LIR_fdiv)
{ FDIVR(db, FP); }
switch (op) {
case LIR_fadd: FADD( db, FP); break;
case LIR_fsub: FSUBR(db, FP); break;
case LIR_fmul: FMUL( db, FP); break;
case LIR_fdiv: FDIVR(db, FP); break;
default: NanoAssert(0);
}
}
freeResourcesOf(ins);
if (!lhs->isInReg()) {
findSpecificRegForUnallocated(lhs, FST0);
}
}
}
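
The operand swap pairs the second LIR operand (kept in ST(0)) with the reversed x87 forms: FSUBR computes mem - ST(0) and FDIVR computes mem / ST(0), so the original a-b and a/b come out right with only one stack register guaranteed. A sketch of the identity (illustrative only):

    #include <cassert>

    double fsubr(double mem, double st0) { return mem - st0; }  // FSUBR semantics
    double fdivr(double mem, double st0) { return mem / st0; }  // FDIVR semantics

    int main() {
        double a = 10.0, b = 4.0;  // LIR: fsub(a, b) and fdiv(a, b)
        assert(fsubr(a, /*ST0=*/b) == a - b);
        assert(fdivr(a, /*ST0=*/b) == a / b);
        return 0;
    }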
@ -1728,7 +1745,7 @@ namespace nanojit
void Assembler::asm_fcmp(LIns *cond)
{
LOpcode condop = cond->opcode();
NanoAssert(condop >= LIR_feq && condop <= LIR_fge);
NanoAssert(isFCmpOpcode(condop));
LIns* lhs = cond->oprnd1();
LIns* rhs = cond->oprnd2();
NanoAssert(lhs->isF64() && rhs->isF64());

View file

@ -880,7 +880,7 @@ namespace nanojit
*(--_nIns) = 0x57;\
*(--_nIns) = 0x0f;\
*(--_nIns) = 0x66;\
asm_output("xorpd %s,[0x%p]",gpn(r),(void*)(maskaddr));\
asm_output("xorpd %s,[%p]",gpn(r),(void*)(maskaddr));\
} while(0)
#define SSE_XORPDr(rd,rs) do{ \

View file

@ -1457,10 +1457,10 @@ TrapHandler(JSContext *cx, JSScript *script, jsbytecode *pc, jsval *rval,
str = (JSString *) closure;
caller = JS_GetScriptedCaller(cx, NULL);
if (!JS_EvaluateScript(cx, caller->scopeChain,
JS_GetStringBytes(str), JS_GetStringLength(str),
caller->script->filename, caller->script->lineno,
rval)) {
if (!JS_EvaluateUCInStackFrame(cx, caller,
JS_GetStringChars(str), JS_GetStringLength(str),
caller->script->filename, caller->script->lineno,
rval)) {
return JSTRAP_ERROR;
}
if (!JSVAL_IS_VOID(*rval))
@ -2549,7 +2549,7 @@ split_addProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
return JS_TRUE;
if (!cpx->isInner && cpx->inner) {
/* Make sure to define this property on the inner object. */
if (!JS_ValueToId(cx, *vp, &asId))
if (!JS_ValueToId(cx, id, &asId))
return JS_FALSE;
return JS_DefinePropertyById(cx, cpx->inner, asId, *vp, NULL, NULL, JSPROP_ENUMERATE);
}
@ -3635,6 +3635,32 @@ MakeAbsolutePathname(JSContext *cx, const char *from, const char *leaf)
#endif // XP_UNIX
static JSBool
Compile(JSContext *cx, uintN argc, jsval *vp)
{
if (argc < 1) {
JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_MORE_ARGS_NEEDED,
"compile", "0", "s");
return JS_FALSE;
}
jsval arg0 = JS_ARGV(cx, vp)[0];
if (!JSVAL_IS_STRING(arg0)) {
const char *typeName = JS_GetTypeName(cx, JS_TypeOfValue(cx, arg0));
JS_ReportError(cx, "expected string to compile, got %s", typeName);
return JS_FALSE;
}
JSString *scriptContents = JSVAL_TO_STRING(arg0);
JSScript *result = JS_CompileUCScript(cx, NULL, JS_GetStringCharsZ(cx, scriptContents),
JS_GetStringLength(scriptContents), "<string>", 0);
if (!result)
return JS_FALSE;
JS_DestroyScript(cx, result);
JS_SET_RVAL(cx, vp, JSVAL_VOID);
return JS_TRUE;
}
static JSBool
Snarf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
{
@ -3786,6 +3812,7 @@ static JSFunctionSpec shell_functions[] = {
JS_FN("scatter", Scatter, 1,0),
#endif
JS_FS("snarf", Snarf, 0,0,0),
JS_FN("compile", Compile, 1,0),
JS_FN("timeout", Timeout, 1,0),
JS_FN("elapsed", Elapsed, 0,0),
JS_FS_END
@ -3888,6 +3915,7 @@ static const char *const shell_help_messages[] = {
"scatter(fns) Call functions concurrently (ignoring errors)",
#endif
"snarf(filename) Read filename into returned string",
"compile(code) Parses a string, potentially throwing",
"timeout([seconds])\n"
" Get/Set the limit in seconds for the execution time for the current context.\n"
" A negative value (default) means that the execution time is unlimited.",

View file

@ -32,3 +32,4 @@ script 15.8-1.js
script 15.9.5.js
script 8.6.2.1-1.js
script 9.9-1.js
script trapflatclosure.js

View file

@ -0,0 +1,24 @@
/*
* Any copyright is dedicated to the Public Domain.
* http://creativecommons.org/licenses/publicdomain/
*/
var gTestfile = 'trapflatclosure.js';
var BUGNUMBER = 549617;
var summary = 'flat closure debugged via trap while still active';
var expect = "abc";
var actual = expect;
function a(x, y) {
return function () { return x; };
}
var f = a("abc", 123);
if (this.trap)
trap(f, "try {actual = x} catch (e) {actual = e}");
f();
reportCompare(expect, actual, summary);
printStatus("All tests passed!");

View file

@ -0,0 +1,25 @@
/*
* Any copyright is dedicated to the Public Domain.
* http://creativecommons.org/licenses/publicdomain/
*/
var gTestfile = 'enumerate-undefined.js';
//-----------------------------------------------------------------------------
var BUGNUMBER = 547087;
var summary = 'JS_EnumerateStandardClasses uses wrong attributes for undefined';
print(BUGNUMBER + ": " + summary);
/**************
* BEGIN TEST *
**************/
for (var p in this);
assertEq(Object.getOwnPropertyDescriptor(this, "undefined").writable, false);
/******************************************************************************/
reportCompare(true, true);
print("All tests passed!");

View file

@ -1,3 +1,4 @@
url-prefix ../../jsreftest.html?test=ecma_5/misc/
script global-numeric-properties.js
script redeclare-var-non-writable-property.js
script enumerate-undefined.js

View file

@ -0,0 +1,22 @@
/*
* Any copyright is dedicated to the Public Domain.
* http://creativecommons.org/licenses/publicdomain/
* Contributor: Jason Orendorff
*/
// This test appeared in bug 497789 comment 78.
var a = {x: 'a'},
b1 = Object.create(a),
c1 = Object.create(b1),
b2 = Object.create(a),
c2 = Object.create(b2);
b2.x = 'b'; // foreshadowing a.x
var s = '';
for each (var obj in [c1, c2])
s += obj.x;
assertEq(s, 'ab');
print(" PASSED! Property cache soundness: objects with the same shape but different prototypes.");

View file

@ -0,0 +1,9 @@
/*
* Any copyright is dedicated to the Public Domain.
* http://creativecommons.org/licenses/publicdomain/
* Contributors: Gary Kwong and Jason Orendorff
*/
var obj = {};
obj.__defineSetter__("x", function() {});
obj.watch("x", function() {});
obj.__defineSetter__("x", /a/);

js/src/tests/parsemark.py (new file, 198 lines)
View file

@ -0,0 +1,198 @@
#!/usr/bin/env python
"""%prog [options] dirpath
Pulls performance data on parsing via the js shell.
Displays the average number of milliseconds it took to parse each file.
For comparison, something apparently approximating a t-test is performed:
"Faster" means that:
t_baseline_goodrun = (t_baseline_avg - t_baseline_stddev)
t_current_badrun = (t_current_avg + t_current_stddev)
t_current_badrun < t_baseline_goodrun
Effectively, if a bad run from the current data is better than a good run from
the baseline data, we're probably faster. A similar computation is used for
determining the "slower" designation.
Arguments:
dirpath directory filled with parsilicious js files
"""
import math
import optparse
import os
import subprocess as subp
import sys
from string import Template
from operator import itemgetter
_DIR = os.path.dirname(__file__)
JS_CODE_TEMPLATE = Template("""
var contents = snarf("$filepath");
for (let i = 0; i < $warmup_run_count; i++)
compile(contents);
var results = [];
for (let i = 0; i < $real_run_count; i++) {
var start = new Date();
compile(contents);
var end = new Date();
results.push(end - start);
}
print(results);
""")
def find_shell(filename='js'):
"""Look around for the js shell. Prefer more obvious places to look.
:return: Path if found, else None.
"""
relpaths = ['', 'obj', os.pardir, [os.pardir, 'obj']]
for relpath in relpaths:
path_parts = [_DIR]
if isinstance(relpath, list):
path_parts += relpath
else:
path_parts.append(relpath)
path_parts.append(filename)
path = os.path.join(*path_parts)
if os.path.isfile(path):
return path
def gen_filepaths(dirpath, target_ext='.js'):
for filename in os.listdir(dirpath):
if filename.endswith(target_ext):
yield os.path.join(dirpath, filename)
def avg(seq):
return sum(seq) / len(seq)
def stddev(seq, mean):
diffs = ((float(item) - mean) ** 2 for item in seq)
return math.sqrt(sum(diffs) / len(seq))
def bench(shellpath, filepath, warmup_runs, counted_runs, stfu=False):
"""Return a list of milliseconds for the counted runs."""
assert '"' not in filepath
code = JS_CODE_TEMPLATE.substitute(filepath=filepath,
warmup_run_count=warmup_runs, real_run_count=counted_runs)
proc = subp.Popen([shellpath, '-e', code], stdout=subp.PIPE)
stdout, _ = proc.communicate()
milliseconds = [float(val) for val in stdout.split(',')]
mean = avg(milliseconds)
sigma = stddev(milliseconds, mean)
if not stfu:
print 'Runs:', milliseconds
print 'Mean:', mean
print 'Stddev: %.2f (%.2f%% of mean)' % (sigma, sigma / mean * 100)
return mean, sigma
def parsemark(filepaths, fbench, stfu=False):
""":param fbench: fbench(filename) -> float"""
bench_map = {}
for filepath in filepaths:
filename = os.path.split(filepath)[-1]
if not stfu:
print 'Parsemarking %s...' % filename
bench_map[filename] = fbench(filepath)
print '{'
for i, (filename, (avg, stddev)) in enumerate(bench_map.iteritems()):
assert '"' not in filename
fmt = ' %30s: {"average_ms": %6.2f, "stddev_ms": %6.2f}'
if i != len(bench_map) - 1:
fmt += ','
filename_str = '"%s"' % filename
print fmt % (filename_str, avg, stddev)
print '}'
return bench_map
def compare(current, baseline):
for key, (avg, stddev) in current.iteritems():
try:
base_avg, base_stddev = itemgetter('average_ms', 'stddev_ms')(baseline.get(key, None))
except TypeError:
print key, 'missing from baseline'
continue
t_best, t_worst = avg - stddev, avg + stddev
base_t_best, base_t_worst = base_avg - base_stddev, base_avg + base_stddev
fmt = '%30s: %s'
if t_worst < base_t_best: # Worst takes less time (better) than baseline's best.
speedup = -((t_worst - base_t_best) / base_t_best) * 100
result = 'faster: %6.2fms < baseline %6.2fms (%+6.2f%%)' % \
(t_worst, base_t_best, speedup)
elif t_best > base_t_worst: # Best takes more time (worse) than baseline's worst.
slowdown = -((t_best - base_t_worst) / base_t_worst) * 100
result = 'SLOWER: %6.2fms > baseline %6.2fms (%+6.2f%%) ' % \
(t_best, base_t_worst, slowdown)
else:
result = 'Meh.'
print '%30s: %s' % (key, result)
def try_import_json():
try:
import json
return json
except ImportError:
try:
import simplejson as json
return json
except ImportError:
pass
def main():
parser = optparse.OptionParser(usage=__doc__.strip())
parser.add_option('-w', '--warmup-runs', metavar='COUNT', type=int,
default=5, help='used to minimize test instability')
parser.add_option('-c', '--counted-runs', metavar='COUNT', type=int,
default=20, help='timed data runs that count towards the average')
parser.add_option('-s', '--shell', metavar='PATH', help='explicit shell '
'location; when omitted, will look in likely places')
parser.add_option('-b', '--baseline', metavar='JSON_PATH',
dest='baseline_path', help='json file with baseline values to '
'compare against')
parser.add_option('-q', '--quiet', dest='stfu', action='store_true',
default=False, help='only print JSON to stdout')
options, args = parser.parse_args()
try:
dirpath = args.pop(0)
except IndexError:
parser.print_help()
print
print >> sys.stderr, 'error: dirpath required'
return -1
shellpath = options.shell or find_shell()
if not shellpath:
print >> sys.stderr, 'Could not find shell'
return -1
if options.baseline_path:
if not os.path.isfile(options.baseline_path):
print >> sys.stderr, 'Baseline file does not exist'
return -1
json = try_import_json()
if not json:
print >> sys.stderr, 'You need a json lib for baseline comparison'
return -1
benchfile = lambda filepath: bench(shellpath, filepath,
options.warmup_runs, options.counted_runs, stfu=options.stfu)
bench_map = parsemark(gen_filepaths(dirpath), benchfile, options.stfu)
if options.baseline_path:
fh = open(options.baseline_path, 'r') # 2.4 compat, no 'with'.
baseline_map = json.load(fh)
fh.close()
compare(current=bench_map, baseline=baseline_map)
return 0
if __name__ == '__main__':
sys.exit(main())

View file

@ -0,0 +1,7 @@
a = b = c = d = 0;
__defineGetter__("e", function () { throw StopIteration; })
try {
for each(f in this) {}
} catch (exc) {
assertEq(exc, StopIteration);
}

View file

@ -0,0 +1,7 @@
var obj = {a: 0, b: 0, c: 0, d: 0, get e() { throw StopIteration; }};
try {
for each (x in obj) {}
FAIL;
} catch (exc) {
assertEq(exc, StopIteration);
}

View file

@ -0,0 +1,47 @@
function convertToInt(str) {
return str | 0;
}
function convertToIntOnTrace(str) {
var z;
for (var i = 0; i < RUNLOOP; ++i) {
z = str | 0;
}
return z;
}
function convertToDouble(str) {
return str * 1.5;
}
function convertToDoubleOnTrace(str) {
var z;
for (var i = 0; i < RUNLOOP; ++i) {
z = str * 1.5;
}
return z;
}
assertEq(convertToInt("0x10"), 16);
assertEq(convertToInt("-0x10"), 0);
assertEq(convertToIntOnTrace("0x10"), 16);
checkStats({
traceTriggered: 1
});
assertEq(convertToIntOnTrace("-0x10"), 0);
checkStats({
traceTriggered: 2
});
assertEq(convertToDouble("0x10"), 24);
assertEq(convertToDouble("-0x10"), NaN);
assertEq(convertToDoubleOnTrace("0x10"), 24);
checkStats({
traceTriggered: 3
});
assertEq(convertToDoubleOnTrace("-0x10"), NaN);
checkStats({
traceTriggered: 4
});

View file

@ -0,0 +1,345 @@
// 3D Cube Rotation
// http://www.speich.net/computer/moztesting/3d.htm
// Created by Simon Speich
var Q = new Array();
var MTrans = new Array(); // transformation matrix
var MQube = new Array(); // position information of qube
var I = new Array(); // entity matrix
var Origin = new Object();
var Testing = new Object();
var LoopTimer;
var DisplArea = new Object();
DisplArea.Width = 300;
DisplArea.Height = 300;
function DrawLine(From, To) {
var x1 = From.V[0];
var x2 = To.V[0];
var y1 = From.V[1];
var y2 = To.V[1];
var dx = Math.abs(x2 - x1);
var dy = Math.abs(y2 - y1);
var x = x1;
var y = y1;
var IncX1, IncY1;
var IncX2, IncY2;
var Den;
var Num;
var NumAdd;
var NumPix;
if (x2 >= x1) { IncX1 = 1; IncX2 = 1; }
else { IncX1 = -1; IncX2 = -1; }
if (y2 >= y1) { IncY1 = 1; IncY2 = 1; }
else { IncY1 = -1; IncY2 = -1; }
if (dx >= dy) {
IncX1 = 0;
IncY2 = 0;
Den = dx;
Num = dx / 2;
NumAdd = dy;
NumPix = dx;
}
else {
IncX2 = 0;
IncY1 = 0;
Den = dy;
Num = dy / 2;
NumAdd = dx;
NumPix = dy;
}
NumPix = Math.round(Q.LastPx + NumPix);
var i = Q.LastPx;
for (; i < NumPix; i++) {
Num += NumAdd;
if (Num >= Den) {
Num -= Den;
x += IncX1;
y += IncY1;
}
x += IncX2;
y += IncY2;
}
Q.LastPx = NumPix;
}
function CalcCross(V0, V1) {
var Cross = new Array();
Cross[0] = V0[1]*V1[2] - V0[2]*V1[1];
Cross[1] = V0[2]*V1[0] - V0[0]*V1[2];
Cross[2] = V0[0]*V1[1] - V0[1]*V1[0];
return Cross;
}
function CalcNormal(V0, V1, V2) {
var A = new Array(); var B = new Array();
for (var i = 0; i < 3; i++) {
A[i] = V0[i] - V1[i];
B[i] = V2[i] - V1[i];
}
A = CalcCross(A, B);
var Length = Math.sqrt(A[0]*A[0] + A[1]*A[1] + A[2]*A[2]);
for (var i = 0; i < 3; i++) A[i] = A[i] / Length;
A[3] = 1;
return A;
}
function CreateP(X,Y,Z) {
this.V = [X,Y,Z,1];
}
// multiplies two matrices
function MMulti(M1, M2) {
var M = [[],[],[],[]];
var i = 0;
var j = 0;
for (; i < 4; i++) {
j = 0;
for (; j < 4; j++) M[i][j] = M1[i][0] * M2[0][j] + M1[i][1] * M2[1][j] + M1[i][2] * M2[2][j] + M1[i][3] * M2[3][j];
}
return M;
}
//multiplies matrix with vector
function VMulti(M, V) {
var Vect = new Array();
var i = 0;
for (;i < 4; i++) Vect[i] = M[i][0] * V[0] + M[i][1] * V[1] + M[i][2] * V[2] + M[i][3] * V[3];
return Vect;
}
function VMulti2(M, V) {
var Vect = new Array();
var i = 0;
for (;i < 3; i++) Vect[i] = M[i][0] * V[0] + M[i][1] * V[1] + M[i][2] * V[2];
return Vect;
}
// add to matrices
function MAdd(M1, M2) {
var M = [[],[],[],[]];
var i = 0;
var j = 0;
for (; i < 4; i++) {
j = 0;
for (; j < 4; j++) M[i][j] = M1[i][j] + M2[i][j];
}
return M;
}
function Translate(M, Dx, Dy, Dz) {
var T = [
[1,0,0,Dx],
[0,1,0,Dy],
[0,0,1,Dz],
[0,0,0,1]
];
return MMulti(T, M);
}
function RotateX(M, Phi) {
var a = Phi;
a *= Math.PI / 180;
var Cos = Math.cos(a);
var Sin = Math.sin(a);
var R = [
[1,0,0,0],
[0,Cos,-Sin,0],
[0,Sin,Cos,0],
[0,0,0,1]
];
return MMulti(R, M);
}
function RotateY(M, Phi) {
var a = Phi;
a *= Math.PI / 180;
var Cos = Math.cos(a);
var Sin = Math.sin(a);
var R = [
[Cos,0,Sin,0],
[0,1,0,0],
[-Sin,0,Cos,0],
[0,0,0,1]
];
return MMulti(R, M);
}
function RotateZ(M, Phi) {
var a = Phi;
a *= Math.PI / 180;
var Cos = Math.cos(a);
var Sin = Math.sin(a);
var R = [
[Cos,-Sin,0,0],
[Sin,Cos,0,0],
[0,0,1,0],
[0,0,0,1]
];
return MMulti(R, M);
}
function DrawQube() {
// calc current normals
var CurN = new Array();
var i = 5;
Q.LastPx = 0;
for (; i > -1; i--) CurN[i] = VMulti2(MQube, Q.Normal[i]);
if (CurN[0][2] < 0) {
if (!Q.Line[0]) { DrawLine(Q[0], Q[1]); Q.Line[0] = true; };
if (!Q.Line[1]) { DrawLine(Q[1], Q[2]); Q.Line[1] = true; };
if (!Q.Line[2]) { DrawLine(Q[2], Q[3]); Q.Line[2] = true; };
if (!Q.Line[3]) { DrawLine(Q[3], Q[0]); Q.Line[3] = true; };
}
if (CurN[1][2] < 0) {
if (!Q.Line[2]) { DrawLine(Q[3], Q[2]); Q.Line[2] = true; };
if (!Q.Line[9]) { DrawLine(Q[2], Q[6]); Q.Line[9] = true; };
if (!Q.Line[6]) { DrawLine(Q[6], Q[7]); Q.Line[6] = true; };
if (!Q.Line[10]) { DrawLine(Q[7], Q[3]); Q.Line[10] = true; };
}
if (CurN[2][2] < 0) {
if (!Q.Line[4]) { DrawLine(Q[4], Q[5]); Q.Line[4] = true; };
if (!Q.Line[5]) { DrawLine(Q[5], Q[6]); Q.Line[5] = true; };
if (!Q.Line[6]) { DrawLine(Q[6], Q[7]); Q.Line[6] = true; };
if (!Q.Line[7]) { DrawLine(Q[7], Q[4]); Q.Line[7] = true; };
}
if (CurN[3][2] < 0) {
if (!Q.Line[4]) { DrawLine(Q[4], Q[5]); Q.Line[4] = true; };
if (!Q.Line[8]) { DrawLine(Q[5], Q[1]); Q.Line[8] = true; };
if (!Q.Line[0]) { DrawLine(Q[1], Q[0]); Q.Line[0] = true; };
if (!Q.Line[11]) { DrawLine(Q[0], Q[4]); Q.Line[11] = true; };
}
if (CurN[4][2] < 0) {
if (!Q.Line[11]) { DrawLine(Q[4], Q[0]); Q.Line[11] = true; };
if (!Q.Line[3]) { DrawLine(Q[0], Q[3]); Q.Line[3] = true; };
if (!Q.Line[10]) { DrawLine(Q[3], Q[7]); Q.Line[10] = true; };
if (!Q.Line[7]) { DrawLine(Q[7], Q[4]); Q.Line[7] = true; };
}
if (CurN[5][2] < 0) {
if (!Q.Line[8]) { DrawLine(Q[1], Q[5]); Q.Line[8] = true; };
if (!Q.Line[5]) { DrawLine(Q[5], Q[6]); Q.Line[5] = true; };
if (!Q.Line[9]) { DrawLine(Q[6], Q[2]); Q.Line[9] = true; };
if (!Q.Line[1]) { DrawLine(Q[2], Q[1]); Q.Line[1] = true; };
}
Q.Line = [false,false,false,false,false,false,false,false,false,false,false,false];
Q.LastPx = 0;
}
function Loop() {
if (Testing.LoopCount > Testing.LoopMax) return;
var TestingStr = String(Testing.LoopCount);
while (TestingStr.length < 3) TestingStr = "0" + TestingStr;
MTrans = Translate(I, -Q[8].V[0], -Q[8].V[1], -Q[8].V[2]);
MTrans = RotateX(MTrans, 1);
MTrans = RotateY(MTrans, 3);
MTrans = RotateZ(MTrans, 5);
MTrans = Translate(MTrans, Q[8].V[0], Q[8].V[1], Q[8].V[2]);
MQube = MMulti(MTrans, MQube);
var i = 8;
for (; i > -1; i--) {
Q[i].V = VMulti(MTrans, Q[i].V);
}
DrawQube();
Testing.LoopCount++;
Loop();
}
function Init(CubeSize) {
// init/reset vars
Origin.V = [150,150,20,1];
Testing.LoopCount = 0;
Testing.LoopMax = 50;
Testing.TimeMax = 0;
Testing.TimeAvg = 0;
Testing.TimeMin = 0;
Testing.TimeTemp = 0;
Testing.TimeTotal = 0;
Testing.Init = false;
// transformation matrix
MTrans = [
[1,0,0,0],
[0,1,0,0],
[0,0,1,0],
[0,0,0,1]
];
// position information of qube
MQube = [
[1,0,0,0],
[0,1,0,0],
[0,0,1,0],
[0,0,0,1]
];
// identity matrix
I = [
[1,0,0,0],
[0,1,0,0],
[0,0,1,0],
[0,0,0,1]
];
// create qube
Q[0] = new CreateP(-CubeSize,-CubeSize, CubeSize);
Q[1] = new CreateP(-CubeSize, CubeSize, CubeSize);
Q[2] = new CreateP( CubeSize, CubeSize, CubeSize);
Q[3] = new CreateP( CubeSize,-CubeSize, CubeSize);
Q[4] = new CreateP(-CubeSize,-CubeSize,-CubeSize);
Q[5] = new CreateP(-CubeSize, CubeSize,-CubeSize);
Q[6] = new CreateP( CubeSize, CubeSize,-CubeSize);
Q[7] = new CreateP( CubeSize,-CubeSize,-CubeSize);
// center of gravity
Q[8] = new CreateP(0, 0, 0);
// anti-clockwise edge check
Q.Edge = [[0,1,2],[3,2,6],[7,6,5],[4,5,1],[4,0,3],[1,5,6]];
// calculate face (quad) normals
Q.Normal = new Array();
for (var i = 0; i < Q.Edge.length; i++) Q.Normal[i] = CalcNormal(Q[Q.Edge[i][0]].V, Q[Q.Edge[i][1]].V, Q[Q.Edge[i][2]].V);
// line drawn?
Q.Line = [false,false,false,false,false,false,false,false,false,false,false,false];
// create line pixels
Q.NumPx = 9 * 2 * CubeSize;
for (var i = 0; i < Q.NumPx; i++) CreateP(0,0,0);
MTrans = Translate(MTrans, Origin.V[0], Origin.V[1], Origin.V[2]);
MQube = MMulti(MTrans, MQube);
var i = 0;
for (; i < 9; i++) {
Q[i].V = VMulti(MTrans, Q[i].V);
}
DrawQube();
Testing.Init = true;
Loop();
}
for ( var i = 20; i <= 160; i *= 2 ) {
Init(i);
}
var actual = '';
for (var i = 0; i < Q.length; ++i) {
actual += Q[i].V + ';';
}
var expected = "-116.618229186398,212.51135212951073,62.5094191967962,1;127.83701023614447,417.11611179082263,90.41153816299942,1;293.9570894432935,196.58093046570656,252.17789153139591,1;49.501850020750915,-8.02382919560505,224.275772565193,1;6.042910556709444,103.41906953429206,-212.1778915313964,1;250.49814997925202,308.02382919560387,-184.27577256519325,1;416.61822918640064,87.48864787048812,-22.509419196796493,1;172.1629897638581,-117.1161117908236,-50.41153816299975,1;150.0000000000007,149.99999999999952,20,1;";
assertEq(actual, expected);
Q = null;
MTrans = null;
MQube = null;
I = null;
Origin = null;
Testing = null;
LoopTime = null;
DisplArea = null;

View file

@@ -0,0 +1,424 @@
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
/*
* AES Cipher function: encrypt 'input' with Rijndael algorithm
*
* takes byte-array 'input' (16 bytes)
* 2D byte-array key schedule 'w' (Nr+1 x Nb words)
*
* applies Nr rounds (10/12/14) using key schedule w for 'add round key' stage
*
* returns byte-array encrypted value (16 bytes)
*/
function Cipher(input, w) { // main Cipher function [§5.1]
var Nb = 4; // block size (in words): no of columns in state (fixed at 4 for AES)
var Nr = w.length/Nb - 1; // no of rounds: 10/12/14 for 128/192/256-bit keys
var state = [[],[],[],[]]; // initialise 4xNb byte-array 'state' with input [§3.4]
for (var i=0; i<4*Nb; i++) state[i%4][Math.floor(i/4)] = input[i];
state = AddRoundKey(state, w, 0, Nb);
for (var round=1; round<Nr; round++) {
state = SubBytes(state, Nb);
state = ShiftRows(state, Nb);
state = MixColumns(state, Nb);
state = AddRoundKey(state, w, round, Nb);
}
state = SubBytes(state, Nb);
state = ShiftRows(state, Nb);
state = AddRoundKey(state, w, Nr, Nb);
var output = new Array(4*Nb); // convert state to 1-d array before returning [§3.4]
for (var i=0; i<4*Nb; i++) output[i] = state[i%4][Math.floor(i/4)];
return output;
}
function SubBytes(s, Nb) { // apply SBox to state S [§5.1.1]
for (var r=0; r<4; r++) {
for (var c=0; c<Nb; c++) s[r][c] = Sbox[s[r][c]];
}
return s;
}
function ShiftRows(s, Nb) { // shift row r of state S left by r bytes [§5.1.2]
var t = new Array(4);
for (var r=1; r<4; r++) {
for (var c=0; c<4; c++) t[c] = s[r][(c+r)%Nb]; // shift into temp copy
for (var c=0; c<4; c++) s[r][c] = t[c]; // and copy back
} // note that this will work for Nb=4,5,6, but not 7,8 (always 4 for AES):
return s; // see fp.gladman.plus.com/cryptography_technology/rijndael/aes.spec.311.pdf
}
function MixColumns(s, Nb) { // combine bytes of each col of state S [§5.1.3]
for (var c=0; c<4; c++) {
var a = new Array(4); // 'a' is a copy of the current column from 's'
var b = new Array(4); // 'b' is a•{02} in GF(2^8)
for (var i=0; i<4; i++) {
a[i] = s[i][c];
b[i] = s[i][c]&0x80 ? s[i][c]<<1 ^ 0x011b : s[i][c]<<1;
}
// a[n] ^ b[n] is a•{03} in GF(2^8)
s[0][c] = b[0] ^ a[1] ^ b[1] ^ a[2] ^ a[3]; // 2*a0 + 3*a1 + a2 + a3
s[1][c] = a[0] ^ b[1] ^ a[2] ^ b[2] ^ a[3]; // a0 + 2*a1 + 3*a2 + a3
s[2][c] = a[0] ^ a[1] ^ b[2] ^ a[3] ^ b[3]; // a0 + a1 + 2*a2 + 3*a3
s[3][c] = a[0] ^ b[0] ^ a[1] ^ a[2] ^ b[3]; // 3*a0 + a1 + a2 + 2*a3
}
return s;
}
function AddRoundKey(state, w, rnd, Nb) { // xor Round Key into state S [§5.1.4]
for (var r=0; r<4; r++) {
for (var c=0; c<Nb; c++) state[r][c] ^= w[rnd*4+c][r];
}
return state;
}
function KeyExpansion(key) { // generate Key Schedule (byte-array Nr+1 x Nb) from Key [§5.2]
var Nb = 4; // block size (in words): no of columns in state (fixed at 4 for AES)
var Nk = key.length/4; // key length (in words): 4/6/8 for 128/192/256-bit keys
var Nr = Nk + 6; // no of rounds: 10/12/14 for 128/192/256-bit keys
var w = new Array(Nb*(Nr+1));
var temp = new Array(4);
for (var i=0; i<Nk; i++) {
var r = [key[4*i], key[4*i+1], key[4*i+2], key[4*i+3]];
w[i] = r;
}
for (var i=Nk; i<(Nb*(Nr+1)); i++) {
w[i] = new Array(4);
for (var t=0; t<4; t++) temp[t] = w[i-1][t];
if (i % Nk == 0) {
temp = SubWord(RotWord(temp));
for (var t=0; t<4; t++) temp[t] ^= Rcon[i/Nk][t];
} else if (Nk > 6 && i%Nk == 4) {
temp = SubWord(temp);
}
for (var t=0; t<4; t++) w[i][t] = w[i-Nk][t] ^ temp[t];
}
return w;
}
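// For a 128-bit key, Nk=4 and Nr=10, so w holds Nb*(Nr+1) = 44 four-byte
// words; AddRoundKey consumes Nb = 4 of them per round.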
function SubWord(w) { // apply SBox to 4-byte word w
for (var i=0; i<4; i++) w[i] = Sbox[w[i]];
return w;
}
function RotWord(w) { // rotate 4-byte word w left by one byte
w[4] = w[0];
for (var i=0; i<4; i++) w[i] = w[i+1];
return w;
}
// Sbox is pre-computed multiplicative inverse in GF(2^8) used in SubBytes and KeyExpansion [§5.1.1]
var Sbox = [0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76,
0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0,
0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15,
0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75,
0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84,
0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf,
0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8,
0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2,
0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73,
0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb,
0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79,
0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08,
0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a,
0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e,
0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf,
0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16];
// Rcon is Round Constant used for the Key Expansion [1st col is 2^(r-1) in GF(2^8)] [§5.2]
var Rcon = [ [0x00, 0x00, 0x00, 0x00],
[0x01, 0x00, 0x00, 0x00],
[0x02, 0x00, 0x00, 0x00],
[0x04, 0x00, 0x00, 0x00],
[0x08, 0x00, 0x00, 0x00],
[0x10, 0x00, 0x00, 0x00],
[0x20, 0x00, 0x00, 0x00],
[0x40, 0x00, 0x00, 0x00],
[0x80, 0x00, 0x00, 0x00],
[0x1b, 0x00, 0x00, 0x00],
[0x36, 0x00, 0x00, 0x00] ];
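// Sanity check (an addition, not part of the original benchmark): Cipher and
// KeyExpansion above implement standard AES, so they should reproduce the
// AES-128 test vector from FIPS-197 Appendix C.1. The fips* names exist only
// for this check.
var fipsKey = [0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
               0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f];
var fipsPt  = [0x00,0x11,0x22,0x33,0x44,0x55,0x66,0x77,
               0x88,0x99,0xaa,0xbb,0xcc,0xdd,0xee,0xff];
var fipsCt  = [0x69,0xc4,0xe0,0xd8,0x6a,0x7b,0x04,0x30,
               0xd8,0xcd,0xb7,0x80,0x70,0xb4,0xc5,0x5a];
var fipsOut = Cipher(fipsPt, KeyExpansion(fipsKey));
for (var i=0; i<16; i++) assertEq(fipsOut[i], fipsCt[i]);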
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
/*
* Use AES to encrypt 'plaintext' with 'password' using 'nBits' key, in 'Counter' mode of operation
* - see http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
* for each block
* - outputblock = cipher(counter, key)
* - cipherblock = plaintext xor outputblock
*/
function AESEncryptCtr(plaintext, password, nBits) {
if (!(nBits==128 || nBits==192 || nBits==256)) return ''; // standard allows 128/192/256 bit keys
// for this example script, generate the key by applying Cipher to 1st 16/24/32 chars of password;
// for real-world applications, a more secure approach would be to hash the password e.g. with SHA-1
var nBytes = nBits/8; // no bytes in key
var pwBytes = new Array(nBytes);
for (var i=0; i<nBytes; i++) pwBytes[i] = password.charCodeAt(i) & 0xff;
var key = Cipher(pwBytes, KeyExpansion(pwBytes));
key = key.concat(key.slice(0, nBytes-16)); // key is now 16/24/32 bytes long
// initialise counter block (NIST SP800-38A §B.2): millisecond time-stamp for nonce in 1st 8 bytes,
// block counter in 2nd 8 bytes
var blockSize = 16; // block size fixed at 16 bytes / 128 bits (Nb=4) for AES
var counterBlock = new Array(blockSize); // block size fixed at 16 bytes / 128 bits (Nb=4) for AES
var nonce = (new Date()).getTime(); // milliseconds since 1-Jan-1970
// encode nonce in two stages to cater for JavaScript 32-bit limit on bitwise ops
for (var i=0; i<4; i++) counterBlock[i] = (nonce >>> i*8) & 0xff;
for (var i=0; i<4; i++) counterBlock[i+4] = (nonce/0x100000000 >>> i*8) & 0xff;
// generate key schedule - an expansion of the key into distinct Key Rounds for each round
var keySchedule = KeyExpansion(key);
var blockCount = Math.ceil(plaintext.length/blockSize);
var ciphertext = new Array(blockCount); // ciphertext as array of strings
for (var b=0; b<blockCount; b++) {
// set counter (block #) in last 8 bytes of counter block (leaving nonce in 1st 8 bytes)
// again done in two stages for 32-bit ops
for (var c=0; c<4; c++) counterBlock[15-c] = (b >>> c*8) & 0xff;
for (var c=0; c<4; c++) counterBlock[15-c-4] = (b/0x100000000 >>> c*8) & 0xff;
var cipherCntr = Cipher(counterBlock, keySchedule); // -- encrypt counter block --
// calculate length of final block:
var blockLength = b<blockCount-1 ? blockSize : (plaintext.length-1)%blockSize+1;
var ct = '';
for (var i=0; i<blockLength; i++) { // -- xor plaintext with ciphered counter byte-by-byte --
var plaintextByte = plaintext.charCodeAt(b*blockSize+i);
var cipherByte = plaintextByte ^ cipherCntr[i];
ct += String.fromCharCode(cipherByte);
}
// ct is now ciphertext for this block
ciphertext[b] = escCtrlChars(ct); // escape troublesome characters in ciphertext
}
// convert the nonce to a string to go on the front of the ciphertext
var ctrTxt = '';
for (var i=0; i<8; i++) ctrTxt += String.fromCharCode(counterBlock[i]);
ctrTxt = escCtrlChars(ctrTxt);
// use '-' to separate blocks, use Array.join to concatenate arrays of strings for efficiency
return ctrTxt + '-' + ciphertext.join('-');
}
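// The returned string is escCtrlChars(nonce) followed by one escaped
// ciphertext block per '-' separator; AESDecryptCtr below splits on '-'
// to recover the nonce and the blocks.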
/*
* Use AES to decrypt 'ciphertext' with 'password' using 'nBits' key, in Counter mode of operation
*
* for each block
* - outputblock = cipher(counter, key)
* - cipherblock = plaintext xor outputblock
*/
function AESDecryptCtr(ciphertext, password, nBits) {
if (!(nBits==128 || nBits==192 || nBits==256)) return ''; // standard allows 128/192/256 bit keys
var nBytes = nBits/8; // no bytes in key
var pwBytes = new Array(nBytes);
for (var i=0; i<nBytes; i++) pwBytes[i] = password.charCodeAt(i) & 0xff;
var pwKeySchedule = KeyExpansion(pwBytes);
var key = Cipher(pwBytes, pwKeySchedule);
key = key.concat(key.slice(0, nBytes-16)); // key is now 16/24/32 bytes long
var keySchedule = KeyExpansion(key);
ciphertext = ciphertext.split('-'); // split ciphertext into array of block-length strings
// recover nonce from 1st element of ciphertext
var blockSize = 16; // block size fixed at 16 bytes / 128 bits (Nb=4) for AES
var counterBlock = new Array(blockSize);
var ctrTxt = unescCtrlChars(ciphertext[0]);
for (var i=0; i<8; i++) counterBlock[i] = ctrTxt.charCodeAt(i);
var plaintext = new Array(ciphertext.length-1);
for (var b=1; b<ciphertext.length; b++) {
// set counter (block #) in last 8 bytes of counter block (leaving nonce in 1st 8 bytes)
for (var c=0; c<4; c++) counterBlock[15-c] = ((b-1) >>> c*8) & 0xff;
for (var c=0; c<4; c++) counterBlock[15-c-4] = ((b-1)/0x100000000 >>> c*8) & 0xff;
var cipherCntr = Cipher(counterBlock, keySchedule); // encrypt counter block
ciphertext[b] = unescCtrlChars(ciphertext[b]);
var pt = '';
for (var i=0; i<ciphertext[b].length; i++) {
// -- xor plaintext with ciphered counter byte-by-byte --
var ciphertextByte = ciphertext[b].charCodeAt(i);
var plaintextByte = ciphertextByte ^ cipherCntr[i];
pt += String.fromCharCode(plaintextByte);
}
// pt is now plaintext for this block
plaintext[b-1] = pt; // b-1 'cos no initial nonce block in plaintext
}
return plaintext.join('');
}
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
function escCtrlChars(str) { // escape control chars which might cause problems handling ciphertext
return str.replace(/[\0\t\n\v\f\r\xa0'"!-]/g, function(c) { return '!' + c.charCodeAt(0) + '!'; });
} // \xa0 to cater for bug in Firefox; include '-' to leave it free for use as a block marker
function unescCtrlChars(str) { // unescape potentially problematic control characters
return str.replace(/!\d\d?\d?!/g, function(c) { return String.fromCharCode(c.slice(1,-1)); });
}
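// For example, escCtrlChars("a-b") gives "a!45!b" ('-' is char code 45) and
// unescCtrlChars maps it back; the two functions are inverses.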
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
/*
* if escCtrlChars()/unescCtrlChars() still gives problems, use encodeBase64()/decodeBase64() instead
*/
var b64 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
function encodeBase64(str) { // http://tools.ietf.org/html/rfc4648
var o1, o2, o3, h1, h2, h3, h4, bits, i=0, enc='';
str = encodeUTF8(str); // encode multi-byte chars into UTF-8 for byte-array
do { // pack three octets into four hexets
o1 = str.charCodeAt(i++);
o2 = str.charCodeAt(i++);
o3 = str.charCodeAt(i++);
bits = o1<<16 | o2<<8 | o3;
h1 = bits>>18 & 0x3f;
h2 = bits>>12 & 0x3f;
h3 = bits>>6 & 0x3f;
h4 = bits & 0x3f;
// end of string? index to '=' in b64
if (isNaN(o3)) h4 = 64;
if (isNaN(o2)) h3 = 64;
// use hexets to index into b64, and append result to encoded string
enc += b64.charAt(h1) + b64.charAt(h2) + b64.charAt(h3) + b64.charAt(h4);
} while (i < str.length);
return enc;
}
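// For example, encodeBase64("Man") gives "TWFu" (the RFC 4648 example);
// inputs whose length is not a multiple of 3 come back '='-padded.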
function decodeBase64(str) {
var o1, o2, o3, h1, h2, h3, h4, bits, i=0, enc='';
do { // unpack four hexets into three octets using index points in b64
h1 = b64.indexOf(str.charAt(i++));
h2 = b64.indexOf(str.charAt(i++));
h3 = b64.indexOf(str.charAt(i++));
h4 = b64.indexOf(str.charAt(i++));
bits = h1<<18 | h2<<12 | h3<<6 | h4;
o1 = bits>>16 & 0xff;
o2 = bits>>8 & 0xff;
o3 = bits & 0xff;
if (h3 == 64) enc += String.fromCharCode(o1);
else if (h4 == 64) enc += String.fromCharCode(o1, o2);
else enc += String.fromCharCode(o1, o2, o3);
} while (i < str.length);
return decodeUTF8(enc); // decode UTF-8 byte-array back to Unicode
}
function encodeUTF8(str) { // encode multi-byte string into utf-8 multiple single-byte characters
str = str.replace(
/[\u0080-\u07ff]/g, // U+0080 - U+07FF = 2-byte chars
function(c) {
var cc = c.charCodeAt(0);
return String.fromCharCode(0xc0 | cc>>6, 0x80 | cc&0x3f); }
);
str = str.replace(
/[\u0800-\uffff]/g, // U+0800 - U+FFFF = 3-byte chars
function(c) {
var cc = c.charCodeAt(0);
return String.fromCharCode(0xe0 | cc>>12, 0x80 | cc>>6&0x3F, 0x80 | cc&0x3f); }
);
return str;
}
function decodeUTF8(str) { // decode utf-8 encoded string back into multi-byte characters
str = str.replace(
/[\u00c0-\u00df][\u0080-\u00bf]/g, // 2-byte chars
function(c) {
var cc = (c.charCodeAt(0)&0x1f)<<6 | c.charCodeAt(1)&0x3f;
return String.fromCharCode(cc); }
);
str = str.replace(
/[\u00e0-\u00ef][\u0080-\u00bf][\u0080-\u00bf]/g, // 3-byte chars
function(c) {
var cc = (c.charCodeAt(0)&0x0f)<<12 | (c.charCodeAt(1)&0x3f)<<6 | c.charCodeAt(2)&0x3f;
return String.fromCharCode(cc); }
);
return str;
}
function byteArrayToHexStr(b) { // convert byte array to hex string for displaying test vectors
var s = '';
for (var i=0; i<b.length; i++) s += b[i].toString(16) + ' ';
return s;
}
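// Note: toString(16) does not zero-pad, so a byte like 0x0a prints as "a"
// rather than "0a"; fine for eyeballing vectors, not for round-tripping.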
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
var plainText = "ROMEO: But, soft! what light through yonder window breaks?\n\
It is the east, and Juliet is the sun.\n\
Arise, fair sun, and kill the envious moon,\n\
Who is already sick and pale with grief,\n\
That thou her maid art far more fair than she:\n\
Be not her maid, since she is envious;\n\
Her vestal livery is but sick and green\n\
And none but fools do wear it; cast it off.\n\
It is my lady, O, it is my love!\n\
O, that she knew she were!\n\
She speaks yet she says nothing: what of that?\n\
Her eye discourses; I will answer it.\n\
I am too bold, 'tis not to me she speaks:\n\
Two of the fairest stars in all the heaven,\n\
Having some business, do entreat her eyes\n\
To twinkle in their spheres till they return.\n\
What if her eyes were there, they in her head?\n\
The brightness of her cheek would shame those stars,\n\
As daylight doth a lamp; her eyes in heaven\n\
Would through the airy region stream so bright\n\
That birds would sing and think it were not night.\n\
See, how she leans her cheek upon her hand!\n\
O, that I were a glove upon that hand,\n\
That I might touch that cheek!\n\
JULIET: Ay me!\n\
ROMEO: She speaks:\n\
O, speak again, bright angel! for thou art\n\
As glorious to this night, being o'er my head\n\
As is a winged messenger of heaven\n\
Unto the white-upturned wondering eyes\n\
Of mortals that fall back to gaze on him\n\
When he bestrides the lazy-pacing clouds\n\
And sails upon the bosom of the air.";
var password = "O Romeo, Romeo! wherefore art thou Romeo?";
var cipherText = AESEncryptCtr(plainText, password, 256);
var decryptedText = AESDecryptCtr(cipherText, password, 256);
assertEq(plainText, decryptedText);

View file

@@ -0,0 +1,287 @@
/*
* A JavaScript implementation of the RSA Data Security, Inc. MD5 Message
* Digest Algorithm, as defined in RFC 1321.
* Version 2.1 Copyright (C) Paul Johnston 1999 - 2002.
* Other contributors: Greg Holt, Andrew Kepert, Ydnar, Lostinet
* Distributed under the BSD License
* See http://pajhome.org.uk/crypt/md5 for more info.
*/
/*
* Configurable variables. You may need to tweak these to be compatible with
* the server-side, but the defaults work in most cases.
*/
var hexcase = 0; /* hex output format. 0 - lowercase; 1 - uppercase */
var b64pad = ""; /* base-64 pad character. "=" for strict RFC compliance */
var chrsz = 8; /* bits per input character. 8 - ASCII; 16 - Unicode */
/*
* These are the functions you'll usually want to call
* They take string arguments and return either hex or base-64 encoded strings
*/
function hex_md5(s){ return binl2hex(core_md5(str2binl(s), s.length * chrsz));}
function b64_md5(s){ return binl2b64(core_md5(str2binl(s), s.length * chrsz));}
function str_md5(s){ return binl2str(core_md5(str2binl(s), s.length * chrsz));}
function hex_hmac_md5(key, data) { return binl2hex(core_hmac_md5(key, data)); }
function b64_hmac_md5(key, data) { return binl2b64(core_hmac_md5(key, data)); }
function str_hmac_md5(key, data) { return binl2str(core_hmac_md5(key, data)); }
/*
* Perform a simple self-test to see if the VM is working
*/
function md5_vm_test()
{
return hex_md5("abc") == "900150983cd24fb0d6963f7d28e17f72";
}
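// Extra self-check (an addition, not in the original file): MD5("abc") is a
// published RFC 1321 test vector, so this should always hold.
assertEq(md5_vm_test(), true);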
/*
* Calculate the MD5 of an array of little-endian words, and a bit length
*/
function core_md5(x, len)
{
/* append padding */
x[len >> 5] |= 0x80 << ((len) % 32);
x[(((len + 64) >>> 9) << 4) + 14] = len;
var a = 1732584193;
var b = -271733879;
var c = -1732584194;
var d = 271733878;
for(var i = 0; i < x.length; i += 16)
{
var olda = a;
var oldb = b;
var oldc = c;
var oldd = d;
a = md5_ff(a, b, c, d, x[i+ 0], 7 , -680876936);
d = md5_ff(d, a, b, c, x[i+ 1], 12, -389564586);
c = md5_ff(c, d, a, b, x[i+ 2], 17, 606105819);
b = md5_ff(b, c, d, a, x[i+ 3], 22, -1044525330);
a = md5_ff(a, b, c, d, x[i+ 4], 7 , -176418897);
d = md5_ff(d, a, b, c, x[i+ 5], 12, 1200080426);
c = md5_ff(c, d, a, b, x[i+ 6], 17, -1473231341);
b = md5_ff(b, c, d, a, x[i+ 7], 22, -45705983);
a = md5_ff(a, b, c, d, x[i+ 8], 7 , 1770035416);
d = md5_ff(d, a, b, c, x[i+ 9], 12, -1958414417);
c = md5_ff(c, d, a, b, x[i+10], 17, -42063);
b = md5_ff(b, c, d, a, x[i+11], 22, -1990404162);
a = md5_ff(a, b, c, d, x[i+12], 7 , 1804603682);
d = md5_ff(d, a, b, c, x[i+13], 12, -40341101);
c = md5_ff(c, d, a, b, x[i+14], 17, -1502002290);
b = md5_ff(b, c, d, a, x[i+15], 22, 1236535329);
a = md5_gg(a, b, c, d, x[i+ 1], 5 , -165796510);
d = md5_gg(d, a, b, c, x[i+ 6], 9 , -1069501632);
c = md5_gg(c, d, a, b, x[i+11], 14, 643717713);
b = md5_gg(b, c, d, a, x[i+ 0], 20, -373897302);
a = md5_gg(a, b, c, d, x[i+ 5], 5 , -701558691);
d = md5_gg(d, a, b, c, x[i+10], 9 , 38016083);
c = md5_gg(c, d, a, b, x[i+15], 14, -660478335);
b = md5_gg(b, c, d, a, x[i+ 4], 20, -405537848);
a = md5_gg(a, b, c, d, x[i+ 9], 5 , 568446438);
d = md5_gg(d, a, b, c, x[i+14], 9 , -1019803690);
c = md5_gg(c, d, a, b, x[i+ 3], 14, -187363961);
b = md5_gg(b, c, d, a, x[i+ 8], 20, 1163531501);
a = md5_gg(a, b, c, d, x[i+13], 5 , -1444681467);
d = md5_gg(d, a, b, c, x[i+ 2], 9 , -51403784);
c = md5_gg(c, d, a, b, x[i+ 7], 14, 1735328473);
b = md5_gg(b, c, d, a, x[i+12], 20, -1926607734);
a = md5_hh(a, b, c, d, x[i+ 5], 4 , -378558);
d = md5_hh(d, a, b, c, x[i+ 8], 11, -2022574463);
c = md5_hh(c, d, a, b, x[i+11], 16, 1839030562);
b = md5_hh(b, c, d, a, x[i+14], 23, -35309556);
a = md5_hh(a, b, c, d, x[i+ 1], 4 , -1530992060);
d = md5_hh(d, a, b, c, x[i+ 4], 11, 1272893353);
c = md5_hh(c, d, a, b, x[i+ 7], 16, -155497632);
b = md5_hh(b, c, d, a, x[i+10], 23, -1094730640);
a = md5_hh(a, b, c, d, x[i+13], 4 , 681279174);
d = md5_hh(d, a, b, c, x[i+ 0], 11, -358537222);
c = md5_hh(c, d, a, b, x[i+ 3], 16, -722521979);
b = md5_hh(b, c, d, a, x[i+ 6], 23, 76029189);
a = md5_hh(a, b, c, d, x[i+ 9], 4 , -640364487);
d = md5_hh(d, a, b, c, x[i+12], 11, -421815835);
c = md5_hh(c, d, a, b, x[i+15], 16, 530742520);
b = md5_hh(b, c, d, a, x[i+ 2], 23, -995338651);
a = md5_ii(a, b, c, d, x[i+ 0], 6 , -198630844);
d = md5_ii(d, a, b, c, x[i+ 7], 10, 1126891415);
c = md5_ii(c, d, a, b, x[i+14], 15, -1416354905);
b = md5_ii(b, c, d, a, x[i+ 5], 21, -57434055);
a = md5_ii(a, b, c, d, x[i+12], 6 , 1700485571);
d = md5_ii(d, a, b, c, x[i+ 3], 10, -1894986606);
c = md5_ii(c, d, a, b, x[i+10], 15, -1051523);
b = md5_ii(b, c, d, a, x[i+ 1], 21, -2054922799);
a = md5_ii(a, b, c, d, x[i+ 8], 6 , 1873313359);
d = md5_ii(d, a, b, c, x[i+15], 10, -30611744);
c = md5_ii(c, d, a, b, x[i+ 6], 15, -1560198380);
b = md5_ii(b, c, d, a, x[i+13], 21, 1309151649);
a = md5_ii(a, b, c, d, x[i+ 4], 6 , -145523070);
d = md5_ii(d, a, b, c, x[i+11], 10, -1120210379);
c = md5_ii(c, d, a, b, x[i+ 2], 15, 718787259);
b = md5_ii(b, c, d, a, x[i+ 9], 21, -343485551);
a = safe_add(a, olda);
b = safe_add(b, oldb);
c = safe_add(c, oldc);
d = safe_add(d, oldd);
}
return Array(a, b, c, d);
}
/*
* These functions implement the four basic operations the algorithm uses.
*/
function md5_cmn(q, a, b, x, s, t)
{
return safe_add(bit_rol(safe_add(safe_add(a, q), safe_add(x, t)), s),b);
}
function md5_ff(a, b, c, d, x, s, t)
{
return md5_cmn((b & c) | ((~b) & d), a, b, x, s, t);
}
function md5_gg(a, b, c, d, x, s, t)
{
return md5_cmn((b & d) | (c & (~d)), a, b, x, s, t);
}
function md5_hh(a, b, c, d, x, s, t)
{
return md5_cmn(b ^ c ^ d, a, b, x, s, t);
}
function md5_ii(a, b, c, d, x, s, t)
{
return md5_cmn(c ^ (b | (~d)), a, b, x, s, t);
}
/*
* Calculate the HMAC-MD5, of a key and some data
*/
function core_hmac_md5(key, data)
{
var bkey = str2binl(key);
if(bkey.length > 16) bkey = core_md5(bkey, key.length * chrsz);
var ipad = Array(16), opad = Array(16);
for(var i = 0; i < 16; i++)
{
ipad[i] = bkey[i] ^ 0x36363636;
opad[i] = bkey[i] ^ 0x5C5C5C5C;
}
var hash = core_md5(ipad.concat(str2binl(data)), 512 + data.length * chrsz);
return core_md5(opad.concat(hash), 512 + 128);
}
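// This is the standard RFC 2104 construction H((K^opad) || H((K^ipad) || data)),
// with the key implicitly zero-padded (undefined ^ pad yields pad) or
// pre-hashed when longer than one 512-bit block.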
/*
* Add integers, wrapping at 2^32. This uses 16-bit operations internally
* to work around bugs in some JS interpreters.
*/
function safe_add(x, y)
{
var lsw = (x & 0xFFFF) + (y & 0xFFFF);
var msw = (x >> 16) + (y >> 16) + (lsw >> 16);
return (msw << 16) | (lsw & 0xFFFF);
}
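// For example, safe_add(0x7fffffff, 1) gives -2147483648: sums wrap around
// exactly like 32-bit two's-complement addition.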
/*
* Bitwise rotate a 32-bit number to the left.
*/
function bit_rol(num, cnt)
{
return (num << cnt) | (num >>> (32 - cnt));
}
/*
* Convert a string to an array of little-endian words
* If chrsz is ASCII, characters >255 have their hi-byte silently ignored.
*/
function str2binl(str)
{
var bin = Array();
var mask = (1 << chrsz) - 1;
for(var i = 0; i < str.length * chrsz; i += chrsz)
bin[i>>5] |= (str.charCodeAt(i / chrsz) & mask) << (i%32);
return bin;
}
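// For example, with chrsz=8, str2binl("abc") gives [0x636261]: 'a' lands in
// the low byte of word 0 and 'c' in the third byte (little-endian packing).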
/*
* Convert an array of little-endian words to a string
*/
function binl2str(bin)
{
var str = "";
var mask = (1 << chrsz) - 1;
for(var i = 0; i < bin.length * 32; i += chrsz)
str += String.fromCharCode((bin[i>>5] >>> (i % 32)) & mask);
return str;
}
/*
* Convert an array of little-endian words to a hex string.
*/
function binl2hex(binarray)
{
var hex_tab = hexcase ? "0123456789ABCDEF" : "0123456789abcdef";
var str = "";
for(var i = 0; i < binarray.length * 4; i++)
{
str += hex_tab.charAt((binarray[i>>2] >> ((i%4)*8+4)) & 0xF) +
hex_tab.charAt((binarray[i>>2] >> ((i%4)*8 )) & 0xF);
}
return str;
}
/*
* Convert an array of little-endian words to a base-64 string
*/
function binl2b64(binarray)
{
var tab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var str = "";
for(var i = 0; i < binarray.length * 4; i += 3)
{
var triplet = (((binarray[i >> 2] >> 8 * ( i %4)) & 0xFF) << 16)
| (((binarray[i+1 >> 2] >> 8 * ((i+1)%4)) & 0xFF) << 8 )
| ((binarray[i+2 >> 2] >> 8 * ((i+2)%4)) & 0xFF);
for(var j = 0; j < 4; j++)
{
if(i * 8 + j * 6 > binarray.length * 32) str += b64pad;
else str += tab.charAt((triplet >> 6*(3-j)) & 0x3F);
}
}
return str;
}
var plainText = "Rebellious subjects, enemies to peace,\n\
Profaners of this neighbour-stained steel,--\n\
Will they not hear? What, ho! you men, you beasts,\n\
That quench the fire of your pernicious rage\n\
With purple fountains issuing from your veins,\n\
On pain of torture, from those bloody hands\n\
Throw your mistemper'd weapons to the ground,\n\
And hear the sentence of your moved prince.\n\
Three civil brawls, bred of an airy word,\n\
By thee, old Capulet, and Montague,\n\
Have thrice disturb'd the quiet of our streets,\n\
And made Verona's ancient citizens\n\
Cast by their grave beseeming ornaments,\n\
To wield old partisans, in hands as old,\n\
Canker'd with peace, to part your canker'd hate:\n\
If ever you disturb our streets again,\n\
Your lives shall pay the forfeit of the peace.\n\
For this time, all the rest depart away:\n\
You Capulet; shall go along with me:\n\
And, Montague, come you this afternoon,\n\
To know our further pleasure in this case,\n\
To old Free-town, our common judgment-place.\n\
Once more, on pain of death, all men depart."
for (var i = 0; i <4; i++) {
plainText += plainText;
}
var md5Output = hex_md5(plainText);
assertEq(md5Output, "a831e91e0f70eddcb70dc61c6f82f6cd");

View file

@@ -0,0 +1,225 @@
/*
* A JavaScript implementation of the Secure Hash Algorithm, SHA-1, as defined
* in FIPS PUB 180-1
* Version 2.1a Copyright Paul Johnston 2000 - 2002.
* Other contributors: Greg Holt, Andrew Kepert, Ydnar, Lostinet
* Distributed under the BSD License
* See http://pajhome.org.uk/crypt/md5 for details.
*/
/*
* Configurable variables. You may need to tweak these to be compatible with
* the server-side, but the defaults work in most cases.
*/
var hexcase = 0; /* hex output format. 0 - lowercase; 1 - uppercase */
var b64pad = ""; /* base-64 pad character. "=" for strict RFC compliance */
var chrsz = 8; /* bits per input character. 8 - ASCII; 16 - Unicode */
/*
* These are the functions you'll usually want to call
* They take string arguments and return either hex or base-64 encoded strings
*/
function hex_sha1(s){return binb2hex(core_sha1(str2binb(s),s.length * chrsz));}
function b64_sha1(s){return binb2b64(core_sha1(str2binb(s),s.length * chrsz));}
function str_sha1(s){return binb2str(core_sha1(str2binb(s),s.length * chrsz));}
function hex_hmac_sha1(key, data){ return binb2hex(core_hmac_sha1(key, data));}
function b64_hmac_sha1(key, data){ return binb2b64(core_hmac_sha1(key, data));}
function str_hmac_sha1(key, data){ return binb2str(core_hmac_sha1(key, data));}
/*
* Perform a simple self-test to see if the VM is working
*/
function sha1_vm_test()
{
return hex_sha1("abc") == "a9993e364706816aba3e25717850c26c9cd0d89d";
}
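// Extra self-check (an addition, not in the original file): SHA-1("abc") is
// the published FIPS 180-1 test vector, so this should always hold.
assertEq(sha1_vm_test(), true);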
/*
* Calculate the SHA-1 of an array of big-endian words, and a bit length
*/
function core_sha1(x, len)
{
/* append padding */
x[len >> 5] |= 0x80 << (24 - len % 32);
x[((len + 64 >> 9) << 4) + 15] = len;
var w = Array(80);
var a = 1732584193;
var b = -271733879;
var c = -1732584194;
var d = 271733878;
var e = -1009589776;
for(var i = 0; i < x.length; i += 16)
{
var olda = a;
var oldb = b;
var oldc = c;
var oldd = d;
var olde = e;
for(var j = 0; j < 80; j++)
{
if(j < 16) w[j] = x[i + j];
else w[j] = rol(w[j-3] ^ w[j-8] ^ w[j-14] ^ w[j-16], 1);
var t = safe_add(safe_add(rol(a, 5), sha1_ft(j, b, c, d)),
safe_add(safe_add(e, w[j]), sha1_kt(j)));
e = d;
d = c;
c = rol(b, 30);
b = a;
a = t;
}
a = safe_add(a, olda);
b = safe_add(b, oldb);
c = safe_add(c, oldc);
d = safe_add(d, oldd);
e = safe_add(e, olde);
}
return Array(a, b, c, d, e);
}
/*
* Perform the appropriate triplet combination function for the current
* iteration
*/
function sha1_ft(t, b, c, d)
{
if(t < 20) return (b & c) | ((~b) & d);
if(t < 40) return b ^ c ^ d;
if(t < 60) return (b & c) | (b & d) | (c & d);
return b ^ c ^ d;
}
/*
* Determine the appropriate additive constant for the current iteration
*/
function sha1_kt(t)
{
return (t < 20) ? 1518500249 : (t < 40) ? 1859775393 :
(t < 60) ? -1894007588 : -899497514;
}
/*
* Calculate the HMAC-SHA1 of a key and some data
*/
function core_hmac_sha1(key, data)
{
var bkey = str2binb(key);
if(bkey.length > 16) bkey = core_sha1(bkey, key.length * chrsz);
var ipad = Array(16), opad = Array(16);
for(var i = 0; i < 16; i++)
{
ipad[i] = bkey[i] ^ 0x36363636;
opad[i] = bkey[i] ^ 0x5C5C5C5C;
}
var hash = core_sha1(ipad.concat(str2binb(data)), 512 + data.length * chrsz);
return core_sha1(opad.concat(hash), 512 + 160);
}
/*
* Add integers, wrapping at 2^32. This uses 16-bit operations internally
* to work around bugs in some JS interpreters.
*/
function safe_add(x, y)
{
var lsw = (x & 0xFFFF) + (y & 0xFFFF);
var msw = (x >> 16) + (y >> 16) + (lsw >> 16);
return (msw << 16) | (lsw & 0xFFFF);
}
/*
* Bitwise rotate a 32-bit number to the left.
*/
function rol(num, cnt)
{
return (num << cnt) | (num >>> (32 - cnt));
}
/*
* Convert an 8-bit or 16-bit string to an array of big-endian words
* In 8-bit function, characters >255 have their hi-byte silently ignored.
*/
function str2binb(str)
{
var bin = Array();
var mask = (1 << chrsz) - 1;
for(var i = 0; i < str.length * chrsz; i += chrsz)
bin[i>>5] |= (str.charCodeAt(i / chrsz) & mask) << (32 - chrsz - i%32);
return bin;
}
/*
* Convert an array of big-endian words to a string
*/
function binb2str(bin)
{
var str = "";
var mask = (1 << chrsz) - 1;
for(var i = 0; i < bin.length * 32; i += chrsz)
str += String.fromCharCode((bin[i>>5] >>> (32 - chrsz - i%32)) & mask);
return str;
}
/*
* Convert an array of big-endian words to a hex string.
*/
function binb2hex(binarray)
{
var hex_tab = hexcase ? "0123456789ABCDEF" : "0123456789abcdef";
var str = "";
for(var i = 0; i < binarray.length * 4; i++)
{
str += hex_tab.charAt((binarray[i>>2] >> ((3 - i%4)*8+4)) & 0xF) +
hex_tab.charAt((binarray[i>>2] >> ((3 - i%4)*8 )) & 0xF);
}
return str;
}
/*
* Convert an array of big-endian words to a base-64 string
*/
function binb2b64(binarray)
{
var tab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var str = "";
for(var i = 0; i < binarray.length * 4; i += 3)
{
var triplet = (((binarray[i >> 2] >> 8 * (3 - i %4)) & 0xFF) << 16)
| (((binarray[i+1 >> 2] >> 8 * (3 - (i+1)%4)) & 0xFF) << 8 )
| ((binarray[i+2 >> 2] >> 8 * (3 - (i+2)%4)) & 0xFF);
for(var j = 0; j < 4; j++)
{
if(i * 8 + j * 6 > binarray.length * 32) str += b64pad;
else str += tab.charAt((triplet >> 6*(3-j)) & 0x3F);
}
}
return str;
}
var plainText = "Two households, both alike in dignity,\n\
In fair Verona, where we lay our scene,\n\
From ancient grudge break to new mutiny,\n\
Where civil blood makes civil hands unclean.\n\
From forth the fatal loins of these two foes\n\
A pair of star-cross'd lovers take their life;\n\
Whole misadventured piteous overthrows\n\
Do with their death bury their parents' strife.\n\
The fearful passage of their death-mark'd love,\n\
And the continuance of their parents' rage,\n\
Which, but their children's end, nought could remove,\n\
Is now the two hours' traffic of our stage;\n\
The which if you with patient ears attend,\n\
What here shall miss, our toil shall strive to mend.";
for (var i = 0; i <4; i++) {
plainText += plainText;
}
var sha1Output = hex_sha1(plainText);
assertEq(sha1Output, "2524d264def74cce2498bf112bedf00e6c0b796d");

View file

@@ -0,0 +1,100 @@
/*
* Copyright (C) Rich Moore. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY CONTRIBUTORS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
///// Start CORDIC
var AG_CONST = 0.6072529350;
function FIXED(X)
{
return X * 65536.0;
}
function FLOAT(X)
{
return X / 65536.0;
}
function DEG2RAD(X)
{
return 0.017453 * (X);
}
var Angles = [
FIXED(45.0), FIXED(26.565), FIXED(14.0362), FIXED(7.12502),
FIXED(3.57633), FIXED(1.78991), FIXED(0.895174), FIXED(0.447614),
FIXED(0.223811), FIXED(0.111906), FIXED(0.055953),
FIXED(0.027977)
];
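// Angles[i] is atan(2^-i) in degrees (45, 26.565, 14.0362, ...) in 16.16
// fixed point: the per-step rotation angles CORDIC adds or subtracts while
// steering toward the target angle.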
function cordicsincos() {
var X;
var Y;
var TargetAngle;
var CurrAngle;
var Step;
X = FIXED(AG_CONST); /* AG_CONST * cos(0) */
Y = 0; /* AG_CONST * sin(0) */
TargetAngle = FIXED(28.027);
CurrAngle = 0;
for (Step = 0; Step < 12; Step++) {
var NewX;
if (TargetAngle > CurrAngle) {
NewX = X - (Y >> Step);
Y = (X >> Step) + Y;
X = NewX;
CurrAngle += Angles[Step];
} else {
NewX = X + (Y >> Step);
Y = -(X >> Step) + Y;
X = NewX;
CurrAngle -= Angles[Step];
}
}
return CurrAngle;
}
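// The returned CurrAngle is in 16.16 fixed point: 1834995.35 / 65536 is
// about 28.0 degrees, i.e. the accumulator has converged on TargetAngle
// (28.027 degrees) to within the 12-step resolution.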
///// End CORDIC
function cordic( runs ) {
var actual;
var start = new Date();
for ( var i = 0 ; i < runs ; i++ ) {
actual = cordicsincos();
}
var end = new Date();
assertEq(actual, 1834995.3515519998);
return end.getTime() - start.getTime();
}
cordic(25000);

View file

@@ -0,0 +1,41 @@
// The Computer Language Shootout
// http://shootout.alioth.debian.org/
// contributed by Isaac Gouy
function partial(n){
var a1 = 0.0, a2 = 0.0, a3 = 0.0, a4 = 0.0, a5 = 0.0, a6 = 0.0, a7 = 0.0, a8 = 0.0, a9 = 0.0;
var twothirds = 2.0/3.0;
var alt = -1.0;
var k2 = 0.0, k3 = 0.0, sk = 0.0, ck = 0.0;
for (var k = 1; k <= n; k++){
k2 = k*k;
k3 = k2*k;
sk = Math.sin(k);
ck = Math.cos(k);
alt = -alt;
a1 += Math.pow(twothirds,k-1);
a2 += Math.pow(k,-0.5);
a3 += 1.0/(k*(k+1.0));
a4 += 1.0/(k3 * sk*sk);
a5 += 1.0/(k3 * ck*ck);
a6 += 1.0/k;
a7 += 1.0/k2;
a8 += alt/k;
a9 += alt/(2*k -1);
}
return [ a1, a2, a3, a4, a5, a6, a7, a8, a9 ];
}
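// As n grows these partial sums approach known limits: a1 -> 3 (geometric),
// a3 -> 1 (telescoping), a7 -> pi^2/6, a8 -> ln 2, a9 -> pi/4; a2 and a6
// diverge, which is why they keep climbing as n doubles below.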
var actual = [];
for (var i = 1024; i <= 16384; i *= 2)
Array.prototype.push.apply(actual, partial(i));
var eps = 1e-12;
var expect = [2.9999999999999987,62.555269219624684,0.9990243902439033,30.174793391263677,42.99468748637077,7.509175672278132,1.6439579810301654,0.6926591377284127,0.785154022830656,2.9999999999999987,89.06036157695789,0.9995119570522216,30.30796333494624,42.99485339033617,8.202078771817716,1.6444459047881168,0.6929030995395857,0.7852760930922243,2.9999999999999987,126.54745783224483,0.999755918965097,30.314167756318135,42.994888939123,8.89510389696629,1.6446899560231332,0.6930251251486118,0.7853371282421086,2.9999999999999987,179.56450569047874,0.9998779445868421,30.314499725429847,42.99489723774016,9.588190046095265,1.644812003986005,0.693086149128997,0.785367645819433,2.9999999999999987,254.54355172132264,0.9999389685688135,30.31451920492601,42.99489939769195,10.281306710008463,1.6448730335545856,0.6931166639131536,0.7853829046083998];
assertEq(actual.length, expect.length);
for (var i = 0; i < expect.length; ++i)
assertEq(Math.abs(actual[i] - expect[i]) < eps, true);

View file

@@ -0,0 +1,53 @@
// The Great Computer Language Shootout
// http://shootout.alioth.debian.org/
//
// contributed by Ian Osgood
function A(i,j) {
return 1/((i+j)*(i+j+1)/2+i+1);
}
function Au(u,v) {
for (var i=0; i<u.length; ++i) {
var t = 0;
for (var j=0; j<u.length; ++j)
t += A(i,j) * u[j];
v[i] = t;
}
}
function Atu(u,v) {
for (var i=0; i<u.length; ++i) {
var t = 0;
for (var j=0; j<u.length; ++j)
t += A(j,i) * u[j];
v[i] = t;
}
}
function AtAu(u,v,w) {
Au(u,w);
Atu(w,v);
}
function spectralnorm(n) {
var i, u=[], v=[], w=[], vv=0, vBv=0;
for (i=0; i<n; ++i) {
u[i] = 1; v[i] = w[i] = 0;
}
for (i=0; i<10; ++i) {
AtAu(u,v,w);
AtAu(v,u,w);
}
for (i=0; i<n; ++i) {
vBv += u[i]*v[i];
vv += v[i]*v[i];
}
return Math.sqrt(vBv/vv);
}
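// Power iteration on A^T A: ten rounds of AtAu drive u toward the dominant
// singular vector, and sqrt(vBv/vv), a Rayleigh quotient, approximates the
// spectral norm (largest singular value) of A.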
var actual = '';
for (var i = 6; i <= 48; i *= 2) {
actual += spectralnorm(i) + ',';
}
assertEq(actual, "1.2657786149754053,1.2727355112619148,1.273989979775574,1.274190125290389,");

File diff suppressed because it is too large

View file

@@ -0,0 +1,90 @@
// The Great Computer Language Shootout
// http://shootout.alioth.debian.org
//
// Contributed by Ian Osgood
var last = 42, A = 3877, C = 29573, M = 139968;
function rand(max) {
last = (last * A + C) % M;
return max * last / M;
}
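// A linear congruential generator with modulus M = 139968; seeding 'last'
// with 42 makes every run, and hence the assertEq checks below, fully
// deterministic.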
var ALU =
"GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGG" +
"GAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGAGA" +
"CCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAAAAT" +
"ACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAATCCCA" +
"GCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAACCCGGG" +
"AGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTGCACTCC" +
"AGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAA";
var IUB = {
a:0.27, c:0.12, g:0.12, t:0.27,
B:0.02, D:0.02, H:0.02, K:0.02,
M:0.02, N:0.02, R:0.02, S:0.02,
V:0.02, W:0.02, Y:0.02
};
var HomoSap = {
a: 0.3029549426680,
c: 0.1979883004921,
g: 0.1975473066391,
t: 0.3015094502008
};
function makeCumulative(table) {
var last = null;
for (var c in table) {
if (last) table[c] += table[last];
last = c;
}
}
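// Turns the probability table into running totals (e.g. IUB's a:0.27,
// c:0.12 become a:0.27, c:0.39, ...), so a uniform r in [0,1) maps to the
// first symbol whose cumulative threshold exceeds it.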
function fastaRepeat(n, seq) {
var seqi = 0, lenOut = 60;
while (n>0) {
if (n<lenOut) lenOut = n;
if (seqi + lenOut < seq.length) {
ret = seq.substring(seqi, seqi+lenOut);
seqi += lenOut;
} else {
var s = seq.substring(seqi);
seqi = lenOut - s.length;
ret = s + seq.substring(0, seqi);
}
n -= lenOut;
}
return ret;
}
function fastaRandom(n, table) {
var line = new Array(60);
makeCumulative(table);
while (n>0) {
if (n<line.length) line = new Array(n);
for (var i=0; i<line.length; i++) {
var r = rand(1);
for (var c in table) {
if (r < table[c]) {
line[i] = c;
break;
}
}
}
ret = line.join('');
n -= line.length;
}
return ret;
}
var ret;
var count = 7;
var actual1 = fastaRepeat(2*count*100000, ALU);
var actual2 = fastaRandom(3*count*1000, IUB);
var actual3 = fastaRandom(5*count*1000, HomoSap);
assertEq(actual1, "CAAAAAGGCCGGGCGCGGTG");
assertEq(actual2, "VtttaDtKgcaaWaaaaatSccMcVatgtKgtaKgcgatatgtagtSaaaDttatacaaa");
assertEq(actual3, "ttggctatatttatgttgga");

File diff suppressed because one or more lines are too long

Some files were not shown because too many files have changed in this diff