diff options
author | Feng Qian <> | 2009-04-10 18:11:29 -0700 |
---|---|---|
committer | The Android Open Source Project <initial-contribution@android.com> | 2009-04-10 18:11:29 -0700 |
commit | 8f72e70a9fd78eec56623b3a62e68f16b7b27e28 (patch) | |
tree | 181bf9a400c30a1bf34ea6d72560e8d00111d549 /JavaScriptCore | |
parent | 7ed56f225e0ade046e1c2178977f72b2d896f196 (diff) | |
download | external_webkit-8f72e70a9fd78eec56623b3a62e68f16b7b27e28.zip external_webkit-8f72e70a9fd78eec56623b3a62e68f16b7b27e28.tar.gz external_webkit-8f72e70a9fd78eec56623b3a62e68f16b7b27e28.tar.bz2 |
AI 145796: Land the WebKit merge @r42026.
Automated import of CL 145796
Diffstat (limited to 'JavaScriptCore')
134 files changed, 11144 insertions, 4523 deletions
diff --git a/JavaScriptCore/API/JSBase.cpp b/JavaScriptCore/API/JSBase.cpp index 2ffe345..422b296 100644 --- a/JavaScriptCore/API/JSBase.cpp +++ b/JavaScriptCore/API/JSBase.cpp @@ -58,7 +58,7 @@ JSValueRef JSEvaluateScript(JSContextRef ctx, JSStringRef script, JSObjectRef th *exception = toRef(completion.value()); return 0; } - + if (completion.value()) return toRef(completion.value()); diff --git a/JavaScriptCore/API/JSBasePrivate.h b/JavaScriptCore/API/JSBasePrivate.h index 6beacda..befa316 100644 --- a/JavaScriptCore/API/JSBasePrivate.h +++ b/JavaScriptCore/API/JSBasePrivate.h @@ -43,7 +43,7 @@ owns a large non-GC memory region. Calling this function will encourage the garbage collector to collect soon, hoping to reclaim that large non-GC memory region. */ -JS_EXPORT void JSReportExtraMemoryCost(JSContextRef ctx, size_t size) AVAILABLE_AFTER_WEBKIT_VERSION_3_1; +JS_EXPORT void JSReportExtraMemoryCost(JSContextRef ctx, size_t size) AVAILABLE_IN_WEBKIT_VERSION_4_0; #ifdef __cplusplus } diff --git a/JavaScriptCore/API/JSCallbackObjectFunctions.h b/JavaScriptCore/API/JSCallbackObjectFunctions.h index 23f941d..d6ae9bd 100644 --- a/JavaScriptCore/API/JSCallbackObjectFunctions.h +++ b/JavaScriptCore/API/JSCallbackObjectFunctions.h @@ -99,7 +99,7 @@ template <class Base> UString JSCallbackObject<Base>::className() const { UString thisClassName = classRef()->className(); - if (!thisClassName.isNull()) + if (!thisClassName.isEmpty()) return thisClassName; return Base::className(); @@ -126,10 +126,17 @@ bool JSCallbackObject<Base>::getOwnPropertySlot(ExecState* exec, const Identifie if (!propertyNameRef) propertyNameRef = OpaqueJSString::create(propertyName.ustring()); JSLock::DropAllLocks dropAllLocks(exec); - if (JSValueRef value = getProperty(ctx, thisRef, propertyNameRef.get(), toRef(exec->exceptionSlot()))) { + JSValueRef exception = 0; + JSValueRef value = getProperty(ctx, thisRef, propertyNameRef.get(), &exception); + exec->setException(toJS(exception)); + if 
(value) { slot.setValue(toJS(value)); return true; } + if (exception) { + slot.setValue(jsUndefined()); + return true; + } } if (OpaqueJSClassStaticValuesTable* staticValues = jsClass->staticValues(exec)) { @@ -169,7 +176,10 @@ void JSCallbackObject<Base>::put(ExecState* exec, const Identifier& propertyName if (!propertyNameRef) propertyNameRef = OpaqueJSString::create(propertyName.ustring()); JSLock::DropAllLocks dropAllLocks(exec); - if (setProperty(ctx, thisRef, propertyNameRef.get(), valueRef, toRef(exec->exceptionSlot()))) + JSValueRef exception = 0; + bool result = setProperty(ctx, thisRef, propertyNameRef.get(), valueRef, &exception); + exec->setException(toJS(exception)); + if (result || exception) return; } @@ -181,7 +191,10 @@ void JSCallbackObject<Base>::put(ExecState* exec, const Identifier& propertyName if (!propertyNameRef) propertyNameRef = OpaqueJSString::create(propertyName.ustring()); JSLock::DropAllLocks dropAllLocks(exec); - if (setProperty(ctx, thisRef, propertyNameRef.get(), valueRef, toRef(exec->exceptionSlot()))) + JSValueRef exception = 0; + bool result = setProperty(ctx, thisRef, propertyNameRef.get(), valueRef, &exception); + exec->setException(toJS(exception)); + if (result || exception) return; } else throwError(exec, ReferenceError, "Attempt to set a property that is not settable."); @@ -213,7 +226,10 @@ bool JSCallbackObject<Base>::deleteProperty(ExecState* exec, const Identifier& p if (!propertyNameRef) propertyNameRef = OpaqueJSString::create(propertyName.ustring()); JSLock::DropAllLocks dropAllLocks(exec); - if (deleteProperty(ctx, thisRef, propertyNameRef.get(), toRef(exec->exceptionSlot()))) + JSValueRef exception = 0; + bool result = deleteProperty(ctx, thisRef, propertyNameRef.get(), &exception); + exec->setException(toJS(exception)); + if (result || exception) return true; } @@ -268,7 +284,10 @@ JSObject* JSCallbackObject<Base>::construct(ExecState* exec, JSObject* construct for (int i = 0; i < argumentCount; i++) arguments[i] 
= toRef(args.at(exec, i)); JSLock::DropAllLocks dropAllLocks(exec); - return toJS(callAsConstructor(execRef, constructorRef, argumentCount, arguments.data(), toRef(exec->exceptionSlot()))); + JSValueRef exception = 0; + JSObject* result = toJS(callAsConstructor(execRef, constructorRef, argumentCount, arguments.data(), &exception)); + exec->setException(toJS(exception)); + return result; } } @@ -285,7 +304,10 @@ bool JSCallbackObject<Base>::hasInstance(ExecState* exec, JSValuePtr value, JSVa for (JSClassRef jsClass = classRef(); jsClass; jsClass = jsClass->parentClass) { if (JSObjectHasInstanceCallback hasInstance = jsClass->hasInstance) { JSLock::DropAllLocks dropAllLocks(exec); - return hasInstance(execRef, thisRef, toRef(value), toRef(exec->exceptionSlot())); + JSValueRef exception = 0; + bool result = hasInstance(execRef, thisRef, toRef(value), &exception); + exec->setException(toJS(exception)); + return result; } } return false; @@ -317,7 +339,10 @@ JSValuePtr JSCallbackObject<Base>::call(ExecState* exec, JSObject* functionObjec for (int i = 0; i < argumentCount; i++) arguments[i] = toRef(args.at(exec, i)); JSLock::DropAllLocks dropAllLocks(exec); - return toJS(callAsFunction(execRef, functionRef, thisObjRef, argumentCount, arguments.data(), toRef(exec->exceptionSlot()))); + JSValueRef exception = 0; + JSValuePtr result = toJS(callAsFunction(execRef, functionRef, thisObjRef, argumentCount, arguments.data(), &exception)); + exec->setException(toJS(exception)); + return result; } } @@ -377,7 +402,11 @@ double JSCallbackObject<Base>::toNumber(ExecState* exec) const for (JSClassRef jsClass = classRef(); jsClass; jsClass = jsClass->parentClass) if (JSObjectConvertToTypeCallback convertToType = jsClass->convertToType) { JSLock::DropAllLocks dropAllLocks(exec); - if (JSValueRef value = convertToType(ctx, thisRef, kJSTypeNumber, toRef(exec->exceptionSlot()))) { + + JSValueRef exception = 0; + JSValueRef value = convertToType(ctx, thisRef, kJSTypeNumber, &exception); + 
exec->setException(toJS(exception)); + if (value) { double dValue; return toJS(value).getNumber(dValue) ? dValue : NaN; } @@ -395,12 +424,16 @@ UString JSCallbackObject<Base>::toString(ExecState* exec) const for (JSClassRef jsClass = classRef(); jsClass; jsClass = jsClass->parentClass) if (JSObjectConvertToTypeCallback convertToType = jsClass->convertToType) { JSValueRef value; + JSValueRef exception = 0; { JSLock::DropAllLocks dropAllLocks(exec); - value = convertToType(ctx, thisRef, kJSTypeString, toRef(exec->exceptionSlot())); + value = convertToType(ctx, thisRef, kJSTypeString, &exception); + exec->setException(toJS(exception)); } if (value) return toJS(value).getString(); + if (exception) + return ""; } return Base::toString(exec); @@ -443,8 +476,13 @@ JSValuePtr JSCallbackObject<Base>::staticValueGetter(ExecState* exec, const Iden if (!propertyNameRef) propertyNameRef = OpaqueJSString::create(propertyName.ustring()); JSLock::DropAllLocks dropAllLocks(exec); - if (JSValueRef value = getProperty(toRef(exec), thisRef, propertyNameRef.get(), toRef(exec->exceptionSlot()))) + JSValueRef exception = 0; + JSValueRef value = getProperty(toRef(exec), thisRef, propertyNameRef.get(), &exception); + exec->setException(toJS(exception)); + if (value) return toJS(value); + if (exception) + return jsUndefined(); } return throwError(exec, ReferenceError, "Static value property defined with NULL getProperty callback."); @@ -488,8 +526,14 @@ JSValuePtr JSCallbackObject<Base>::callbackGetter(ExecState* exec, const Identif if (!propertyNameRef) propertyNameRef = OpaqueJSString::create(propertyName.ustring()); JSLock::DropAllLocks dropAllLocks(exec); - if (JSValueRef value = getProperty(toRef(exec), thisRef, propertyNameRef.get(), toRef(exec->exceptionSlot()))) + + JSValueRef exception = 0; + JSValueRef value = getProperty(toRef(exec), thisRef, propertyNameRef.get(), &exception); + exec->setException(toJS(exception)); + if (value) return toJS(value); + if (exception) + return 
jsUndefined(); } return throwError(exec, ReferenceError, "hasProperty callback returned true for a property that doesn't exist."); diff --git a/JavaScriptCore/API/JSContextRef.h b/JavaScriptCore/API/JSContextRef.h index bc89511..c5c8a71 100644 --- a/JavaScriptCore/API/JSContextRef.h +++ b/JavaScriptCore/API/JSContextRef.h @@ -48,7 +48,7 @@ extern "C" { synchronization is required. @result The created JSContextGroup. */ -JS_EXPORT JSContextGroupRef JSContextGroupCreate() AVAILABLE_AFTER_WEBKIT_VERSION_3_1; +JS_EXPORT JSContextGroupRef JSContextGroupCreate() AVAILABLE_IN_WEBKIT_VERSION_4_0; /*! @function @@ -56,14 +56,14 @@ JS_EXPORT JSContextGroupRef JSContextGroupCreate() AVAILABLE_AFTER_WEBKIT_VERSIO @param group The JSContextGroup to retain. @result A JSContextGroup that is the same as group. */ -JS_EXPORT JSContextGroupRef JSContextGroupRetain(JSContextGroupRef group) AVAILABLE_AFTER_WEBKIT_VERSION_3_1; +JS_EXPORT JSContextGroupRef JSContextGroupRetain(JSContextGroupRef group) AVAILABLE_IN_WEBKIT_VERSION_4_0; /*! @function @abstract Releases a JavaScript context group. @param group The JSContextGroup to release. */ -JS_EXPORT void JSContextGroupRelease(JSContextGroupRef group) AVAILABLE_AFTER_WEBKIT_VERSION_3_1; +JS_EXPORT void JSContextGroupRelease(JSContextGroupRef group) AVAILABLE_IN_WEBKIT_VERSION_4_0; /*! @function @@ -92,7 +92,7 @@ JS_EXPORT JSGlobalContextRef JSGlobalContextCreate(JSClassRef globalObjectClass) @result A JSGlobalContext with a global object of class globalObjectClass and a context group equal to group. */ -JS_EXPORT JSGlobalContextRef JSGlobalContextCreateInGroup(JSContextGroupRef group, JSClassRef globalObjectClass) AVAILABLE_AFTER_WEBKIT_VERSION_3_1; +JS_EXPORT JSGlobalContextRef JSGlobalContextCreateInGroup(JSContextGroupRef group, JSClassRef globalObjectClass) AVAILABLE_IN_WEBKIT_VERSION_4_0; /*! 
@function @@ -123,7 +123,7 @@ JS_EXPORT JSObjectRef JSContextGetGlobalObject(JSContextRef ctx); @param ctx The JSContext whose group you want to get. @result ctx's group. */ -JS_EXPORT JSContextGroupRef JSContextGetGroup(JSContextRef ctx) AVAILABLE_AFTER_WEBKIT_VERSION_3_1; +JS_EXPORT JSContextGroupRef JSContextGetGroup(JSContextRef ctx) AVAILABLE_IN_WEBKIT_VERSION_4_0; #ifdef __cplusplus } diff --git a/JavaScriptCore/API/JSObjectRef.cpp b/JavaScriptCore/API/JSObjectRef.cpp index 67bb2a5..e81e512 100644 --- a/JavaScriptCore/API/JSObjectRef.cpp +++ b/JavaScriptCore/API/JSObjectRef.cpp @@ -467,7 +467,7 @@ JSPropertyNameArrayRef JSObjectCopyPropertyNames(JSContextRef ctx, JSObjectRef o jsObject->getPropertyNames(exec, array); size_t size = array.size(); - propertyNames->array.reserveCapacity(size); + propertyNames->array.reserveInitialCapacity(size); for (size_t i = 0; i < size; ++i) propertyNames->array.append(JSRetainPtr<JSStringRef>(Adopt, OpaqueJSString::create(array[i].ustring()).releaseRef())); diff --git a/JavaScriptCore/API/JSObjectRef.h b/JavaScriptCore/API/JSObjectRef.h index 461764c..3e8b0eb 100644 --- a/JavaScriptCore/API/JSObjectRef.h +++ b/JavaScriptCore/API/JSObjectRef.h @@ -441,7 +441,7 @@ JS_EXPORT JSObjectRef JSObjectMakeConstructor(JSContextRef ctx, JSClassRef jsCla @discussion The behavior of this function does not exactly match the behavior of the built-in Array constructor. Specifically, if one argument is supplied, this function returns an array with one element. */ -JS_EXPORT JSObjectRef JSObjectMakeArray(JSContextRef ctx, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) AVAILABLE_AFTER_WEBKIT_VERSION_3_1; +JS_EXPORT JSObjectRef JSObjectMakeArray(JSContextRef ctx, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) AVAILABLE_IN_WEBKIT_VERSION_4_0; /*! 
@function @@ -452,7 +452,7 @@ JS_EXPORT JSObjectRef JSObjectMakeArray(JSContextRef ctx, size_t argumentCount, @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. @result A JSObject that is a Date. */ -JS_EXPORT JSObjectRef JSObjectMakeDate(JSContextRef ctx, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) AVAILABLE_AFTER_WEBKIT_VERSION_3_1; +JS_EXPORT JSObjectRef JSObjectMakeDate(JSContextRef ctx, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) AVAILABLE_IN_WEBKIT_VERSION_4_0; /*! @function @@ -463,7 +463,7 @@ JS_EXPORT JSObjectRef JSObjectMakeDate(JSContextRef ctx, size_t argumentCount, c @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. @result A JSObject that is a Error. */ -JS_EXPORT JSObjectRef JSObjectMakeError(JSContextRef ctx, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) AVAILABLE_AFTER_WEBKIT_VERSION_3_1; +JS_EXPORT JSObjectRef JSObjectMakeError(JSContextRef ctx, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) AVAILABLE_IN_WEBKIT_VERSION_4_0; /*! @function @@ -474,7 +474,7 @@ JS_EXPORT JSObjectRef JSObjectMakeError(JSContextRef ctx, size_t argumentCount, @param exception A pointer to a JSValueRef in which to store an exception, if any. Pass NULL if you do not care to store an exception. @result A JSObject that is a RegExp. */ -JS_EXPORT JSObjectRef JSObjectMakeRegExp(JSContextRef ctx, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) AVAILABLE_AFTER_WEBKIT_VERSION_3_1; +JS_EXPORT JSObjectRef JSObjectMakeRegExp(JSContextRef ctx, size_t argumentCount, const JSValueRef arguments[], JSValueRef* exception) AVAILABLE_IN_WEBKIT_VERSION_4_0; /*! 
@function diff --git a/JavaScriptCore/API/WebKitAvailability.h b/JavaScriptCore/API/WebKitAvailability.h index 1273360..8402528 100644 --- a/JavaScriptCore/API/WebKitAvailability.h +++ b/JavaScriptCore/API/WebKitAvailability.h @@ -38,6 +38,7 @@ #define WEBKIT_VERSION_2_0 0x0200 #define WEBKIT_VERSION_3_0 0x0300 #define WEBKIT_VERSION_3_1 0x0310 +#define WEBKIT_VERSION_4_0 0x0400 #define WEBKIT_VERSION_LATEST 0x9999 #ifdef __APPLE__ @@ -640,123 +641,123 @@ /* - * AVAILABLE_AFTER_WEBKIT_VERSION_3_1 + * AVAILABLE_IN_WEBKIT_VERSION_4_0 * - * Used on declarations introduced after WebKit 3.1 + * Used on declarations introduced in WebKit 4.0 */ #if WEBKIT_VERSION_MAX_ALLOWED < WEBKIT_VERSION_LATEST - #define AVAILABLE_AFTER_WEBKIT_VERSION_3_1 UNAVAILABLE_ATTRIBUTE + #define AVAILABLE_IN_WEBKIT_VERSION_4_0 UNAVAILABLE_ATTRIBUTE #elif WEBKIT_VERSION_MIN_REQUIRED < WEBKIT_VERSION_LATEST - #define AVAILABLE_AFTER_WEBKIT_VERSION_3_1 WEAK_IMPORT_ATTRIBUTE + #define AVAILABLE_IN_WEBKIT_VERSION_4_0 WEAK_IMPORT_ATTRIBUTE #else - #define AVAILABLE_AFTER_WEBKIT_VERSION_3_1 + #define AVAILABLE_IN_WEBKIT_VERSION_4_0 #endif /* - * AVAILABLE_AFTER_WEBKIT_VERSION_3_1_BUT_DEPRECATED + * AVAILABLE_IN_WEBKIT_VERSION_4_0_BUT_DEPRECATED * - * Used on declarations introduced after WebKit 3.1, - * and deprecated after WebKit 3.1 + * Used on declarations introduced in WebKit 4.0, + * and deprecated in WebKit 4.0 */ #if WEBKIT_VERSION_MIN_REQUIRED >= WEBKIT_VERSION_LATEST - #define AVAILABLE_AFTER_WEBKIT_VERSION_3_1_BUT_DEPRECATED DEPRECATED_ATTRIBUTE + #define AVAILABLE_IN_WEBKIT_VERSION_4_0_BUT_DEPRECATED DEPRECATED_ATTRIBUTE #else - #define AVAILABLE_AFTER_WEBKIT_VERSION_3_1_BUT_DEPRECATED AVAILABLE_AFTER_WEBKIT_VERSION_3_1 + #define AVAILABLE_IN_WEBKIT_VERSION_4_0_BUT_DEPRECATED AVAILABLE_IN_WEBKIT_VERSION_4_0 #endif /* - * AVAILABLE_WEBKIT_VERSION_1_0_AND_LATER_BUT_DEPRECATED_AFTER_WEBKIT_VERSION_3_1 + * AVAILABLE_WEBKIT_VERSION_1_0_AND_LATER_BUT_DEPRECATED_IN_WEBKIT_VERSION_4_0 * * Used on 
declarations introduced in WebKit 1.0, - * but later deprecated after WebKit 3.1 + * but later deprecated in WebKit 4.0 */ #if WEBKIT_VERSION_MIN_REQUIRED >= WEBKIT_VERSION_LATEST - #define AVAILABLE_WEBKIT_VERSION_1_0_AND_LATER_BUT_DEPRECATED_AFTER_WEBKIT_VERSION_3_1 DEPRECATED_ATTRIBUTE + #define AVAILABLE_WEBKIT_VERSION_1_0_AND_LATER_BUT_DEPRECATED_IN_WEBKIT_VERSION_4_0 DEPRECATED_ATTRIBUTE #else - #define AVAILABLE_WEBKIT_VERSION_1_0_AND_LATER_BUT_DEPRECATED_AFTER_WEBKIT_VERSION_3_1 AVAILABLE_WEBKIT_VERSION_1_0_AND_LATER + #define AVAILABLE_WEBKIT_VERSION_1_0_AND_LATER_BUT_DEPRECATED_IN_WEBKIT_VERSION_4_0 AVAILABLE_WEBKIT_VERSION_1_0_AND_LATER #endif /* - * AVAILABLE_WEBKIT_VERSION_1_1_AND_LATER_BUT_DEPRECATED_AFTER_WEBKIT_VERSION_3_1 + * AVAILABLE_WEBKIT_VERSION_1_1_AND_LATER_BUT_DEPRECATED_IN_WEBKIT_VERSION_4_0 * * Used on declarations introduced in WebKit 1.1, - * but later deprecated after WebKit 3.1 + * but later deprecated in WebKit 4.0 */ #if WEBKIT_VERSION_MIN_REQUIRED >= WEBKIT_VERSION_LATEST - #define AVAILABLE_WEBKIT_VERSION_1_1_AND_LATER_BUT_DEPRECATED_AFTER_WEBKIT_VERSION_3_1 DEPRECATED_ATTRIBUTE + #define AVAILABLE_WEBKIT_VERSION_1_1_AND_LATER_BUT_DEPRECATED_IN_WEBKIT_VERSION_4_0 DEPRECATED_ATTRIBUTE #else - #define AVAILABLE_WEBKIT_VERSION_1_1_AND_LATER_BUT_DEPRECATED_AFTER_WEBKIT_VERSION_3_1 AVAILABLE_WEBKIT_VERSION_1_1_AND_LATER + #define AVAILABLE_WEBKIT_VERSION_1_1_AND_LATER_BUT_DEPRECATED_IN_WEBKIT_VERSION_4_0 AVAILABLE_WEBKIT_VERSION_1_1_AND_LATER #endif /* - * AVAILABLE_WEBKIT_VERSION_1_2_AND_LATER_BUT_DEPRECATED_AFTER_WEBKIT_VERSION_3_1 + * AVAILABLE_WEBKIT_VERSION_1_2_AND_LATER_BUT_DEPRECATED_IN_WEBKIT_VERSION_4_0 * * Used on declarations introduced in WebKit 1.2, - * but later deprecated after WebKit 3.1 + * but later deprecated in WebKit 4.0 */ #if WEBKIT_VERSION_MIN_REQUIRED >= WEBKIT_VERSION_LATEST - #define AVAILABLE_WEBKIT_VERSION_1_2_AND_LATER_BUT_DEPRECATED_AFTER_WEBKIT_VERSION_3_1 DEPRECATED_ATTRIBUTE + #define 
AVAILABLE_WEBKIT_VERSION_1_2_AND_LATER_BUT_DEPRECATED_IN_WEBKIT_VERSION_4_0 DEPRECATED_ATTRIBUTE #else - #define AVAILABLE_WEBKIT_VERSION_1_2_AND_LATER_BUT_DEPRECATED_AFTER_WEBKIT_VERSION_3_1 AVAILABLE_WEBKIT_VERSION_1_2_AND_LATER + #define AVAILABLE_WEBKIT_VERSION_1_2_AND_LATER_BUT_DEPRECATED_IN_WEBKIT_VERSION_4_0 AVAILABLE_WEBKIT_VERSION_1_2_AND_LATER #endif /* - * AVAILABLE_WEBKIT_VERSION_1_3_AND_LATER_BUT_DEPRECATED_AFTER_WEBKIT_VERSION_3_1 + * AVAILABLE_WEBKIT_VERSION_1_3_AND_LATER_BUT_DEPRECATED_IN_WEBKIT_VERSION_4_0 * * Used on declarations introduced in WebKit 1.3, - * but later deprecated after WebKit 3.1 + * but later deprecated in WebKit 4.0 */ #if WEBKIT_VERSION_MIN_REQUIRED >= WEBKIT_VERSION_LATEST - #define AVAILABLE_WEBKIT_VERSION_1_3_AND_LATER_BUT_DEPRECATED_AFTER_WEBKIT_VERSION_3_1 DEPRECATED_ATTRIBUTE + #define AVAILABLE_WEBKIT_VERSION_1_3_AND_LATER_BUT_DEPRECATED_IN_WEBKIT_VERSION_4_0 DEPRECATED_ATTRIBUTE #else - #define AVAILABLE_WEBKIT_VERSION_1_3_AND_LATER_BUT_DEPRECATED_AFTER_WEBKIT_VERSION_3_1 AVAILABLE_WEBKIT_VERSION_1_3_AND_LATER + #define AVAILABLE_WEBKIT_VERSION_1_3_AND_LATER_BUT_DEPRECATED_IN_WEBKIT_VERSION_4_0 AVAILABLE_WEBKIT_VERSION_1_3_AND_LATER #endif /* - * AVAILABLE_WEBKIT_VERSION_2_0_AND_LATER_BUT_DEPRECATED_AFTER_WEBKIT_VERSION_3_1 + * AVAILABLE_WEBKIT_VERSION_2_0_AND_LATER_BUT_DEPRECATED_IN_WEBKIT_VERSION_4_0 * * Used on declarations introduced in WebKit 2.0, - * but later deprecated after WebKit 3.1 + * but later deprecated in WebKit 4.0 */ #if WEBKIT_VERSION_MIN_REQUIRED >= WEBKIT_VERSION_LATEST - #define AVAILABLE_WEBKIT_VERSION_2_0_AND_LATER_BUT_DEPRECATED_AFTER_WEBKIT_VERSION_3_1 DEPRECATED_ATTRIBUTE + #define AVAILABLE_WEBKIT_VERSION_2_0_AND_LATER_BUT_DEPRECATED_IN_WEBKIT_VERSION_4_0 DEPRECATED_ATTRIBUTE #else - #define AVAILABLE_WEBKIT_VERSION_2_0_AND_LATER_BUT_DEPRECATED_AFTER_WEBKIT_VERSION_3_1 AVAILABLE_WEBKIT_VERSION_2_0_AND_LATER + #define AVAILABLE_WEBKIT_VERSION_2_0_AND_LATER_BUT_DEPRECATED_IN_WEBKIT_VERSION_4_0 
AVAILABLE_WEBKIT_VERSION_2_0_AND_LATER #endif /* - * AVAILABLE_WEBKIT_VERSION_3_0_AND_LATER_BUT_DEPRECATED_AFTER_WEBKIT_VERSION_3_1 + * AVAILABLE_WEBKIT_VERSION_3_0_AND_LATER_BUT_DEPRECATED_IN_WEBKIT_VERSION_4_0 * * Used on declarations introduced in WebKit 3.0, - * but later deprecated after WebKit 3.1 + * but later deprecated in WebKit 4.0 */ #if WEBKIT_VERSION_MIN_REQUIRED >= WEBKIT_VERSION_LATEST - #define AVAILABLE_WEBKIT_VERSION_3_0_AND_LATER_BUT_DEPRECATED_AFTER_WEBKIT_VERSION_3_1 DEPRECATED_ATTRIBUTE + #define AVAILABLE_WEBKIT_VERSION_3_0_AND_LATER_BUT_DEPRECATED_IN_WEBKIT_VERSION_4_0 DEPRECATED_ATTRIBUTE #else - #define AVAILABLE_WEBKIT_VERSION_3_0_AND_LATER_BUT_DEPRECATED_AFTER_WEBKIT_VERSION_3_1 AVAILABLE_WEBKIT_VERSION_3_0_AND_LATER + #define AVAILABLE_WEBKIT_VERSION_3_0_AND_LATER_BUT_DEPRECATED_IN_WEBKIT_VERSION_4_0 AVAILABLE_WEBKIT_VERSION_3_0_AND_LATER #endif /* - * AVAILABLE_WEBKIT_VERSION_3_1_AND_LATER_BUT_DEPRECATED_AFTER_WEBKIT_VERSION_3_1 + * AVAILABLE_WEBKIT_VERSION_3_1_AND_LATER_BUT_DEPRECATED_IN_WEBKIT_VERSION_4_0 * * Used on declarations introduced in WebKit 3.1, - * but later deprecated after WebKit 3.1 + * but later deprecated in WebKit 4.0 */ #if WEBKIT_VERSION_MIN_REQUIRED >= WEBKIT_VERSION_LATEST - #define AVAILABLE_WEBKIT_VERSION_3_1_AND_LATER_BUT_DEPRECATED_AFTER_WEBKIT_VERSION_3_1 DEPRECATED_ATTRIBUTE + #define AVAILABLE_WEBKIT_VERSION_3_1_AND_LATER_BUT_DEPRECATED_IN_WEBKIT_VERSION_4_0 DEPRECATED_ATTRIBUTE #else - #define AVAILABLE_WEBKIT_VERSION_3_1_AND_LATER_BUT_DEPRECATED_AFTER_WEBKIT_VERSION_3_1 AVAILABLE_WEBKIT_VERSION_3_1_AND_LATER + #define AVAILABLE_WEBKIT_VERSION_3_1_AND_LATER_BUT_DEPRECATED_IN_WEBKIT_VERSION_4_0 AVAILABLE_WEBKIT_VERSION_3_1_AND_LATER #endif /* - * DEPRECATED_AFTER_WEBKIT_VERSION_3_1 + * DEPRECATED_IN_WEBKIT_VERSION_4_0 * - * Used on types deprecated after WebKit 3.1 + * Used on types deprecated in WebKit 4.0 */ #if WEBKIT_VERSION_MIN_REQUIRED >= WEBKIT_VERSION_LATEST - #define 
DEPRECATED_AFTER_WEBKIT_VERSION_3_1 DEPRECATED_ATTRIBUTE + #define DEPRECATED_IN_WEBKIT_VERSION_4_0 DEPRECATED_ATTRIBUTE #else - #define DEPRECATED_AFTER_WEBKIT_VERSION_3_1 + #define DEPRECATED_IN_WEBKIT_VERSION_4_0 #endif diff --git a/JavaScriptCore/API/tests/testapi.c b/JavaScriptCore/API/tests/testapi.c index 48c8583..bbcf1e7 100644 --- a/JavaScriptCore/API/tests/testapi.c +++ b/JavaScriptCore/API/tests/testapi.c @@ -26,6 +26,7 @@ #include "JavaScriptCore.h" #include "JSBasePrivate.h" #include <math.h> +#define ASSERT_DISABLED 0 #include <wtf/Assertions.h> #include <wtf/UnusedParam.h> @@ -41,11 +42,13 @@ static double nan(const char*) #endif static JSGlobalContextRef context = 0; - +static int failed = 0; static void assertEqualsAsBoolean(JSValueRef value, bool expectedValue) { - if (JSValueToBoolean(context, value) != expectedValue) + if (JSValueToBoolean(context, value) != expectedValue) { fprintf(stderr, "assertEqualsAsBoolean failed: %p, %d\n", value, expectedValue); + failed = 1; + } } static void assertEqualsAsNumber(JSValueRef value, double expectedValue) @@ -55,8 +58,10 @@ static void assertEqualsAsNumber(JSValueRef value, double expectedValue) // FIXME <rdar://4668451> - On i386 the isnan(double) macro tries to map to the isnan(float) function, // causing a build break with -Wshorten-64-to-32 enabled. The issue is known by the appropriate team. 
// After that's resolved, we can remove these casts - if (number != expectedValue && !(isnan((float)number) && isnan((float)expectedValue))) + if (number != expectedValue && !(isnan((float)number) && isnan((float)expectedValue))) { fprintf(stderr, "assertEqualsAsNumber failed: %p, %lf\n", value, expectedValue); + failed = 1; + } } static void assertEqualsAsUTF8String(JSValueRef value, const char* expectedValue) @@ -68,12 +73,17 @@ static void assertEqualsAsUTF8String(JSValueRef value, const char* expectedValue JSStringGetUTF8CString(valueAsString, jsBuffer, jsSize); unsigned i; - for (i = 0; jsBuffer[i]; i++) - if (jsBuffer[i] != expectedValue[i]) + for (i = 0; jsBuffer[i]; i++) { + if (jsBuffer[i] != expectedValue[i]) { fprintf(stderr, "assertEqualsAsUTF8String failed at character %d: %c(%d) != %c(%d)\n", i, jsBuffer[i], jsBuffer[i], expectedValue[i], expectedValue[i]); - - if (jsSize < strlen(jsBuffer) + 1) + failed = 1; + } + } + + if (jsSize < strlen(jsBuffer) + 1) { fprintf(stderr, "assertEqualsAsUTF8String failed: jsSize was too small\n"); + failed = 1; + } free(jsBuffer); JSStringRelease(valueAsString); @@ -94,12 +104,16 @@ static void assertEqualsAsCharactersPtr(JSValueRef value, const char* expectedVa CFStringGetCharacters(expectedValueAsCFString, CFRangeMake(0, cfLength), cfBuffer); CFRelease(expectedValueAsCFString); - if (memcmp(jsBuffer, cfBuffer, cfLength * sizeof(UniChar)) != 0) + if (memcmp(jsBuffer, cfBuffer, cfLength * sizeof(UniChar)) != 0) { fprintf(stderr, "assertEqualsAsCharactersPtr failed: jsBuffer != cfBuffer\n"); + failed = 1; + } - if (jsLength != (size_t)cfLength) + if (jsLength != (size_t)cfLength) { fprintf(stderr, "assertEqualsAsCharactersPtr failed: jsLength(%ld) != cfLength(%ld)\n", jsLength, cfLength); - + failed = 1; + } + free(cfBuffer); JSStringRelease(valueAsString); } @@ -115,6 +129,7 @@ static bool MyObject_hasProperty(JSContextRef context, JSObjectRef object, JSStr if (JSStringIsEqualToUTF8CString(propertyName, "alwaysOne") 
|| JSStringIsEqualToUTF8CString(propertyName, "cantFind") + || JSStringIsEqualToUTF8CString(propertyName, "throwOnGet") || JSStringIsEqualToUTF8CString(propertyName, "myPropertyName") || JSStringIsEqualToUTF8CString(propertyName, "hasPropertyLie") || JSStringIsEqualToUTF8CString(propertyName, "0")) { @@ -140,7 +155,11 @@ static JSValueRef MyObject_getProperty(JSContextRef context, JSObjectRef object, if (JSStringIsEqualToUTF8CString(propertyName, "cantFind")) { return JSValueMakeUndefined(context); } - + + if (JSStringIsEqualToUTF8CString(propertyName, "throwOnGet")) { + return JSEvaluateScript(context, JSStringCreateWithUTF8CString("throw 'an exception'"), object, JSStringCreateWithUTF8CString("test script"), 1, exception); + } + if (JSStringIsEqualToUTF8CString(propertyName, "0")) { *exception = JSValueMakeNumber(context, 1); return JSValueMakeNumber(context, 1); @@ -159,6 +178,10 @@ static bool MyObject_setProperty(JSContextRef context, JSObjectRef object, JSStr if (JSStringIsEqualToUTF8CString(propertyName, "cantSet")) return true; // pretend we set the property in order to swallow it + if (JSStringIsEqualToUTF8CString(propertyName, "throwOnSet")) { + JSEvaluateScript(context, JSStringCreateWithUTF8CString("throw 'an exception'"), object, JSStringCreateWithUTF8CString("test script"), 1, exception); + } + return false; } @@ -171,7 +194,7 @@ static bool MyObject_deleteProperty(JSContextRef context, JSObjectRef object, JS return true; if (JSStringIsEqualToUTF8CString(propertyName, "throwOnDelete")) { - *exception = JSValueMakeNumber(context, 2); + JSEvaluateScript(context, JSStringCreateWithUTF8CString("throw 'an exception'"), object, JSStringCreateWithUTF8CString("test script"), 1, exception); return false; } @@ -201,6 +224,11 @@ static JSValueRef MyObject_callAsFunction(JSContextRef context, JSObjectRef obje UNUSED_PARAM(thisObject); UNUSED_PARAM(exception); + if (argumentCount > 0 && JSValueIsString(context, arguments[0]) && 
JSStringIsEqualToUTF8CString(JSValueToStringCopy(context, arguments[0], 0), "throwOnCall")) { + JSEvaluateScript(context, JSStringCreateWithUTF8CString("throw 'an exception'"), object, JSStringCreateWithUTF8CString("test script"), 1, exception); + return JSValueMakeUndefined(context); + } + if (argumentCount > 0 && JSValueIsStrictEqual(context, arguments[0], JSValueMakeNumber(context, 0))) return JSValueMakeNumber(context, 1); @@ -212,6 +240,11 @@ static JSObjectRef MyObject_callAsConstructor(JSContextRef context, JSObjectRef UNUSED_PARAM(context); UNUSED_PARAM(object); + if (argumentCount > 0 && JSValueIsString(context, arguments[0]) && JSStringIsEqualToUTF8CString(JSValueToStringCopy(context, arguments[0], 0), "throwOnConstruct")) { + JSEvaluateScript(context, JSStringCreateWithUTF8CString("throw 'an exception'"), object, JSStringCreateWithUTF8CString("test script"), 1, exception); + return object; + } + if (argumentCount > 0 && JSValueIsStrictEqual(context, arguments[0], JSValueMakeNumber(context, 0))) return JSValueToObject(context, JSValueMakeNumber(context, 1), exception); @@ -223,6 +256,11 @@ static bool MyObject_hasInstance(JSContextRef context, JSObjectRef constructor, UNUSED_PARAM(context); UNUSED_PARAM(constructor); + if (JSValueIsString(context, possibleValue) && JSStringIsEqualToUTF8CString(JSValueToStringCopy(context, possibleValue, 0), "throwOnHasInstance")) { + JSEvaluateScript(context, JSStringCreateWithUTF8CString("throw 'an exception'"), constructor, JSStringCreateWithUTF8CString("test script"), 1, exception); + return false; + } + JSStringRef numberString = JSStringCreateWithUTF8CString("Number"); JSObjectRef numberConstructor = JSValueToObject(context, JSObjectGetProperty(context, JSContextGetGlobalObject(context), numberString, exception), exception); JSStringRelease(numberString); @@ -297,6 +335,118 @@ static JSClassRef MyObject_class(JSContextRef context) return jsClass; } +static bool EvilExceptionObject_hasInstance(JSContextRef context, 
JSObjectRef constructor, JSValueRef possibleValue, JSValueRef* exception) +{ + UNUSED_PARAM(context); + UNUSED_PARAM(constructor); + + JSStringRef hasInstanceName = JSStringCreateWithUTF8CString("hasInstance"); + JSValueRef hasInstance = JSObjectGetProperty(context, constructor, hasInstanceName, exception); + JSStringRelease(hasInstanceName); + if (!hasInstance) + return false; + JSObjectRef function = JSValueToObject(context, hasInstance, exception); + JSValueRef result = JSObjectCallAsFunction(context, function, constructor, 1, &possibleValue, exception); + return result && JSValueToBoolean(context, result); +} + +static JSValueRef EvilExceptionObject_convertToType(JSContextRef context, JSObjectRef object, JSType type, JSValueRef* exception) +{ + UNUSED_PARAM(object); + UNUSED_PARAM(exception); + JSStringRef funcName; + switch (type) { + case kJSTypeNumber: + funcName = JSStringCreateWithUTF8CString("toNumber"); + break; + case kJSTypeString: + funcName = JSStringCreateWithUTF8CString("toStringExplicit"); + break; + default: + return NULL; + break; + } + + JSValueRef func = JSObjectGetProperty(context, object, funcName, exception); + JSStringRelease(funcName); + JSObjectRef function = JSValueToObject(context, func, exception); + if (!function) + return NULL; + JSValueRef value = JSObjectCallAsFunction(context, function, object, 0, NULL, exception); + if (!value) + return (JSValueRef)JSStringCreateWithUTF8CString("convertToType failed"); + return value; +} + +JSClassDefinition EvilExceptionObject_definition = { + 0, + kJSClassAttributeNone, + + "EvilExceptionObject", + NULL, + + NULL, + NULL, + + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + EvilExceptionObject_hasInstance, + EvilExceptionObject_convertToType, +}; + +static JSClassRef EvilExceptionObject_class(JSContextRef context) +{ + UNUSED_PARAM(context); + + static JSClassRef jsClass; + if (!jsClass) + jsClass = JSClassCreate(&EvilExceptionObject_definition); + + return jsClass; +} + 
+JSClassDefinition EmptyObject_definition = { + 0, + kJSClassAttributeNone, + + NULL, + NULL, + + NULL, + NULL, + + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, +}; + +static JSClassRef EmptyObject_class(JSContextRef context) +{ + UNUSED_PARAM(context); + + static JSClassRef jsClass; + if (!jsClass) + jsClass = JSClassCreate(&EmptyObject_definition); + + return jsClass; +} + + static JSValueRef Base_get(JSContextRef ctx, JSObjectRef object, JSStringRef propertyName, JSValueRef* exception) { UNUSED_PARAM(object); @@ -656,6 +806,16 @@ int main(int argc, char* argv[]) JSObjectSetProperty(context, globalObject, myObjectIString, myObject, kJSPropertyAttributeNone, NULL); JSStringRelease(myObjectIString); + JSObjectRef EvilExceptionObject = JSObjectMake(context, EvilExceptionObject_class(context), NULL); + JSStringRef EvilExceptionObjectIString = JSStringCreateWithUTF8CString("EvilExceptionObject"); + JSObjectSetProperty(context, globalObject, EvilExceptionObjectIString, EvilExceptionObject, kJSPropertyAttributeNone, NULL); + JSStringRelease(EvilExceptionObjectIString); + + JSObjectRef EmptyObject = JSObjectMake(context, EmptyObject_class(context), NULL); + JSStringRef EmptyObjectIString = JSStringCreateWithUTF8CString("EmptyObject"); + JSObjectSetProperty(context, globalObject, EmptyObjectIString, EmptyObject, kJSPropertyAttributeNone, NULL); + JSStringRelease(EmptyObjectIString); + JSValueRef exception; // Conversions that throw exceptions @@ -846,7 +1006,7 @@ int main(int argc, char* argv[]) JSStringRelease(functionBody); string = JSValueToStringCopy(context, function, NULL); - assertEqualsAsUTF8String(JSValueMakeString(context, string), "function foo(foo) {return foo;}"); + assertEqualsAsUTF8String(JSValueMakeString(context, string), "function foo(foo) { return foo;\n}"); JSStringRelease(string); JSStringRef print = JSStringCreateWithUTF8CString("print"); @@ -954,9 +1114,10 @@ int main(int argc, char* argv[]) 
JSStringRelease(script); char* scriptUTF8 = createStringWithContentsOfFile(scriptPath); - if (!scriptUTF8) + if (!scriptUTF8) { printf("FAIL: Test script could not be loaded.\n"); - else { + failed = 1; + } else { script = JSStringCreateWithUTF8CString(scriptUTF8); result = JSEvaluateScript(context, script, NULL, NULL, 1, &exception); if (JSValueIsUndefined(context, result)) @@ -968,6 +1129,7 @@ int main(int argc, char* argv[]) CFShow(exceptionCF); CFRelease(exceptionCF); JSStringRelease(exceptionIString); + failed = 1; } JSStringRelease(script); free(scriptUTF8); @@ -991,6 +1153,27 @@ int main(int argc, char* argv[]) JSGlobalContextRelease(context); JSClassRelease(globalObjectClass); + // Test for an infinite prototype chain that used to be created. This test + // passes if the call to JSObjectHasProperty() does not hang. + + JSClassDefinition prototypeLoopClassDefinition = kJSClassDefinitionEmpty; + prototypeLoopClassDefinition.staticFunctions = globalObject_staticFunctions; + JSClassRef prototypeLoopClass = JSClassCreate(&prototypeLoopClassDefinition); + JSGlobalContextRef prototypeLoopContext = JSGlobalContextCreateInGroup(NULL, prototypeLoopClass); + + JSStringRef nameProperty = JSStringCreateWithUTF8CString("name"); + JSObjectHasProperty(prototypeLoopContext, JSContextGetGlobalObject(prototypeLoopContext), nameProperty); + + JSGlobalContextRelease(prototypeLoopContext); + JSClassRelease(prototypeLoopClass); + + printf("PASS: Infinite prototype chain does not occur.\n"); + + if (failed) { + printf("FAIL: Some tests failed.\n"); + return 1; + } + printf("PASS: Program exited normally.\n"); return 0; } diff --git a/JavaScriptCore/API/tests/testapi.js b/JavaScriptCore/API/tests/testapi.js index 9c8ca9e..6a1fab3 100644 --- a/JavaScriptCore/API/tests/testapi.js +++ b/JavaScriptCore/API/tests/testapi.js @@ -22,6 +22,17 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ +var failed = false; +function pass(msg) +{ + print("PASS: " + msg, "green"); +} + +function fail(msg) +{ + print("FAIL: " + msg, "red"); + failed = true; +} function shouldBe(a, b) { @@ -33,23 +44,22 @@ function shouldBe(a, b) } if (evalA == b || isNaN(evalA) && typeof evalA == 'number' && isNaN(b) && typeof b == 'number') - print("PASS: " + a + " should be " + b + " and is.", "green"); + pass(a + " should be " + b + " and is."); else - print("__FAIL__: " + a + " should be " + b + " but instead is " + evalA + ".", "red"); + fail(a + " should be " + b + " but instead is " + evalA + "."); } function shouldThrow(a) { - var result = "__FAIL__: " + a + " did not throw an exception."; - var evalA; try { eval(a); } catch(e) { - result = "PASS: " + a + " threw: " + e; + pass(a + " threw: " + e); + return; } - - print(result); + + fail(a + " did not throw an exception."); } function globalStaticFunction() @@ -70,9 +80,14 @@ shouldBe("MyObject.alwaysOne", 1); MyObject.cantDelete = 1; delete MyObject.cantDelete; shouldBe("MyObject.cantDelete", 1); -shouldBe("delete MyObject.throwOnDelete", 2); // deleteProperty -- should throw 2 +shouldBe("delete MyObject.throwOnDelete", "an exception"); MyObject.cantSet = 1; shouldBe("MyObject.cantSet", undefined); +shouldBe("MyObject.throwOnGet", "an exception"); +shouldBe("MyObject.throwOnSet = 5", "an exception"); +shouldBe("MyObject('throwOnCall')", "an exception"); +shouldBe("new MyObject('throwOnConstruct')", "an exception"); +shouldBe("'throwOnHasInstance' instanceof MyObject", "an exception"); var foundMyPropertyName = false; var foundRegularType = false; @@ -82,12 +97,16 @@ for (var p in MyObject) { if (p == "regularType") foundRegularType = true; } -print(foundMyPropertyName - ? "PASS: MyObject.myPropertyName was enumerated" - : "__FAIL__: MyObject.myPropertyName was not enumerated"); -print(foundRegularType - ? 
"PASS: MyObject.regularType was enumerated" - : "__FAIL__: MyObject.regularType was not enumerated"); + +if (foundMyPropertyName) + pass("MyObject.myPropertyName was enumerated"); +else + fail("MyObject.myPropertyName was not enumerated"); + +if (foundRegularType) + pass("MyObject.regularType was enumerated"); +else + fail("MyObject.regularType was not enumerated"); myObject = new MyObject(); @@ -100,7 +119,7 @@ shouldBe("MyObject ? 1 : 0", true); // toBoolean shouldBe("+MyObject", 1); // toNumber shouldBe("(MyObject.toString())", "[object MyObject]"); // toString shouldBe("String(MyObject)", "MyObjectAsString"); // type conversion to string -shouldBe("MyObject - 0", NaN); // toPrimitive +shouldBe("MyObject - 0", 1); // toNumber shouldBe("typeof MyConstructor", "object"); constructedObject = new MyConstructor(1); @@ -130,3 +149,21 @@ shouldBe("derived.baseDup = 0", 2); shouldBe("derived.baseOnly = 0", 1); shouldBe("derived.derivedOnly = 0", 2) shouldBe("derived.protoDup = 0", 2); + +shouldBe("undefined instanceof MyObject", false); +EvilExceptionObject.hasInstance = function f() { return f(); }; +EvilExceptionObject.__proto__ = undefined; +shouldThrow("undefined instanceof EvilExceptionObject"); +EvilExceptionObject.hasInstance = function () { return true; }; +shouldBe("undefined instanceof EvilExceptionObject", true); + +EvilExceptionObject.toNumber = function f() { return f(); } +shouldThrow("EvilExceptionObject*5"); +EvilExceptionObject.toStringExplicit = function f() { return f(); } +shouldThrow("String(EvilExceptionObject)"); + +shouldBe("EmptyObject", "[object CallbackObject]"); + +if (failed) + throw "Some tests failed"; + diff --git a/JavaScriptCore/Android.mk b/JavaScriptCore/Android.mk index b04ebaf..fbad0f6 100644 --- a/JavaScriptCore/Android.mk +++ b/JavaScriptCore/Android.mk @@ -48,6 +48,10 @@ # /wx/* \ LOCAL_SRC_FILES := \ + API/JSValueRef.cpp \ + API/JSCallbackObject.cpp \ + API/OpaqueJSString.cpp \ + \ bytecode/CodeBlock.cpp \ bytecode/JumpTable.cpp 
\ bytecode/Opcode.cpp \ @@ -152,6 +156,7 @@ LOCAL_SRC_FILES := \ runtime/StringPrototype.cpp \ runtime/Structure.cpp \ runtime/StructureChain.cpp \ + runtime/TimeoutChecker.cpp \ runtime/UString.cpp \ \ wrec/CharacterClass.cpp \ @@ -175,6 +180,7 @@ LOCAL_SRC_FILES := \ \ wtf/android/MainThreadAndroid.cpp \ \ + wtf/TypeTraits.cpp \ wtf/dtoa.cpp \ \ wtf/unicode/CollatorDefault.cpp \ diff --git a/JavaScriptCore/ChangeLog b/JavaScriptCore/ChangeLog index 5b9cc4b..2cecfd2 100644 --- a/JavaScriptCore/ChangeLog +++ b/JavaScriptCore/ChangeLog @@ -1,52 +1,1663 @@ -2009-02-13 Mark Rowe <mrowe@apple.com> +2009-03-26 Adam Roben <aroben@apple.com> - Merge r40975. + Copy testapi.js to $WebKitOutputDir on Windows - 2009-02-12 Darin Adler <darin@apple.com> + Part of Bug 24856: run-javascriptcore-tests should run testapi on + Windows + <https://bugs.webkit.org/show_bug.cgi?id=24856> - Reviewed by Oliver Hunt and Alexey Proskuryakov. + This matches what Mac does, which will help once we enable running + testapi from run-javascriptcore-tests on Windows. - Speed up a couple string functions. + Reviewed by Steve Falkenburg. - * runtime/StringPrototype.cpp: - (JSC::stringProtoFuncIndexOf): Added a fast path for cases where the second - argument is either missing or an integer. - (JSC::stringProtoFuncBig): Use jsNontrivialString since the string is guaranteed - to be 2 or more characters long. - (JSC::stringProtoFuncSmall): Ditto. - (JSC::stringProtoFuncBlink): Ditto. - (JSC::stringProtoFuncBold): Ditto. - (JSC::stringProtoFuncItalics): Ditto. - (JSC::stringProtoFuncStrike): Ditto. - (JSC::stringProtoFuncSub): Ditto. - (JSC::stringProtoFuncSup): Ditto. - (JSC::stringProtoFuncFontcolor): Ditto. - (JSC::stringProtoFuncFontsize): Make the fast path Sam recently added even faster - by avoiding all but the minimum memory allocation. - (JSC::stringProtoFuncAnchor): Use jsNontrivialString. - (JSC::stringProtoFuncLink): Added a fast path. 
+ * JavaScriptCore.vcproj/testapi/testapi.vcproj: Copy testapi.js next + to testapi.exe. + +2009-03-25 Oliver Hunt <oliver@apple.com> + + Reviewed by Geoff Garen. + + Fix exception handling for instanceof in the interpreter. + + * interpreter/Interpreter.cpp: + (JSC::Interpreter::privateExecute): + +2009-03-25 Geoffrey Garen <ggaren@apple.com> + + Reviewed by Cameron Zwarich. + + Fixed <rdar://problem/6724011> Write to freed memory in JSC::Label::deref + when reloading http://helpme.att.net/speedtest/ + + * bytecompiler/BytecodeGenerator.h: Reversed the declaration order for + m_labelScopes and m_labels to reverse their destruction order. + m_labelScopes has references to memory within m_labels, so its destructor + needs to run first. + +2009-03-24 Eli Fidler <eli.fidler@torchmobile.com> + + Reviewed by George Staikos. + + Correct warnings which in some environments are treated as errors. + + * wtf/dtoa.cpp: + (WTF::b2d): + (WTF::d2b): + (WTF::strtod): + (WTF::dtoa): + +2009-03-24 Kevin Ollivier <kevino@theolliviers.com> + + Reviewed by Darin Adler. + + Explicitly define HAVE_LANGINFO_H on Darwin. Fixes the wx build bot jscore + test failure. + + https://bugs.webkit.org/show_bug.cgi?id=24780 + * wtf/Platform.h: + +2009-03-23 Oliver Hunt <oliver@apple.com> + + Reviewed by Cameron Zwarich. + + Fix className() for API defined class + + * API/JSCallbackObjectFunctions.h: + (JSC::::className): + * API/tests/testapi.c: + (EmptyObject_class): + (main): + * API/tests/testapi.js: + +2009-03-23 Oliver Hunt <oliver@apple.com> + + Reviewed by Geoff Garen. + + Make testapi assertions run in release builds, so that testapi actually + works in a release build. + + Many of the testapi assertions have side effects that are necessary, and + given testapi is a testing program, perf impact of an assertion is not + important, so it makes sense to apply the assertions in release builds + anyway. 
+ + * API/tests/testapi.c: + (EvilExceptionObject_hasInstance): + +2009-03-23 David Kilzer <ddkilzer@apple.com> + + Provide JavaScript exception information after slow script timeout + + Reviewed by Oliver Hunt. + + * runtime/Completion.cpp: + (JSC::evaluate): Set the exception object as the Completion + object's value for slow script timeouts. This is used in + WebCore when reporting the exception. + * runtime/ExceptionHelpers.cpp: + (JSC::InterruptedExecutionError::toString): Added. Provides a + description message for the exception when it is reported. + +2009-03-23 Gustavo Noronha Silva <gns@gnome.org> and Thadeu Lima de Souza Cascardo <cascardo@holoscopio.com> + + Reviewed by Adam Roben. + + https://bugs.webkit.org/show_bug.cgi?id=24674 + Crashes in !PLATFORM(MAC)'s formatLocaleDate, in very specific situations + + Make sure strftime never returns 2-digits years to avoid ambiguity + and a crash. We wrap this new code option in HAVE_LANGINFO_H, + since it is apparently not available in all platforms. + + * runtime/DatePrototype.cpp: + (JSC::formatLocaleDate): + * wtf/Platform.h: + +2009-03-22 Oliver Hunt <oliver@apple.com> + + Reviewed by Cameron Zwarich. + + Fix exception handling in API + + We can't just use the ExecState exception slot for returning exceptions + from class introspection functions provided through the API as many JSC + functions will explicitly clear the ExecState exception when returning. 
+ + * API/JSCallbackObjectFunctions.h: + (JSC::JSCallbackObject<Base>::getOwnPropertySlot): + (JSC::JSCallbackObject<Base>::put): + (JSC::JSCallbackObject<Base>::deleteProperty): + (JSC::JSCallbackObject<Base>::construct): + (JSC::JSCallbackObject<Base>::hasInstance): + (JSC::JSCallbackObject<Base>::call): + (JSC::JSCallbackObject<Base>::toNumber): + (JSC::JSCallbackObject<Base>::toString): + (JSC::JSCallbackObject<Base>::staticValueGetter): + (JSC::JSCallbackObject<Base>::callbackGetter): + * API/tests/testapi.c: + (MyObject_hasProperty): + (MyObject_getProperty): + (MyObject_setProperty): + (MyObject_deleteProperty): + (MyObject_callAsFunction): + (MyObject_callAsConstructor): + (MyObject_hasInstance): + (EvilExceptionObject_hasInstance): + (EvilExceptionObject_convertToType): + (EvilExceptionObject_class): + (main): + * API/tests/testapi.js: + (EvilExceptionObject.hasInstance): + (EvilExceptionObject.toNumber): + (EvilExceptionObject.toStringExplicit): + +2009-03-21 Cameron Zwarich <cwzwarich@uwaterloo.ca> + + Reviewed by Oliver Hunt. + + Bug 20049: testapi failure: MyObject - 0 should be NaN but instead is 1. + <https://bugs.webkit.org/show_bug.cgi?id=20049> + <rdar://problem/6079127> + + In this case, the test is wrong. According to the ECMA spec, subtraction + uses ToNumber, not ToPrimitive. Change the test to match the spec. + + * API/tests/testapi.js: + +2009-03-21 Oliver Hunt <oliver@apple.com> + + Reviewed by Cameron Zwarich. + + Ensure that JSObjectMakeFunction doesn't produce incorrect line numbers. + + Also make test api correctly propagate failures. + + * API/tests/testapi.c: + (main): + * runtime/FunctionConstructor.cpp: + (JSC::constructFunction): + +2009-03-21 Oliver Hunt <oliver@apple.com> + + Reviewed by Mark Rowe. + + Improve testapi by making it report failures in a way we can pick up + from our test scripts. 
+ + * API/tests/testapi.c: + (assertEqualsAsBoolean): + (assertEqualsAsNumber): + (assertEqualsAsUTF8String): + (assertEqualsAsCharactersPtr): + (main): + * API/tests/testapi.js: + (pass): + (fail): + (shouldBe): + (shouldThrow): + +2009-03-20 Norbert Leser <norbert.leser@nokia.com> + + Reviewed by Darin Adler. + + https://bugs.webkit.org/show_bug.cgi?id=24535 + + Fixes missing line terminator character (;) after macro call. + It is common practice to add the trailing ";" where macros are substituted + and not where they are defined with #define. + This change is consistent with other macro declarations across webkit, + and it also solves compilation failure with symbian compilers. + * runtime/UString.cpp: - (JSC::UString::find): Added a fast path for single-character search strings. + * wtf/Assertions.h: + +2009-03-20 Geoffrey Garen <ggaren@apple.com> + + Reviewed by Darin Adler. + + Fixed a JavaScriptCore crash on the Windows buildbot. + + * bytecompiler/BytecodeGenerator.h: Reduced the AST recursion limit. + Apparently, Windows has small stacks. + +2009-03-20 Geoffrey Garen <ggaren@apple.com> + + Reviewed by Oliver Hunt. + + A little cleanup in the RegisterFile code. + + Moved large inline functions out of the class declaration, to make it + more readable. + + Switched over to using the roundUpAllocationSize function to avoid + duplicate code and subtle bugs. + + Renamed m_maxCommitted to m_commitEnd, to match m_end. + + Renamed allocationSize to commitSize because it's the chunk size for + committing memory, not allocating memory. + + SunSpider reports no change. + + * interpreter/RegisterFile.h: + (JSC::RegisterFile::RegisterFile): + (JSC::RegisterFile::shrink): + (JSC::RegisterFile::grow): + * jit/ExecutableAllocator.h: + (JSC::roundUpAllocationSize): + +2009-03-19 Geoffrey Garen <ggaren@apple.com> + + Reviewed by Oliver Hunt. + + Fixed <rdar://problem/6033712> -- a little bit of hardening in the Collector. + + SunSpider reports no change. 
I also verified in the disassembly that + we end up with a single compare to constant. + + * runtime/Collector.cpp: + (JSC::Heap::heapAllocate): + +2009-03-19 Geoffrey Garen <ggaren@apple.com> + + Reviewed by Cameron Zwarich and Oliver Hunt. + + Fixed <rdar://problem/6406045> REGRESSION: Stack overflow on PowerPC on + fast/workers/use-machine-stack.html (22531) + + Dialed down the re-entry allowance to 64 (from 128). + + On a 512K stack, this leaves about 64K for other code on the stack while + JavaScript is running. Not perfect, but it solves our crash on PPC. + + Different platforms may want to dial this down even more. + + Also, substantially shrunk BytecodeGenerator. Since we allocate one on + the stack in order to throw a stack overflow exception -- well, let's + just say the old code had an appreciation for irony. + + SunSpider reports no change. + + * bytecompiler/BytecodeGenerator.h: + * interpreter/Interpreter.h: + (JSC::): + +2009-03-19 Cameron Zwarich <cwzwarich@uwaterloo.ca> + + Reviewed by Oliver Hunt. + + Bug 24350: REGRESSION: Safari 4 breaks SPAW wysiwyg editor multiple instances + <https://bugs.webkit.org/show_bug.cgi?id=24350> + <rdar://problem/6674182> + + The SPAW editor's JavaScript assumes that toString() on a function + constructed with the Function constructor produces a function with + a newline after the opening brace. + + * runtime/FunctionConstructor.cpp: + (JSC::constructFunction): Add a newline after the opening brace of the + function's source code. + +2009-03-19 Cameron Zwarich <cwzwarich@uwaterloo.ca> + + Reviewed by Geoff Garen. + + Bug 23771: REGRESSION (r36016): JSObjectHasProperty freezes on global class without kJSClassAttributeNoAutomaticPrototype + <https://bugs.webkit.org/show_bug.cgi?id=23771> + <rdar://problem/6561016> + + * API/tests/testapi.c: + (main): Add a test for this bug. 
+ * runtime/JSGlobalObject.cpp: + (JSC::JSGlobalObject::resetPrototype): Don't set the prototype of the + last object in the prototype chain to the object prototype when the + object prototype is already the last object in the prototype chain. + +2009-03-19 Timothy Hatcher <timothy@apple.com> + + <rdar://problem/6687342> -[WebView scheduleInRunLoop:forMode:] has no affect on timers + + Reviewed by Darin Adler. + + * wtf/Platform.h: Added HAVE_RUNLOOP_TIMER for PLATFORM(MAC). + +2009-03-19 Geoffrey Garen <ggaren@apple.com> + + Reviewed by Oliver Hunt. + + Fixed <rdar://problem/6279213> Regular expression run-time complexity + limit too low for long inputs (21485) + + I raised PCRE's "matchLimit" (limit on backtracking) by an order of + magnitude. This fixes all the reported examples of timing out on legitimate + regular expression matches. + + In my testing on a Core Duo MacBook Pro, the longest you can get stuck + trying to match a string is still under 1s, so this seems like a safe change. + + I can think of a number of better solutions that are more complicated, + but this is a good improvement for now. + + * pcre/pcre_exec.cpp: + +2009-03-19 Geoffrey Garen <ggaren@apple.com> + + Reviewed by Sam Weinig. + + Fixed <rdar://problem/6603562> REGRESSION (Safari 4): regular expression + pattern size limit lower than Safari 3.2, other browsers, breaks SAP (14873) + + Bumped the pattern size limit to 1MB, and standardized it between PCRE + and WREC. (Empirical testing says that we can easily compile a 1MB regular + expression without risking a hang. Other browsers support bigger regular + expressions, but also hang.) + + SunSpider reports no change. + + I started with a patch posted to Bugzilla by Erik Corry (erikcorry@google.com). 
+ + * pcre/pcre_internal.h: + (put3ByteValue): + (get3ByteValue): + (put3ByteValueAndAdvance): + (putLinkValueAllowZero): + (getLinkValueAllowZero): Made PCRE's "LINK_SIZE" (the number of bytes + used to record jumps between bytecodes) 3, to accomodate larger potential + jumps. Bumped PCRE's "MAX_PATTERN_SIZE" to 1MB. (Technically, at this + LINK_SIZE, we can support even larger patterns, but we risk a hang during + compilation, and it's not clear that such large patterns are important + on the web.) + + * wrec/WREC.cpp: + (JSC::WREC::Generator::compileRegExp): Match PCRE's maximum pattern size, + to avoid quirks between platforms. + +2009-03-18 Ada Chan <adachan@apple.com> + + Rolling out r41818 since it broke the windows build. + Error: ..\..\runtime\DatePrototype.cpp(30) : fatal error C1083: Cannot open include file: 'langinfo.h': No such file or directory + + * runtime/DatePrototype.cpp: + (JSC::formatLocaleDate): + +2009-03-17 Oliver Hunt <oliver@apple.com> + + Reviewed by Cameron Zwarich. + + <rdar://problem/6692138> REGRESSION (Safari 4): Incorrect function return value when using IE "try ... finally" memory leak work-around (24654) + <https://bugs.webkit.org/show_bug.cgi?id=24654> + + If the return value for a function is in a local register we need + to copy it before executing any finalisers, otherwise it is possible + for the finaliser to clobber the result. + + * bytecompiler/BytecodeGenerator.h: + (JSC::BytecodeGenerator::hasFinaliser): + * parser/Nodes.cpp: + (JSC::ReturnNode::emitBytecode): + +2009-03-17 Kevin Ollivier <kevino@theolliviers.com> + + Reviewed by Mark Rowe. + + Move BUILDING_ON_* defines into Platform.h to make them available to other ports. + Also tweak the defines so that they work with the default values set by + AvailabilityMacros.h. 
+ + https://bugs.webkit.org/show_bug.cgi?id=24630 + + * JavaScriptCorePrefix.h: + * wtf/Platform.h: + +2009-03-15 Simon Fraser <simon.fraser@apple.com> + + Revert r41718 because it broke DumpRenderTree on Tiger. + + * JavaScriptCorePrefix.h: + * wtf/Platform.h: + +2009-03-15 Kevin Ollivier <kevino@theolliviers.com> + + Non-Apple Mac ports build fix. Move defines for the BUILDING_ON_ macros into + Platform.h so that they're defined for all ports building on Mac, and tweak + the definitions of those macros based on Mark Rowe's suggestions to accomodate + cases where the values may not be <= to the .0 release for that version. + + * JavaScriptCorePrefix.h: + * wtf/Platform.h: + +2009-03-13 Mark Rowe <mrowe@apple.com> + + Rubber-stamped by Dan Bernstein. + + Take advantage of the ability of recent versions of Xcode to easily switch the active + architecture. + + * Configurations/DebugRelease.xcconfig: + +2009-03-13 Mark Rowe <mrowe@apple.com> + + Reviewed by David Kilzer. + + Prevent AllInOneFile.cpp and ProfileGenerator.cpp from rebuilding unnecessarily when + switching between building in Xcode and via build-webkit. + + build-webkit passes FEATURE_DEFINES to xcodebuild, resulting in it being present in the + Derived Sources build settings. When building in Xcode, this setting isn't present so + Xcode reruns the script build phases. This results in a new version of TracingDtrace.h + being generated, and the files that include it being rebuilt. + + * JavaScriptCore.xcodeproj/project.pbxproj: Don't regenerate TracingDtrace.h if it is + already newer than the input file. + +2009-03-13 Norbert Leser <norbert.leser@nokia.com> + + Reviewed by Darin Adler. + + Resolved name conflict with globally defined tzname in Symbian. + Replaced with different name instead of using namespace qualifier + (appeared to be less clumsy). + + * runtime/DateMath.cpp: + +2009-03-12 Mark Rowe <mrowe@apple.com> + + Reviewed by Darin Adler. 
+ + <rdar://problem/6548446> TCMalloc_SystemRelease should use madvise rather than re-mmaping span of pages + + * wtf/FastMalloc.cpp: + (WTF::mergeDecommittedStates): If either of the spans has been released to the system, release the other + span as well so that the flag in the merged span is accurate. + * wtf/Platform.h: + * wtf/TCSystemAlloc.cpp: Track decommitted spans when using MADV_FREE_REUSABLE / MADV_FREE_REUSE. + (TCMalloc_SystemRelease): Use madvise with MADV_FREE_REUSABLE when it is available. + (TCMalloc_SystemCommit): Use madvise with MADV_FREE_REUSE when it is available. + * wtf/TCSystemAlloc.h: + +2009-03-12 Adam Treat <adam.treat@torchmobile.com> + + Reviewed by NOBODY (Build fix). + + Include string.h for strlen usage. + + * wtf/Threading.cpp: + +2009-03-12 David Kilzer <ddkilzer@apple.com> + + Add NO_RETURN attribute to runInteractive() when not using readline + + Reviewed by Darin Adler. + + * jsc.cpp: + (runInteractive): If the readline library is not used, this method + will never return, thus the NO_RETURN attribute is needed to prevent + a gcc warning. + +2009-03-12 Adam Roben <aroben@apple.com> + + Adopt setThreadNameInternal on Windows + + Also changed a Windows-only assertion about thread name length to an + all-platform log message. + + Reviewed by Adam Treat. + + * wtf/Threading.cpp: + (WTF::createThread): Warn if the thread name is longer than 31 + characters, as Visual Studio will truncate names longer than that + length. + + * wtf/ThreadingWin.cpp: + (WTF::setThreadNameInternal): Renamed from setThreadName and changed + to always operate on the current thread. + (WTF::initializeThreading): Changed to use setThreadNameInternal. + (WTF::createThreadInternal): Removed call to setThreadName. This is + now handled by threadEntryPoint and setThreadNameInternal. + +2009-03-11 David Kilzer <ddkilzer@apple.com> + + Clarify comments regarding order of FEATURE_DEFINES + + Rubber-stamped by Mark Rowe. 
+ + * Configurations/JavaScriptCore.xcconfig: Added warning about + the consequences when FEATURE_DEFINES are not kept in sync. + +2009-03-11 Dan Bernstein <mitz@apple.com> + + Reviewed by Darin Adler. + + - WTF support for fixing <rdar://problem/3919124> Thai text selection + in Safari is incorrect + + * wtf/unicode/icu/UnicodeIcu.h: + (WTF::Unicode::hasLineBreakingPropertyComplexContext): Added. Returns + whether the character has Unicode line breaking property value SA + ("Complex Context"). + * wtf/unicode/qt4/UnicodeQt4.h: + (WTF::Unicode::hasLineBreakingPropertyComplexContext): Added an + implementation that always returns false. + +2009-03-11 Darin Adler <darin@apple.com> + + Reviewed by Mark Rowe. + + Give threads names on platforms with pthread_setname_np. + + * wtf/Threading.cpp: + (WTF::NewThreadContext::NewThreadContext): Initialize thread name. + (WTF::threadEntryPoint): Call setThreadNameInternal. + (WTF::createThread): Pass thread name. + + * wtf/Threading.h: Added new comments, setThreadNameInternal. + + * wtf/ThreadingGtk.cpp: + (WTF::setThreadNameInternal): Added. Empty. + * wtf/ThreadingNone.cpp: + (WTF::setThreadNameInternal): Added. Empty. + * wtf/ThreadingPthreads.cpp: + (WTF::setThreadNameInternal): Call pthread_setname_np when available. + * wtf/ThreadingQt.cpp: + (WTF::setThreadNameInternal): Added. Empty. + * wtf/ThreadingWin.cpp: + (WTF::setThreadNameInternal): Added. Empty. + +2009-03-11 Adam Roben <aroben@apple.com> + + Change the Windows implementation of ThreadSpecific to use functions + instead of extern globals + + This will make it easier to export ThreadSpecific from WebKit. + + Reviewed by John Sullivan. + + * API/JSBase.cpp: + (JSEvaluateScript): + Touched this file to force ThreadSpecific.h to be copied into + $WebKitOutputDir. + + * wtf/ThreadSpecific.h: Replaced g_tls_key_count with tlsKeyCount() + and g_tls_keys with tlsKeys(). 
+ + (WTF::::ThreadSpecific): + (WTF::::~ThreadSpecific): + (WTF::::get): + (WTF::::set): + (WTF::::destroy): + Updated to use the new functions. + + * wtf/ThreadSpecificWin.cpp: + (WTF::tlsKeyCount): + (WTF::tlsKeys): + Added. + + (WTF::ThreadSpecificThreadExit): Changed to use the new functions. + +2009-03-10 Cameron Zwarich <cwzwarich@uwaterloo.ca> + + Reviewed by Geoff Garen. + + Bug 24291: REGRESSION (r38635): Single line JavaScript comment prevents HTML button click handler execution + <https://bugs.webkit.org/show_bug.cgi?id=24291> + <rdar://problem/6663472> + + Add an extra newline to the end of the body of the program text constructed + by the Function constructor for parsing. This allows single line comments to + be handled correctly by the parser. + + * runtime/FunctionConstructor.cpp: + (JSC::constructFunction): + +2009-03-09 Oliver Hunt <oliver@apple.com> + + Reviewed by Gavin Barraclough. + + Bug 24447: REGRESSION (r41508): Google Maps does not complete initialization + <rdar://problem/6657774> + + r41508 actually exposed a pre-existing bug where we were not invalidating the result + register cache at jump targets. This causes problems when condition loads occur in an + expression -- namely through the ?: and || operators. This patch corrects these issues + by marking the target of all forward jumps as being a jump target, and then clears the + result register cache when ever it starts generating code for a targeted instruction. + + I do not believe it is possible to cause this class of failure outside of a single + expression, and expressions only provide forward branches, so this should resolve this + entire class of bug. That said i've included a test case that gets as close as possible + to hitting this bug with a back branch, to hopefully prevent anyone from introducing the + problem in future. 
+ + * assembler/AbstractMacroAssembler.h: + (JSC::AbstractMacroAssembler::Label::isUsed): + (JSC::AbstractMacroAssembler::Label::used): + * assembler/X86Assembler.h: + (JSC::X86Assembler::JmpDst::JmpDst): + (JSC::X86Assembler::JmpDst::isUsed): + (JSC::X86Assembler::JmpDst::used): + * jit/JIT.cpp: + (JSC::JIT::privateCompileMainPass): + +2009-03-09 David Levin <levin@chromium.org> + + Reviewed by Darin Adler. + + Bug 23175: String and UString should be able to share a UChar* buffer. + <https://bugs.webkit.org/show_bug.cgi?id=23175> + + Add CrossThreadRefCounted. + + * wtf/CrossThreadRefCounted.h: Added. + (WTF::CrossThreadRefCounted::create): + (WTF::CrossThreadRefCounted::isShared): + (WTF::CrossThreadRefCounted::dataAccessMustBeThreadSafe): + (WTF::CrossThreadRefCounted::mayBePassedToAnotherThread): + (WTF::CrossThreadRefCounted::CrossThreadRefCounted): + (WTF::CrossThreadRefCounted::~CrossThreadRefCounted): + (WTF::CrossThreadRefCounted::ref): + (WTF::CrossThreadRefCounted::deref): + (WTF::CrossThreadRefCounted::release): + (WTF::CrossThreadRefCounted::copy): + (WTF::CrossThreadRefCounted::threadSafeDeref): + * wtf/RefCounted.h: + * wtf/Threading.h: + (WTF::ThreadSafeSharedBase::ThreadSafeSharedBase): + (WTF::ThreadSafeSharedBase::derefBase): + (WTF::ThreadSafeShared::ThreadSafeShared): + (WTF::ThreadSafeShared::deref): + +2009-03-09 Laszlo Gombos <laszlo.1.gombos@nokia.com> + + Reviewed by George Staikos. + + https://bugs.webkit.org/show_bug.cgi?id=24353 + Allow to overrule default build options for Qt build. + + * JavaScriptCore.pri: Allow to overrule ENABLE_JIT + +2009-03-08 Oliver Hunt <oliver@apple.com> + + Reviewed by NOBODY (build fix). + + Build fix. + + * runtime/ArrayPrototype.cpp: + (JSC::arrayProtoFuncConcat): + +2009-03-01 Oliver Hunt <oliver@apple.com> + + Reviewed by Cameron Zwarich. 
+ + Bug 24268: RuntimeArray is not a fully implemented JSArray + <https://bugs.webkit.org/show_bug.cgi?id=24268> + + Don't cast a type to JSArray, just because it reportsArray as a supertype + in the JS type system. Doesn't appear feasible to create a testcase + unfortunately as setting up the failure conditions requires internal access + to JSC not present in DRT. + + * runtime/ArrayPrototype.cpp: + (JSC::arrayProtoFuncConcat): + +2009-03-06 Gavin Barraclough <barraclough@apple.com> + + Reviewed by Oliver Hunt. + + When preforming an op_mov, preserve any existing register mapping. + + ~0.5% progression on v8 tests x86-64. + + * jit/JIT.cpp: + (JSC::JIT::privateCompileMainPass): + +2009-03-05 Simone Fiorentino <simone.fiorentino@consulenti.fastweb.it> + + Bug 24382: request to add SH4 platform + + <https://bugs.webkit.org/show_bug.cgi?id=24382> + + Reviewed by David Kilzer. + + * wtf/Platform.h: Added support for SH4 platform. + +2009-03-05 Gavin Barraclough <barraclough@apple.com> + + Reviewed by Oliver Hunt. + + Writes of constant values to SF registers should be made with direct memory + writes where possible, rather than moving the value via a hardware register. + + ~3% win on SunSpider tests on x86, ~1.5% win on v8 tests on x86-64. + + * assembler/MacroAssemblerX86_64.h: + (JSC::MacroAssemblerX86_64::storePtr): + * assembler/X86Assembler.h: + (JSC::X86Assembler::movq_i32m): + * jit/JIT.cpp: + (JSC::JIT::privateCompileMainPass): + +2009-03-05 Mark Rowe <mrowe@apple.com> + + Fix the build. + + Sprinkle "static" around NumberConstructor.cpp in order to please the compiler. + + * runtime/NumberConstructor.cpp: + (JSC::numberConstructorNaNValue): + (JSC::numberConstructorNegInfinity): + (JSC::numberConstructorPosInfinity): + (JSC::numberConstructorMaxValue): + (JSC::numberConstructorMinValue): + +2009-03-04 Mark Rowe <mrowe@apple.com> + + Reviewed by Oliver Hunt. 
+ + <rdar://problem/6354858> FastMallocZone's enumeration code reports fragmented administration space + + The handling of MALLOC_ADMIN_REGION_RANGE_TYPE in FastMalloc's zone was incorrect. It was attempting + to record the memory containing and individual span as an administrative region, when all memory + allocated via MetaDataAlloc should in fact be recorded. This was causing memory regions allocated + via MetaDataAlloc to appear as "VM_ALLOCATE ?" in vmmap output. They are now correctly reported as + "MALLOC_OTHER" regions associated with the JavaScriptCore FastMalloc zone. + + Memory is allocated via MetaDataAlloc from two locations: PageHeapAllocator, and TCMalloc_PageMap{2,3}. + These two cases are handled differently. + + PageHeapAllocator is extended to keep a linked list of memory regions that it has allocated. The + first object in an allocated region contains the link to the previously allocated region. To record + the administrative regions of a PageHeapAllocator we can simply walk the linked list and record + each allocated region we encounter. + + TCMalloc_PageMaps allocate memory via MetaDataAlloc to store each level of the radix tree. To record + the administrative regions of a TCMalloc_PageMap we walk the tree and record the storage used for nodes + at each position rather than the nodes themselves. + + A small performance improvement is achieved by coalescing adjacent memory regions inside the PageMapMemoryUsageRecorder + so that fewer calls in to the range recorder are necessary. We further reduce the number of calls to the + range recorder by aggregating the in-use ranges of a given memory region into a local buffer before recording + them with a single call. A similar approach is also used by AdminRegionRecorder. 
-2009-02-13 Mark Rowe <mrowe@apple.com> + * wtf/FastMalloc.cpp: + (WTF::PageHeapAllocator::Init): + (WTF::PageHeapAllocator::New): + (WTF::PageHeapAllocator::recordAdministrativeRegions): + (WTF::TCMallocStats::FreeObjectFinder::isFreeObject): + (WTF::TCMallocStats::PageMapMemoryUsageRecorder::~PageMapMemoryUsageRecorder): + (WTF::TCMallocStats::PageMapMemoryUsageRecorder::recordPendingRegions): + (WTF::TCMallocStats::PageMapMemoryUsageRecorder::visit): + (WTF::TCMallocStats::AdminRegionRecorder::AdminRegionRecorder): + (WTF::TCMallocStats::AdminRegionRecorder::recordRegion): + (WTF::TCMallocStats::AdminRegionRecorder::visit): + (WTF::TCMallocStats::AdminRegionRecorder::recordPendingRegions): + (WTF::TCMallocStats::AdminRegionRecorder::~AdminRegionRecorder): + (WTF::TCMallocStats::FastMallocZone::enumerate): + (WTF::TCMallocStats::FastMallocZone::FastMallocZone): + (WTF::TCMallocStats::FastMallocZone::init): + * wtf/TCPageMap.h: + (TCMalloc_PageMap2::visitValues): + (TCMalloc_PageMap2::visitAllocations): + (TCMalloc_PageMap3::visitValues): + (TCMalloc_PageMap3::visitAllocations): + +2009-03-04 Antti Koivisto <antti@apple.com> + + Reviewed by Dave Hyatt. + + https://bugs.webkit.org/show_bug.cgi?id=24359 + Repaint throttling mechanism + + Set ENABLE_REPAINT_THROTTLING to 0 by default. - Merge r40945. + * wtf/Platform.h: + +2009-03-03 David Kilzer <ddkilzer@apple.com> + + <rdar://problem/6581203> WebCore and WebKit should install the same set of headers during installhdrs phase as build phase + + Reviewed by Mark Rowe. + + * Configurations/Base.xcconfig: Defined REAL_PLATFORM_NAME based + on PLATFORM_NAME to work around the missing definition on Tiger. + Updated HAVE_DTRACE to use REAL_PLATFORM_NAME. + +2009-03-03 Kevin McCullough <kmccullough@apple.com> + + Reviewed by Oliver Hunt. 
+ + <rdar://problem/6639110> console.profile() doesn't work without a title + + * profiler/Profiler.cpp: + (JSC::Profiler::startProfiling): assert if there is no title to ensure + we don't start profiling without one. + +2009-03-02 Sam Weinig <sam@webkit.org> + + Reviewed by Mark Rowe. - 2009-02-12 Sam Weinig <sam@webkit.org> + Enable Geolocation (except on Tiger and Leopard). + + * Configurations/JavaScriptCore.xcconfig: + +2009-03-01 David Kilzer <ddkilzer@apple.com> + + <rdar://problem/6635688> Move HAVE_DTRACE check to Base.xcconfig + + Reviewed by Mark Rowe. + + * Configurations/Base.xcconfig: Set HAVE_DTRACE Xcode variable + based on PLATFORM_NAME and MAC_OS_X_VERSION_MAJOR. Also define + it as a preprocessor macro by modifying + GCC_PREPROCESSOR_DEFINITIONS. + * JavaScriptCore.xcodeproj/project.pbxproj: Changed "Generate + DTrace header" script phase to check for HAVE_DTRACE instead of + MACOSX_DEPLOYMENT_TARGET. + * wtf/Platform.h: Removed definition of HAVE_DTRACE macro since + it's defined in Base.xcconfig now. + +2009-03-01 Horia Olaru <olaru@adobe.com> + + By looking in grammar.y there are only a few types of statement nodes + on which the debugger should stop. + + Removed isBlock and isLoop virtual calls. No need to emit debug hooks in + the "statementListEmitCode" method as long as the necessary hooks can be + added in each "emitCode". + + https://bugs.webkit.org/show_bug.cgi?id=21073 + + Reviewed by Kevin McCullough. 
+ + * parser/Nodes.cpp: + (JSC::ConstStatementNode::emitBytecode): + (JSC::statementListEmitCode): + (JSC::EmptyStatementNode::emitBytecode): + (JSC::ExprStatementNode::emitBytecode): + (JSC::VarStatementNode::emitBytecode): + (JSC::IfNode::emitBytecode): + (JSC::IfElseNode::emitBytecode): + (JSC::DoWhileNode::emitBytecode): + (JSC::WhileNode::emitBytecode): + (JSC::ForNode::emitBytecode): + (JSC::ForInNode::emitBytecode): + (JSC::ContinueNode::emitBytecode): + (JSC::BreakNode::emitBytecode): + (JSC::ReturnNode::emitBytecode): + (JSC::WithNode::emitBytecode): + (JSC::SwitchNode::emitBytecode): + (JSC::LabelNode::emitBytecode): + (JSC::ThrowNode::emitBytecode): + (JSC::TryNode::emitBytecode): + * parser/Nodes.h: + +2009-02-26 Gavin Barraclough <barraclough@apple.com> + + Reviewed by Geoff Garen. + + Fix bug #23614. Switches on double precision values were incorrectly + truncating the scrutinee value. E.g.: + + switch (1.1) { case 1: print("FAIL"); } + + Was resulting in FAIL. + + * interpreter/Interpreter.cpp: + (JSC::Interpreter::privateExecute): + * jit/JITStubs.cpp: + (JSC::JITStubs::cti_op_switch_imm): + +2009-02-26 Gavin Barraclough <barraclough@apple.com> + + Reviewed by Oliver Hunt. + + Integer Immediate representation need not be canonical in x86 JIT code. + On x86-64 we already have loosened the requirement that the int immediate + representation is canonical, we should bring x86 into line. + + This patch is a minor (~0.5%) improvement on sunspider & v8-tests, and + should reduce memory footprint (reduces JIT code size). + + * jit/JIT.cpp: + (JSC::JIT::compileOpStrictEq): + (JSC::JIT::privateCompileSlowCases): + * jit/JIT.h: + (JSC::JIT::emitJumpIfImmediateNumber): + (JSC::JIT::emitJumpIfNotImmediateNumber): + * jit/JITArithmetic.cpp: + (JSC::JIT::putDoubleResultToJSNumberCellOrJSImmediate): + (JSC::JIT::compileBinaryArithOp): + +2009-02-26 Carol Szabo <carol.szabo@nokia.com> + + Reviewed by Darin Adler. 
+ + https://bugs.webkit.org/show_bug.cgi?id=24099 + ARM Compiler Warnings in pcre_exec.cpp + + * pcre/pcre_exec.cpp: + (match): + +2009-02-25 Cameron Zwarich <cwzwarich@uwaterloo.ca> + + Reviewed by Gavin Barraclough. + + Bug 24086: Regression (r40993): WebKit crashes after logging in to lists.zenbe + <https://bugs.webkit.org/show_bug.cgi?id=24086> + <rdar://problem/6625111> + + The numeric sort optimization in r40993 generated bytecode for a function + without generating JIT code. This breaks an assumption in some parts of + the JIT's function calling logic that the presence of a CodeBlock implies + the existence of JIT code. + + In order to fix this, we simply generate JIT code whenever we check whether + a function is a numeric sort function. This only incurs an additional cost + in the case when the function is a numeric sort function, in which case it + is not expensive to generate JIT code for it. + + * runtime/ArrayPrototype.cpp: + (JSC::isNumericCompareFunction): + +2009-02-25 Geoffrey Garen <ggaren@apple.com> + + Reviewed by Maciej Stachowiak. + + Fixed <rdar://problem/6611174> REGRESSION (r36701): Unable to select + messages on hotmail (24052) + + The bug was that for-in enumeration used a cached prototype chain without + validating that it was up-to-date. + + This led me to refactor prototype chain caching so it was easier to work + with and harder to get wrong. + + After a bit of inlining, this patch is performance-neutral on SunSpider + and the v8 benchmarks. + + * interpreter/Interpreter.cpp: + (JSC::Interpreter::tryCachePutByID): + (JSC::Interpreter::tryCacheGetByID): + * jit/JITStubs.cpp: + (JSC::JITStubs::tryCachePutByID): + (JSC::JITStubs::tryCacheGetByID): + (JSC::JITStubs::cti_op_get_by_id_proto_list): Use the new refactored goodness. See + lines beginning with "-" and smile. + + * runtime/JSGlobalObject.h: + (JSC::Structure::prototypeForLookup): A shout out to const. 
+ + * runtime/JSPropertyNameIterator.h: + (JSC::JSPropertyNameIterator::next): We can use a pointer comparison to + see if our cached structure chain is equal to the object's structure chain, + since in the case of a cache hit, we share references to the same structure + chain. + + * runtime/Operations.h: + (JSC::countPrototypeChainEntriesAndCheckForProxies): Use the new refactored + goodness. + + * runtime/PropertyNameArray.h: + (JSC::PropertyNameArray::PropertyNameArray): + (JSC::PropertyNameArray::setShouldCache): + (JSC::PropertyNameArray::shouldCache): Renamed "cacheable" to "shouldCache" + to communicate that the client is specifying a recommendation, not a + capability. + + * runtime/Structure.cpp: + (JSC::Structure::Structure): No need to initialize a RefPtr. + (JSC::Structure::getEnumerablePropertyNames): Moved some code into helper + functions. + + (JSC::Structure::prototypeChain): New centralized accessor for a prototype + chain. Revalidates on every access, since the objects in the prototype + chain may have mutated. + + (JSC::Structure::isValid): Helper function for revalidating a cached + prototype chain. + + (JSC::Structure::getEnumerableNamesFromPropertyTable): + (JSC::Structure::getEnumerableNamesFromClassInfoTable): Factored out of + getEnumerablePropertyNames. + + * runtime/Structure.h: + + * runtime/StructureChain.cpp: + (JSC::StructureChain::StructureChain): + * runtime/StructureChain.h: + (JSC::StructureChain::create): No need for structureChainsAreEqual, since + we use pointer equality now. Refactored StructureChain to make a little + more sense and eliminate special cases for null prototypes. + +2009-02-25 Steve Falkenburg <sfalken@apple.com> + + Use timeBeginPeriod to enable timing resolution greater than 16ms in command line jsc for Windows. + Allows more accurate reporting of benchmark times via command line jsc.exe. Doesn't affect WebKit's use of JavaScriptCore. + + Reviewed by Adam Roben. 
+ + * jsc.cpp: + (main): + +2009-02-24 Geoffrey Garen <ggaren@apple.com> + + Build fix? + + * GNUmakefile.am: + +2009-02-24 Mark Rowe <mrowe@apple.com> + + Reviewed by Oliver Hunt. + + <rdar://problem/6259220> Rename AVAILABLE_AFTER_WEBKIT_VERSION_3_1 (etc.) to match the other macros + + * API/JSBasePrivate.h: + * API/JSContextRef.h: + * API/JSObjectRef.h: + * API/WebKitAvailability.h: + +2009-02-23 Geoffrey Garen <ggaren@apple.com> + + Reviewed by Sam Weinig. + + Next step in splitting JIT functionality out of the Interpreter class: + Moved vptr storage from Interpreter to JSGlobalData, so it could be shared + between Interpreter and JITStubs, and moved the *Trampoline JIT stubs + into the JITStubs class. Also added a VPtrSet class to encapsulate vptr + hacks during JSGlobalData initialization. + + SunSpider says 0.4% faster. Meh. + + * JavaScriptCore.exp: + * JavaScriptCore.xcodeproj/project.pbxproj: + * interpreter/Interpreter.cpp: + (JSC::Interpreter::Interpreter): + (JSC::Interpreter::tryCacheGetByID): + (JSC::Interpreter::privateExecute): + * interpreter/Interpreter.h: + * jit/JIT.cpp: + (JSC::JIT::privateCompileMainPass): + (JSC::JIT::privateCompile): + (JSC::JIT::privateCompileCTIMachineTrampolines): + * jit/JIT.h: + (JSC::JIT::compileCTIMachineTrampolines): + * jit/JITCall.cpp: + (JSC::JIT::compileOpCall): + (JSC::JIT::compileOpCallSlowCase): + * jit/JITPropertyAccess.cpp: + (JSC::JIT::privateCompilePatchGetArrayLength): + * jit/JITStubs.cpp: + (JSC::JITStubs::JITStubs): + (JSC::JITStubs::tryCacheGetByID): + (JSC::JITStubs::cti_vm_dontLazyLinkCall): + (JSC::JITStubs::cti_op_get_by_val): + (JSC::JITStubs::cti_op_get_by_val_byte_array): + (JSC::JITStubs::cti_op_put_by_val): + (JSC::JITStubs::cti_op_put_by_val_array): + (JSC::JITStubs::cti_op_put_by_val_byte_array): + (JSC::JITStubs::cti_op_is_string): + * jit/JITStubs.h: + (JSC::JITStubs::ctiArrayLengthTrampoline): + (JSC::JITStubs::ctiStringLengthTrampoline): + (JSC::JITStubs::ctiVirtualCallPreLink): + 
(JSC::JITStubs::ctiVirtualCallLink): + (JSC::JITStubs::ctiVirtualCall): + * runtime/ArrayPrototype.cpp: + (JSC::arrayProtoFuncPop): + (JSC::arrayProtoFuncPush): + * runtime/FunctionPrototype.cpp: + (JSC::functionProtoFuncApply): + * runtime/JSArray.h: + (JSC::isJSArray): + * runtime/JSByteArray.h: + (JSC::asByteArray): + (JSC::isJSByteArray): + * runtime/JSCell.h: + * runtime/JSFunction.h: + * runtime/JSGlobalData.cpp: + (JSC::VPtrSet::VPtrSet): + (JSC::JSGlobalData::JSGlobalData): + (JSC::JSGlobalData::create): + (JSC::JSGlobalData::sharedInstance): + * runtime/JSGlobalData.h: + * runtime/JSString.h: + (JSC::isJSString): + * runtime/Operations.h: + (JSC::jsLess): + (JSC::jsLessEq): + * wrec/WREC.cpp: + (JSC::WREC::Generator::compileRegExp): + +2009-02-23 Csaba Osztrogonac <oszi@inf.u-szeged.hu> + + Reviewed by Oliver Hunt. + + Bug 23787: Allow JIT to generate SSE2 code if using GCC + <https://bugs.webkit.org/show_bug.cgi?id=23787> + + GCC version of the cpuid check. + + * jit/JITArithmetic.cpp: + (JSC::isSSE2Present): previous assembly code fixed. + +2009-02-23 David Levin <levin@chromium.org> + + Reviewed by Alexey Proskuryakov. + + Bug 24047: Need to simplify nested if's in WorkerRunLoop::runInMode + <https://bugs.webkit.org/show_bug.cgi?id=24047> + + * wtf/MessageQueue.h: + (WTF::MessageQueue::infiniteTime): + Allows for one to call waitForMessageFilteredWithTimeout and wait forever. + + (WTF::MessageQueue::alwaysTruePredicate): + (WTF::MessageQueue::waitForMessage): + Made waitForMessage call waitForMessageFilteredWithTimeout, so that there is less + duplicate code. + + (WTF::MessageQueue::waitForMessageFilteredWithTimeout): + + * wtf/ThreadingQt.cpp: + (WTF::ThreadCondition::timedWait): + * wtf/ThreadingWin.cpp: + (WTF::ThreadCondition::timedWait): + Made these two implementations consistent with the pthread and gtk implementations. + Currently, the time calculations would overflow when passed large values. 
+ +2009-02-23 Jeremy Moskovich <jeremy@chromium.org> + + Reviewed by Adam Roben. + + https://bugs.webkit.org/show_bug.cgi?id=24096 + PLATFORM(MAC)->PLATFORM(CF) since we want to use the CF functions in Chrome on OS X. + + * wtf/CurrentTime.cpp: + +2009-02-22 Geoffrey Garen <ggaren@apple.com> + + Build fix? + + * GNUmakefile.am: + +2009-02-22 Geoffrey Garen <ggaren@apple.com> + + Build fix. + + * GNUmakefile.am: + +2009-02-22 Geoffrey Garen <ggaren@apple.com> + + Reviewed by Sam Weinig. + + Next step in splitting JIT functionality out of the Interpreter class: + Created a JITStubs class and renamed Interpreter::cti_* to JITStubs::cti_*. + + Also, moved timeout checking into its own class, located in JSGlobalData, + so both the Interpreter and the JIT could have access to it. + + * JavaScriptCore.exp: + * JavaScriptCore.pri: + * JavaScriptCore.scons: + * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj: + * JavaScriptCore.xcodeproj/project.pbxproj: + * JavaScriptCoreSources.bkl: + * interpreter/CallFrame.h: + * interpreter/Interpreter.cpp: + (JSC::Interpreter::Interpreter): + (JSC::Interpreter::privateExecute): + * interpreter/Interpreter.h: + * interpreter/Register.h: + * jit/JIT.cpp: + (JSC::): + (JSC::JIT::emitTimeoutCheck): + (JSC::JIT::privateCompileMainPass): + (JSC::JIT::privateCompileSlowCases): + (JSC::JIT::privateCompile): + (JSC::JIT::privateCompileCTIMachineTrampolines): + * jit/JIT.h: + * jit/JITArithmetic.cpp: + (JSC::JIT::compileFastArithSlow_op_lshift): + (JSC::JIT::compileFastArithSlow_op_rshift): + (JSC::JIT::compileFastArithSlow_op_bitand): + (JSC::JIT::compileFastArithSlow_op_mod): + (JSC::JIT::compileFastArith_op_mod): + (JSC::JIT::compileFastArithSlow_op_post_inc): + (JSC::JIT::compileFastArithSlow_op_post_dec): + (JSC::JIT::compileFastArithSlow_op_pre_inc): + (JSC::JIT::compileFastArithSlow_op_pre_dec): + (JSC::JIT::compileFastArith_op_add): + (JSC::JIT::compileFastArith_op_mul): + (JSC::JIT::compileFastArith_op_sub): + 
(JSC::JIT::compileBinaryArithOpSlowCase): + (JSC::JIT::compileFastArithSlow_op_add): + (JSC::JIT::compileFastArithSlow_op_mul): + * jit/JITCall.cpp: + (JSC::JIT::compileOpCall): + (JSC::JIT::compileOpCallSlowCase): + * jit/JITPropertyAccess.cpp: + (JSC::JIT::compileGetByIdHotPath): + (JSC::JIT::compilePutByIdHotPath): + (JSC::JIT::compileGetByIdSlowCase): + (JSC::JIT::compilePutByIdSlowCase): + (JSC::JIT::privateCompilePutByIdTransition): + (JSC::JIT::patchGetByIdSelf): + (JSC::JIT::patchPutByIdReplace): + (JSC::JIT::privateCompilePatchGetArrayLength): + (JSC::JIT::privateCompileGetByIdSelf): + (JSC::JIT::privateCompileGetByIdProto): + (JSC::JIT::privateCompileGetByIdChain): + (JSC::JIT::privateCompilePutByIdReplace): + * jit/JITStubs.cpp: + (JSC::JITStubs::tryCachePutByID): + (JSC::JITStubs::tryCacheGetByID): + (JSC::JITStubs::cti_op_convert_this): + (JSC::JITStubs::cti_op_end): + (JSC::JITStubs::cti_op_add): + (JSC::JITStubs::cti_op_pre_inc): + (JSC::JITStubs::cti_timeout_check): + (JSC::JITStubs::cti_register_file_check): + (JSC::JITStubs::cti_op_loop_if_less): + (JSC::JITStubs::cti_op_loop_if_lesseq): + (JSC::JITStubs::cti_op_new_object): + (JSC::JITStubs::cti_op_put_by_id_generic): + (JSC::JITStubs::cti_op_get_by_id_generic): + (JSC::JITStubs::cti_op_put_by_id): + (JSC::JITStubs::cti_op_put_by_id_second): + (JSC::JITStubs::cti_op_put_by_id_fail): + (JSC::JITStubs::cti_op_get_by_id): + (JSC::JITStubs::cti_op_get_by_id_second): + (JSC::JITStubs::cti_op_get_by_id_self_fail): + (JSC::JITStubs::cti_op_get_by_id_proto_list): + (JSC::JITStubs::cti_op_get_by_id_proto_list_full): + (JSC::JITStubs::cti_op_get_by_id_proto_fail): + (JSC::JITStubs::cti_op_get_by_id_array_fail): + (JSC::JITStubs::cti_op_get_by_id_string_fail): + (JSC::JITStubs::cti_op_instanceof): + (JSC::JITStubs::cti_op_del_by_id): + (JSC::JITStubs::cti_op_mul): + (JSC::JITStubs::cti_op_new_func): + (JSC::JITStubs::cti_op_call_JSFunction): + (JSC::JITStubs::cti_op_call_arityCheck): + 
(JSC::JITStubs::cti_vm_dontLazyLinkCall): + (JSC::JITStubs::cti_vm_lazyLinkCall): + (JSC::JITStubs::cti_op_push_activation): + (JSC::JITStubs::cti_op_call_NotJSFunction): + (JSC::JITStubs::cti_op_create_arguments): + (JSC::JITStubs::cti_op_create_arguments_no_params): + (JSC::JITStubs::cti_op_tear_off_activation): + (JSC::JITStubs::cti_op_tear_off_arguments): + (JSC::JITStubs::cti_op_profile_will_call): + (JSC::JITStubs::cti_op_profile_did_call): + (JSC::JITStubs::cti_op_ret_scopeChain): + (JSC::JITStubs::cti_op_new_array): + (JSC::JITStubs::cti_op_resolve): + (JSC::JITStubs::cti_op_construct_JSConstruct): + (JSC::JITStubs::cti_op_construct_NotJSConstruct): + (JSC::JITStubs::cti_op_get_by_val): + (JSC::JITStubs::cti_op_get_by_val_byte_array): + (JSC::JITStubs::cti_op_resolve_func): + (JSC::JITStubs::cti_op_sub): + (JSC::JITStubs::cti_op_put_by_val): + (JSC::JITStubs::cti_op_put_by_val_array): + (JSC::JITStubs::cti_op_put_by_val_byte_array): + (JSC::JITStubs::cti_op_lesseq): + (JSC::JITStubs::cti_op_loop_if_true): + (JSC::JITStubs::cti_op_negate): + (JSC::JITStubs::cti_op_resolve_base): + (JSC::JITStubs::cti_op_resolve_skip): + (JSC::JITStubs::cti_op_resolve_global): + (JSC::JITStubs::cti_op_div): + (JSC::JITStubs::cti_op_pre_dec): + (JSC::JITStubs::cti_op_jless): + (JSC::JITStubs::cti_op_not): + (JSC::JITStubs::cti_op_jtrue): + (JSC::JITStubs::cti_op_post_inc): + (JSC::JITStubs::cti_op_eq): + (JSC::JITStubs::cti_op_lshift): + (JSC::JITStubs::cti_op_bitand): + (JSC::JITStubs::cti_op_rshift): + (JSC::JITStubs::cti_op_bitnot): + (JSC::JITStubs::cti_op_resolve_with_base): + (JSC::JITStubs::cti_op_new_func_exp): + (JSC::JITStubs::cti_op_mod): + (JSC::JITStubs::cti_op_less): + (JSC::JITStubs::cti_op_neq): + (JSC::JITStubs::cti_op_post_dec): + (JSC::JITStubs::cti_op_urshift): + (JSC::JITStubs::cti_op_bitxor): + (JSC::JITStubs::cti_op_new_regexp): + (JSC::JITStubs::cti_op_bitor): + (JSC::JITStubs::cti_op_call_eval): + (JSC::JITStubs::cti_op_throw): + 
(JSC::JITStubs::cti_op_get_pnames): + (JSC::JITStubs::cti_op_next_pname): + (JSC::JITStubs::cti_op_push_scope): + (JSC::JITStubs::cti_op_pop_scope): + (JSC::JITStubs::cti_op_typeof): + (JSC::JITStubs::cti_op_is_undefined): + (JSC::JITStubs::cti_op_is_boolean): + (JSC::JITStubs::cti_op_is_number): + (JSC::JITStubs::cti_op_is_string): + (JSC::JITStubs::cti_op_is_object): + (JSC::JITStubs::cti_op_is_function): + (JSC::JITStubs::cti_op_stricteq): + (JSC::JITStubs::cti_op_nstricteq): + (JSC::JITStubs::cti_op_to_jsnumber): + (JSC::JITStubs::cti_op_in): + (JSC::JITStubs::cti_op_push_new_scope): + (JSC::JITStubs::cti_op_jmp_scopes): + (JSC::JITStubs::cti_op_put_by_index): + (JSC::JITStubs::cti_op_switch_imm): + (JSC::JITStubs::cti_op_switch_char): + (JSC::JITStubs::cti_op_switch_string): + (JSC::JITStubs::cti_op_del_by_val): + (JSC::JITStubs::cti_op_put_getter): + (JSC::JITStubs::cti_op_put_setter): + (JSC::JITStubs::cti_op_new_error): + (JSC::JITStubs::cti_op_debug): + (JSC::JITStubs::cti_vm_throw): + * jit/JITStubs.h: + (JSC::): + * runtime/JSFunction.h: + * runtime/JSGlobalData.cpp: + (JSC::JSGlobalData::JSGlobalData): + * runtime/JSGlobalData.h: + * runtime/JSGlobalObject.cpp: + * runtime/JSGlobalObject.h: + * runtime/TimeoutChecker.cpp: Copied from interpreter/Interpreter.cpp. + (JSC::TimeoutChecker::TimeoutChecker): + (JSC::TimeoutChecker::reset): + (JSC::TimeoutChecker::didTimeOut): + * runtime/TimeoutChecker.h: Copied from interpreter/Interpreter.h. + (JSC::TimeoutChecker::setTimeoutInterval): + (JSC::TimeoutChecker::ticksUntilNextCheck): + (JSC::TimeoutChecker::start): + (JSC::TimeoutChecker::stop): + +2009-02-20 Gustavo Noronha Silva <gns@gnome.org> + + Unreviewed build fix after r41100. + + * GNUmakefile.am: + +2009-02-20 Oliver Hunt <oliver@apple.com> + + Reviewed by Mark Rowe. 
+ + <rdar://problem/6606660> 2==null returns true in 64bit jit + + Code for op_eq_null and op_neq_null was incorrectly performing + a 32bit compare, which truncated the type tag from an integer + immediate, leading to incorrect behaviour. + + * assembler/MacroAssembler.h: + (JSC::MacroAssembler::setPtr): + * assembler/MacroAssemblerX86_64.h: + (JSC::MacroAssemblerX86_64::setPtr): + * jit/JIT.cpp: + (JSC::JIT::privateCompileMainPass): + +2009-02-19 Geoffrey Garen <ggaren@apple.com> + + Reviewed by Gavin Barraclough. + + First step in splitting JIT functionality out of the Interpreter class: + Created JITStubs.h/.cpp, and moved Interpreter::cti_* into JITStubs.cpp. + + Functions that the Interpreter and JITStubs share moved to Operations.h/.cpp. + + * GNUmakefile.am: + * JavaScriptCore.pri: + * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj: + * JavaScriptCore.xcodeproj/project.pbxproj: + * interpreter/Interpreter.cpp: + (JSC::Interpreter::resolveBase): + (JSC::Interpreter::checkTimeout): + (JSC::Interpreter::privateExecute): + * interpreter/Interpreter.h: + * jit/JITStubs.cpp: Copied from interpreter/Interpreter.cpp. + (JSC::Interpreter::cti_op_resolve_base): + * jit/JITStubs.h: Copied from interpreter/Interpreter.h. + * runtime/Operations.cpp: + (JSC::jsAddSlowCase): + (JSC::jsTypeStringForValue): + (JSC::jsIsObjectType): + (JSC::jsIsFunctionType): + * runtime/Operations.h: + (JSC::jsLess): + (JSC::jsLessEq): + (JSC::jsAdd): + (JSC::cachePrototypeChain): + (JSC::countPrototypeChainEntriesAndCheckForProxies): + (JSC::resolveBase): + +2009-02-19 Gavin Barraclough <barraclough@apple.com> + + Reviewed by Oliver Hunt. + + Fix for x86-64. Where the JavaScriptCore text segment lies outside + a 2gb range of the heap containing JIT generated code, callbacks + from JIT code to the stub functions in Interpreter will be incorrectly + linked. + + No performance impact on Sunspider, 1% regression on v8-tests, + due to a 3% regression on richards. 
+ + * assembler/AbstractMacroAssembler.h: + (JSC::AbstractMacroAssembler::Call::Call): + (JSC::AbstractMacroAssembler::Jump::link): + (JSC::AbstractMacroAssembler::Jump::linkTo): + (JSC::AbstractMacroAssembler::CodeLocationJump::relink): + (JSC::AbstractMacroAssembler::CodeLocationCall::relink): + (JSC::AbstractMacroAssembler::ProcessorReturnAddress::relinkCallerToFunction): + (JSC::AbstractMacroAssembler::PatchBuffer::link): + (JSC::AbstractMacroAssembler::PatchBuffer::linkTailRecursive): + (JSC::AbstractMacroAssembler::differenceBetween): + * assembler/MacroAssembler.h: + (JSC::MacroAssembler::tailRecursiveCall): + (JSC::MacroAssembler::makeTailRecursiveCall): + * assembler/MacroAssemblerX86.h: + (JSC::MacroAssemblerX86::call): + * assembler/MacroAssemblerX86Common.h: + * assembler/MacroAssemblerX86_64.h: + (JSC::MacroAssemblerX86_64::call): + (JSC::MacroAssemblerX86_64::moveWithPatch): + (JSC::MacroAssemblerX86_64::branchPtrWithPatch): + (JSC::MacroAssemblerX86_64::storePtrWithPatch): + * assembler/X86Assembler.h: + (JSC::X86Assembler::jmp_r): + (JSC::X86Assembler::linkJump): + (JSC::X86Assembler::patchJump): + (JSC::X86Assembler::patchCall): + (JSC::X86Assembler::linkCall): + (JSC::X86Assembler::patchAddress): + * interpreter/Interpreter.cpp: + (JSC::Interpreter::tryCTICachePutByID): + * jit/JIT.cpp: + (JSC::JIT::privateCompile): + (JSC::JIT::privateCompileCTIMachineTrampolines): + * jit/JIT.h: + * jit/JITArithmetic.cpp: + (JSC::JIT::putDoubleResultToJSNumberCellOrJSImmediate): + (JSC::JIT::compileBinaryArithOp): + * jit/JITPropertyAccess.cpp: + (JSC::JIT::privateCompilePutByIdTransition): + (JSC::JIT::privateCompileGetByIdSelf): + (JSC::JIT::privateCompilePutByIdReplace): + +2009-02-18 Geoffrey Garen <ggaren@apple.com> + + Reviewed by Oliver Hunt. + + Simplified .call and .apply in preparation for optimizing them. Also, + a little cleanup. 
+ + * runtime/FunctionPrototype.cpp: + (JSC::functionProtoFuncApply): + (JSC::functionProtoFuncCall): No need to do any specific conversion on + 'this' -- op_convert_this will do it if necessary. + + * runtime/JSImmediate.cpp: + (JSC::JSImmediate::toThisObject): Slightly relaxed the rules on + toThisObject to allow for 'undefined', which can be passed through + .call and .apply. + +2009-02-19 David Levin <levin@chromium.org> + + Reviewed by Alexey Proskuryakov. + + Bug 23976: MessageQueue needs a way to wait for a message that satisfies an arbitrary criteria. + <https://bugs.webkit.org/show_bug.cgi?id=23976> + + * wtf/Deque.h: + (WTF::Deque<T>::findIf): + * wtf/MessageQueue.h: + (WTF::MessageQueue<T>::waitForMessageFiltered): + +2009-02-18 David Levin <levin@chromium.org> + + Reviewed by Alexey Proskuryakov. + + Bug 23974: Deque::Remove would be a useful method. + <https://bugs.webkit.org/show_bug.cgi?id=23974> + + Add Deque::remove and DequeIteratorBase<T>::operator=. + + Why was operator= added? Every concrete iterator (DequeIterator..DequeConstReverseIterator) + was calling DequeIteratorBase::assign(), which called Base::operator=(). Base::operator=() + was not implemented. This went unnoticed because the iterator copy code has been unused. + + * wtf/Deque.h: + (WTF::Deque<T>::remove): + (WTF::DequeIteratorBase<T>::removeFromIteratorsList): + (WTF::DequeIteratorBase<T>::operator=): + (WTF::DequeIteratorBase<T>::~DequeIteratorBase): + +2009-02-18 Gustavo Noronha Silva <gns@gnome.org> + + Reviewed by Holger Freyther. + + Fix symbols.filter location, and add other missing files to the + autotools build, so that make dist works. + + * GNUmakefile.am: + +2009-02-17 Geoffrey Garen <ggaren@apple.com> + + Reviewed by Sam Weinig. + + Fixed failure in js1_5/Regress/regress-168347.js, as seen on the Oliver + bot. + + Technically, both behaviors are OK, but we might as well keep this test + passing. 
+ + * runtime/FunctionPrototype.cpp: + (JSC::insertSemicolonIfNeeded): No need to add a trailing semicolon + after a trailing '}', since '}' ends a block, indicating the end of a + statement. + +2009-02-17 Geoffrey Garen <ggaren@apple.com> + + Build fix. + + * runtime/FunctionPrototype.cpp: + +2009-02-17 Oliver Hunt <oliver@apple.com> + + Reviewed by Geoff Garen. + + Add assertion to guard against oversized pc relative calls. + + * assembler/X86Assembler.h: + (JSC::X86Assembler::link): + +2009-02-17 Geoffrey Garen <ggaren@apple.com> + + Reviewed by Sam Weinig. + + Fixed <rdar://problem/6595040> REGRESSION: http://www.amnestyusa.org/ + fails to load. + + amnestyusa.org uses the Optimist JavaScript library, which adds event + listeners by concatenating string-ified functions. This is only sure to + be syntactically valid if the string-ified functions end in semicolons. + + * parser/Lexer.cpp: + (JSC::Lexer::isWhiteSpace): + * parser/Lexer.h: + (JSC::Lexer::isWhiteSpace): + (JSC::Lexer::isLineTerminator): Added some helper functions for examining + whitespace. + + * runtime/FunctionPrototype.cpp: + (JSC::appendSemicolonIfNeeded): + (JSC::functionProtoFuncToString): When string-ifying a function, insert + a semicolon in the last non-whitespace position, if one doesn't already exist. + +2009-02-16 Oliver Hunt <oliver@apple.com> + + Reviewed by NOBODY (Build fix). + + Roll out r41022 as it breaks qt and gtk builds + + * jit/JITArithmetic.cpp: + (JSC::isSSE2Present): + +2009-02-16 Sam Weinig <sam@webkit.org> Reviewed by Geoffrey Garen. - Speed up String.prototype.fontsize. + Fix for <rdar://problem/6468156> + REGRESSION (r36779): Adding link, images, flash in TinyMCE blocks entire page (21382) - * runtime/StringPrototype.cpp: - (JSC::stringProtoFuncFontsize): Specialize for defined/commonly used values. + No performance regression. + + * runtime/Arguments.cpp: + (JSC::Arguments::fillArgList): Add codepath for when the "length" property has been + overridden. 
+ +2009-02-16 Mark Rowe <mrowe@apple.com> + + Build fix. + + * wtf/FastMalloc.cpp: + (WTF::TCMallocStats::): + (WTF::TCMallocStats::FastMallocZone::FastMallocZone): + +2009-02-16 Csaba Osztrogonac <oszi@inf.u-szeged.hu> + + Reviewed by Oliver Hunt. + + Bug 23787: Allow JIT to generate SSE2 code if using GCC + <https://bugs.webkit.org/show_bug.cgi?id=23787> + + GCC version of the cpuid check. + + * jit/JITArithmetic.cpp: + (JSC::isSSE2Present): GCC assembly code added. + 6.6% progression on x86 Linux with JIT and WREC on SunSpider if using SSE2 capable machine. -2009-02-13 Mark Rowe <mrowe@apple.com> +2009-02-13 Adam Treat <adam.treat@torchmobile.com> - Merge r41000. + Reviewed by George Staikos. - 2009-02-13 Gavin Barraclough <barraclough@apple.com> + https://bugs.webkit.org/show_bug.cgi?id=23960 + Crash Fix. + + Don't depend on 'initializeThreading()' to come before a call to 'isMainThread()' + as QtWebKit only calls 'initializeThreading()' during QWebPage construction. + + A client app may well make a call to QWebSettings::iconForUrl() for instance + before creating a QWebPage and that call to QWebSettings triggers an + ASSERT(isMainThread()) deep within WebCore. + + * wtf/ThreadingQt.cpp: + (WTF::isMainThread): + +2009-02-13 Gavin Barraclough <barraclough@apple.com> Reviewed by Darin Adler. @@ -62,11 +1673,7 @@ * parser/ResultType.h: (JSC::OperandTypes::OperandTypes): -2009-02-13 Mark Rowe <mrowe@apple.com> - - Merge r40995. - - 2009-02-13 Geoffrey Garen <ggaren@apple.com> +2009-02-13 Geoffrey Garen <ggaren@apple.com> Build fix for non_JIT platforms. @@ -74,17 +1681,13 @@ (JSC::CodeBlock::setIsNumericCompareFunction): (JSC::CodeBlock::isNumericCompareFunction): -2009-02-13 Mark Rowe <mrowe@apple.com> - - Merge r40993. - - 2009-02-13 Geoffrey Garen <ggaren@apple.com> +2009-02-13 Geoffrey Garen <ggaren@apple.com> Reviewed by Darin Adler. 
- + Fixed <rdar://problem/6584057> Optimize sort by JS numeric comparison function not to run the comparison function - + * bytecode/CodeBlock.cpp: (JSC::CodeBlock::CodeBlock): * bytecode/CodeBlock.h: @@ -127,11 +1730,48 @@ * runtime/JSGlobalData.h: Added helper data for computing the isNumericCompareFunction bit. -2009-02-13 Mark Rowe <mrowe@apple.com> +2009-02-13 Darin Adler <darin@apple.com> + + * Configurations/JavaScriptCore.xcconfig: Undo accidental commit of this file. + +2009-02-12 Darin Adler <darin@apple.com> + + Reviewed by Oliver Hunt and Alexey Proskuryakov. + + Speed up a couple string functions. + + * runtime/StringPrototype.cpp: + (JSC::stringProtoFuncIndexOf): Added a fast path for cases where the second + argument is either missing or an integer. + (JSC::stringProtoFuncBig): Use jsNontrivialString since the string is guaranteed + to be 2 or more characters long. + (JSC::stringProtoFuncSmall): Ditto. + (JSC::stringProtoFuncBlink): Ditto. + (JSC::stringProtoFuncBold): Ditto. + (JSC::stringProtoFuncItalics): Ditto. + (JSC::stringProtoFuncStrike): Ditto. + (JSC::stringProtoFuncSub): Ditto. + (JSC::stringProtoFuncSup): Ditto. + (JSC::stringProtoFuncFontcolor): Ditto. + (JSC::stringProtoFuncFontsize): Make the fast path Sam recently added even faster + by avoiding all but the minimum memory allocation. + (JSC::stringProtoFuncAnchor): Use jsNontrivialString. + (JSC::stringProtoFuncLink): Added a fast path. + + * runtime/UString.cpp: + (JSC::UString::find): Added a fast path for single-character search strings. + +2009-02-13 David Levin <levin@chromium.org> - Merge r40968. + Reviewed by Darin Adler. + + Bug 23926: Race condition in callOnMainThreadAndWait + <https://bugs.webkit.org/show_bug.cgi?id=23926> - 2009-02-13 Oliver Hunt <oliver@apple.com> + * wtf/MainThread.cpp: + Removed callOnMainThreadAndWait since it isn't used. + +2009-02-13 Oliver Hunt <oliver@apple.com> Reviewed by Jon Honeycutt. 
@@ -157,50 +1797,135 @@ * wtf/RandomNumberSeed.h: (WTF::initializeWeakRandomNumberGenerator): -2009-02-13 Mark Rowe <mrowe@apple.com> - - Merge r40967. - - 2009-02-12 Mark Rowe <mrowe@apple.com> +2009-02-12 Mark Rowe <mrowe@apple.com> Fix the build for other platforms. * wtf/RandomNumber.cpp: (WTF::randomNumber): -2009-02-13 Mark Rowe <mrowe@apple.com> +2009-02-12 Gavin Barraclough <barraclough@apple.com> - Merge r40937. + Reviewed by Sam Weinig. - 2009-02-12 Geoffrey Garen <ggaren@apple.com> + Remove (/reduce) use of hard-wired register names from the JIT. + Currently there is no abstraction of registers used in the JIT, + which has a number of negative consequences. Hard-wiring x86 + register names makes the JIT less portable to other platforms, + and prevents us from performing dynamic register allocation to + attempt to maintain more temporary values in machine registers. + (The latter will be more important on x86-64, where we have more + registers to make use of). - Reviewed by Sam Weinig. + Also, remove MacroAssembler::mod32. This was not providing a + useful abstraction, and was not in keeping with the rest of the + MacroAssembler interface, in having specific register requirements. 
+ + * assembler/MacroAssemblerX86Common.h: + * jit/JIT.cpp: + (JSC::JIT::compileOpStrictEq): + (JSC::JIT::emitSlowScriptCheck): + (JSC::JIT::privateCompileMainPass): + (JSC::JIT::privateCompileSlowCases): + (JSC::JIT::privateCompile): + (JSC::JIT::privateCompileCTIMachineTrampolines): + * jit/JIT.h: + * jit/JITArithmetic.cpp: + (JSC::JIT::compileFastArith_op_lshift): + (JSC::JIT::compileFastArithSlow_op_lshift): + (JSC::JIT::compileFastArith_op_rshift): + (JSC::JIT::compileFastArithSlow_op_rshift): + (JSC::JIT::compileFastArith_op_bitand): + (JSC::JIT::compileFastArithSlow_op_bitand): + (JSC::JIT::compileFastArith_op_mod): + (JSC::JIT::compileFastArithSlow_op_mod): + (JSC::JIT::compileFastArith_op_post_inc): + (JSC::JIT::compileFastArithSlow_op_post_inc): + (JSC::JIT::compileFastArith_op_post_dec): + (JSC::JIT::compileFastArithSlow_op_post_dec): + (JSC::JIT::compileFastArith_op_pre_inc): + (JSC::JIT::compileFastArithSlow_op_pre_inc): + (JSC::JIT::compileFastArith_op_pre_dec): + (JSC::JIT::compileFastArithSlow_op_pre_dec): + (JSC::JIT::compileFastArith_op_add): + (JSC::JIT::compileFastArith_op_mul): + (JSC::JIT::compileFastArith_op_sub): + (JSC::JIT::compileBinaryArithOp): + * jit/JITCall.cpp: + (JSC::JIT::compileOpCallInitializeCallFrame): + (JSC::JIT::compileOpCallSetupArgs): + (JSC::JIT::compileOpCallEvalSetupArgs): + (JSC::JIT::compileOpConstructSetupArgs): + (JSC::JIT::compileOpCall): + (JSC::JIT::compileOpCallSlowCase): + * jit/JITInlineMethods.h: + (JSC::JIT::emitGetVirtualRegister): + (JSC::JIT::emitPutVirtualRegister): + (JSC::JIT::emitNakedCall): + (JSC::JIT::restoreArgumentReference): + (JSC::JIT::restoreArgumentReferenceForTrampoline): + * jit/JITPropertyAccess.cpp: + (JSC::JIT::compileGetByIdHotPath): + (JSC::JIT::compilePutByIdHotPath): + (JSC::JIT::compileGetByIdSlowCase): + (JSC::JIT::compilePutByIdSlowCase): + (JSC::JIT::privateCompilePutByIdTransition): + (JSC::JIT::privateCompilePatchGetArrayLength): + (JSC::JIT::privateCompileGetByIdSelf): + 
(JSC::JIT::privateCompileGetByIdProto): + (JSC::JIT::privateCompileGetByIdSelfList): + (JSC::JIT::privateCompileGetByIdProtoList): + (JSC::JIT::privateCompileGetByIdChainList): + (JSC::JIT::privateCompileGetByIdChain): + (JSC::JIT::privateCompilePutByIdReplace): + +2009-02-12 Horia Olaru <olaru@adobe.com> + + Reviewed by Oliver Hunt. + + https://bugs.webkit.org/show_bug.cgi?id=23400 + + When throwing an exception within an eval argument string, the dst parameter was + modified in the functions below and the return value for eval was altered. Changed + the emitNode call in JSC::ThrowNode::emitBytecode to use a temporary register + to store its results instead of dst. The JSC::FunctionCallResolveNode::emitBytecode + would load the function within the dst registry, also altering the result returned + by eval. Replaced it with another temporary. + * parser/Nodes.cpp: + (JSC::FunctionCallResolveNode::emitBytecode): + (JSC::ThrowNode::emitBytecode): + +2009-02-12 Sam Weinig <sam@webkit.org> + + Reviewed by Geoffrey Garen. + + Speed up String.prototype.fontsize. + + * runtime/StringPrototype.cpp: + (JSC::stringProtoFuncFontsize): Specialize for defined/commonly used values. + +2009-02-12 Geoffrey Garen <ggaren@apple.com> + + Reviewed by Sam Weinig. + Correctness fix. * wtf/RandomNumber.cpp: (WTF::randomNumber): Divide by the maximum representable value, which is different on each platform now, to get values between 0 and 1. -2009-02-13 Mark Rowe <mrowe@apple.com> - - Merge r40935. - - 2009-02-12 Geoffrey Garen <ggaren@apple.com> +2009-02-12 Geoffrey Garen <ggaren@apple.com> Build fix. * wtf/RandomNumber.cpp: (WTF::randomNumber): -2009-02-13 Mark Rowe <mrowe@apple.com> - - Merge r40932. - - 2009-02-12 Geoffrey Garen <ggaren@apple.com> +2009-02-12 Geoffrey Garen <ggaren@apple.com> Reviewed by Sam Weinig. - + Fixed <rdar://problem/6582048>. 
* wtf/RandomNumber.cpp: @@ -209,11 +1934,654 @@ of randomness over and above cryptographically secure randomness is not clear, and it caused some performance problems. -2009-02-03 Mark Rowe <mrowe@apple.com> +2009-02-12 Adam Roben <aroben@apple.com> + + Fix lots of Perl warnings when building JavaScriptCoreGenerated on + Windows + + Reviewed by John Sullivan. - Merge r40522. + * JavaScriptCore.vcproj/JavaScriptCore/build-generated-files.sh: + Create the docs/ directory so that we can write bytecode.html into it. + This matches what JavaScriptCore.xcodeproj does. + +2009-02-12 Simon Hausmann <simon.hausmann@nokia.com> + + Rubber-stamped by Lars. + + Re-enable the JIT in the Qt build with -fno-stack-protector on Linux. + + * JavaScriptCore.pri: - 2009-02-02 Oliver Hunt <oliver@apple.com> +2009-02-11 Dmitry Titov <dimich@chromium.org> + + Reviewed by Alexey Proskuryakov. + + https://bugs.webkit.org/show_bug.cgi?id=23705 + Fix the UI freeze caused by Worker generating a flood of messages. + Measure time we spend in executing posted work items. If too much time is spent + without returning to the run loop, exit and reschedule. + + * wtf/MainThread.h: + Added initializeMainThreadPlatform() to initialize low-level mechanism for posting + work items from thread to thread. This removes #ifdefs for WIN and CHROMIUM from platform-independent code. + + * wtf/MainThread.cpp: + (WTF::initializeMainThread): + (WTF::dispatchFunctionsFromMainThread): + Instead of dispatching all work items in the queue, dispatch them one by one + and measure elapsed time. After a threshold, reschedule and quit. + + (WTF::callOnMainThread): + (WTF::callOnMainThreadAndWait): + Only schedule dispatch if the queue was empty - to avoid many posted messages in the run loop queue. + + * wtf/mac/MainThreadMac.mm: + (WTF::scheduleDispatchFunctionsOnMainThread): + Use static instance of the mainThreadCaller instead of allocating and releasing it each time. 
+ (WTF::initializeMainThreadPlatform): + * wtf/gtk/MainThreadChromium.cpp: + (WTF::initializeMainThreadPlatform): + * wtf/gtk/MainThreadGtk.cpp: + (WTF::initializeMainThreadPlatform): + * wtf/qt/MainThreadQt.cpp: + (WTF::initializeMainThreadPlatform): + * wtf/win/MainThreadWin.cpp: + (WTF::initializeMainThreadPlatform): + * wtf/wx/MainThreadWx.cpp: + (WTF::initializeMainThreadPlatform): + +2009-02-11 Sam Weinig <sam@webkit.org> + + Reviewed by Gavin Barraclough. + + Style cleanup. + + * assembler/AbstractMacroAssembler.h: + (JSC::AbstractMacroAssembler::CodeLocationCommon::CodeLocationCommon): + (JSC::AbstractMacroAssembler::CodeLocationCommon::operator bool): + (JSC::AbstractMacroAssembler::CodeLocationCommon::reset): + (JSC::AbstractMacroAssembler::CodeLocationLabel::addressForSwitch): + (JSC::AbstractMacroAssembler::CodeLocationLabel::addressForExceptionHandler): + (JSC::AbstractMacroAssembler::CodeLocationLabel::addressForJSR): + (JSC::AbstractMacroAssembler::CodeLocationLabel::getJumpDestination): + (JSC::AbstractMacroAssembler::CodeLocationJump::relink): + (JSC::AbstractMacroAssembler::CodeLocationJump::CodeLocationJump): + (JSC::AbstractMacroAssembler::CodeLocationCall::relink): + (JSC::AbstractMacroAssembler::CodeLocationCall::calleeReturnAddressValue): + (JSC::AbstractMacroAssembler::CodeLocationCall::CodeLocationCall): + (JSC::AbstractMacroAssembler::CodeLocationDataLabel32::repatch): + (JSC::AbstractMacroAssembler::CodeLocationDataLabel32::CodeLocationDataLabel32): + (JSC::AbstractMacroAssembler::CodeLocationDataLabelPtr::repatch): + (JSC::AbstractMacroAssembler::CodeLocationDataLabelPtr::CodeLocationDataLabelPtr): + (JSC::AbstractMacroAssembler::ProcessorReturnAddress::ProcessorReturnAddress): + (JSC::AbstractMacroAssembler::ProcessorReturnAddress::relinkCallerToFunction): + (JSC::AbstractMacroAssembler::ProcessorReturnAddress::operator void*): + (JSC::AbstractMacroAssembler::PatchBuffer::link): + (JSC::::CodeLocationCommon::labelAtOffset): + 
(JSC::::CodeLocationCommon::jumpAtOffset): + (JSC::::CodeLocationCommon::callAtOffset): + (JSC::::CodeLocationCommon::dataLabelPtrAtOffset): + (JSC::::CodeLocationCommon::dataLabel32AtOffset): + +2009-02-11 Sam Weinig <sam@webkit.org> + + Reviewed by Gavin Barraclough. + + * assembler/AbstractMacroAssembler.h: Fix comments. + +2009-02-11 Alexey Proskuryakov <ap@webkit.org> + + Trying to fix wx build. + + * bytecode/JumpTable.h: Include "MacroAssembler.h", not <MacroAssembler.h>. + * jscore.bkl: Added assembler directory to search paths. + +2009-02-10 Gavin Barraclough <barraclough@apple.com> + + Build + fix. + (Narrow + changelog + for + dhyatt). + + * bytecode/Instruction.h: + (JSC::PolymorphicAccessStructureList::PolymorphicStubInfo::set): + (JSC::PolymorphicAccessStructureList::PolymorphicAccessStructureList): + +2009-02-10 Gavin Barraclough <barraclough@apple.com> + + Reviewed by Oliver Hunt. + + Reduce use of void* / reinterpret_cast in JIT repatching code, + add strong types for Calls and for the various types of pointers + we retain into the JIT generated instruction stream. + + No performance impact. 
+ + * assembler/AbstractMacroAssembler.h: + (JSC::AbstractMacroAssembler::ImmPtr::ImmPtr): + (JSC::AbstractMacroAssembler::ImmPtr::asIntptr): + (JSC::AbstractMacroAssembler::Imm32::Imm32): + (JSC::AbstractMacroAssembler::Label::Label): + (JSC::AbstractMacroAssembler::DataLabelPtr::DataLabelPtr): + (JSC::AbstractMacroAssembler::Call::Call): + (JSC::AbstractMacroAssembler::Call::link): + (JSC::AbstractMacroAssembler::Call::linkTo): + (JSC::AbstractMacroAssembler::Jump::Jump): + (JSC::AbstractMacroAssembler::Jump::linkTo): + (JSC::AbstractMacroAssembler::CodeLocationCommon::CodeLocationCommon): + (JSC::AbstractMacroAssembler::CodeLocationCommon::operator bool): + (JSC::AbstractMacroAssembler::CodeLocationCommon::reset): + (JSC::AbstractMacroAssembler::CodeLocationLabel::CodeLocationLabel): + (JSC::AbstractMacroAssembler::CodeLocationLabel::addressForSwitch): + (JSC::AbstractMacroAssembler::CodeLocationLabel::addressForExceptionHandler): + (JSC::AbstractMacroAssembler::CodeLocationLabel::addressForJSR): + (JSC::AbstractMacroAssembler::CodeLocationLabel::getJumpDestination): + (JSC::AbstractMacroAssembler::CodeLocationJump::CodeLocationJump): + (JSC::AbstractMacroAssembler::CodeLocationJump::relink): + (JSC::AbstractMacroAssembler::CodeLocationCall::CodeLocationCall): + (JSC::AbstractMacroAssembler::CodeLocationCall::relink): + (JSC::AbstractMacroAssembler::CodeLocationCall::calleeReturnAddressValue): + (JSC::AbstractMacroAssembler::CodeLocationDataLabel32::CodeLocationDataLabel32): + (JSC::AbstractMacroAssembler::CodeLocationDataLabel32::repatch): + (JSC::AbstractMacroAssembler::CodeLocationDataLabelPtr::CodeLocationDataLabelPtr): + (JSC::AbstractMacroAssembler::CodeLocationDataLabelPtr::repatch): + (JSC::AbstractMacroAssembler::ProcessorReturnAddress::ProcessorReturnAddress): + (JSC::AbstractMacroAssembler::ProcessorReturnAddress::relinkCallerToFunction): + (JSC::AbstractMacroAssembler::ProcessorReturnAddress::operator void*): + 
(JSC::AbstractMacroAssembler::PatchBuffer::entry): + (JSC::AbstractMacroAssembler::PatchBuffer::trampolineAt): + (JSC::AbstractMacroAssembler::PatchBuffer::link): + (JSC::AbstractMacroAssembler::PatchBuffer::linkTailRecursive): + (JSC::AbstractMacroAssembler::PatchBuffer::patch): + (JSC::AbstractMacroAssembler::PatchBuffer::locationOf): + (JSC::AbstractMacroAssembler::PatchBuffer::returnAddressOffset): + (JSC::AbstractMacroAssembler::differenceBetween): + (JSC::::CodeLocationCommon::labelAtOffset): + (JSC::::CodeLocationCommon::jumpAtOffset): + (JSC::::CodeLocationCommon::callAtOffset): + (JSC::::CodeLocationCommon::dataLabelPtrAtOffset): + (JSC::::CodeLocationCommon::dataLabel32AtOffset): + * assembler/MacroAssemblerX86Common.h: + (JSC::MacroAssemblerX86Common::call): + * assembler/X86Assembler.h: + (JSC::X86Assembler::getCallReturnOffset): + * bytecode/CodeBlock.h: + (JSC::CallLinkInfo::CallLinkInfo): + (JSC::getStructureStubInfoReturnLocation): + (JSC::getCallLinkInfoReturnLocation): + * bytecode/Instruction.h: + (JSC::PolymorphicAccessStructureList::PolymorphicStubInfo::set): + (JSC::PolymorphicAccessStructureList::PolymorphicAccessStructureList): + * bytecode/JumpTable.h: + (JSC::StringJumpTable::ctiForValue): + (JSC::SimpleJumpTable::ctiForValue): + * bytecode/StructureStubInfo.h: + (JSC::StructureStubInfo::StructureStubInfo): + * bytecompiler/BytecodeGenerator.cpp: + (JSC::BytecodeGenerator::emitCatch): + (JSC::prepareJumpTableForStringSwitch): + * interpreter/Interpreter.cpp: + (JSC::Interpreter::cti_op_get_by_id_self_fail): + (JSC::getPolymorphicAccessStructureListSlot): + (JSC::Interpreter::cti_op_throw): + (JSC::Interpreter::cti_op_switch_imm): + (JSC::Interpreter::cti_op_switch_char): + (JSC::Interpreter::cti_op_switch_string): + (JSC::Interpreter::cti_vm_throw): + * jit/JIT.cpp: + (JSC::ctiSetReturnAddress): + (JSC::ctiPatchCallByReturnAddress): + (JSC::JIT::privateCompile): + (JSC::JIT::privateCompileCTIMachineTrampolines): + * jit/JIT.h: + 
(JSC::CallRecord::CallRecord): + (JSC::JIT::compileGetByIdSelf): + (JSC::JIT::compileGetByIdProto): + (JSC::JIT::compileGetByIdChain): + (JSC::JIT::compilePutByIdReplace): + (JSC::JIT::compilePutByIdTransition): + (JSC::JIT::compilePatchGetArrayLength): + (JSC::JIT::emitCTICall): + * jit/JITCall.cpp: + (JSC::JIT::unlinkCall): + (JSC::JIT::linkCall): + * jit/JITInlineMethods.h: + (JSC::JIT::emitNakedCall): + (JSC::JIT::emitCTICall_internal): + * jit/JITPropertyAccess.cpp: + (JSC::JIT::compileGetByIdSlowCase): + (JSC::JIT::compilePutByIdSlowCase): + (JSC::JIT::privateCompilePutByIdTransition): + (JSC::JIT::patchGetByIdSelf): + (JSC::JIT::patchPutByIdReplace): + (JSC::JIT::privateCompilePatchGetArrayLength): + (JSC::JIT::privateCompileGetByIdSelf): + (JSC::JIT::privateCompileGetByIdProto): + (JSC::JIT::privateCompileGetByIdSelfList): + (JSC::JIT::privateCompileGetByIdProtoList): + (JSC::JIT::privateCompileGetByIdChainList): + (JSC::JIT::privateCompileGetByIdChain): + (JSC::JIT::privateCompilePutByIdReplace): + +2009-02-10 Adam Roben <aroben@apple.com> + + Windows build fix after r40813 + + * JavaScriptCore.vcproj/jsc/jsc.vcproj: Added profiler/ to the include + path so that Profiler.h can be found. + +2009-02-09 Gavin Barraclough <barraclough@apple.com> + + Reviewed by Oliver Hunt. + + Provide a class type for a generated block of JIT code. + Also changes the return address -> bytecode index map to + track the return addess as an unsigned offset into the code + instead of a ptrdiff_t in terms of void**s - the latter is + equal to the actual offset / sizeof(void*), making it a + potentially lossy representation. 
+ + * JavaScriptCore.xcodeproj/project.pbxproj: + * assembler/AbstractMacroAssembler.h: + (JSC::AbstractMacroAssembler::PatchBuffer::returnAddressOffset): + * assembler/X86Assembler.h: + (JSC::X86Assembler::getCallReturnOffset): + * bytecode/CodeBlock.h: + (JSC::CallReturnOffsetToBytecodeIndex::CallReturnOffsetToBytecodeIndex): + (JSC::getCallReturnOffset): + (JSC::CodeBlock::getBytecodeIndex): + (JSC::CodeBlock::jitCode): + (JSC::CodeBlock::callReturnIndexVector): + * interpreter/Interpreter.cpp: + (JSC::Interpreter::execute): + (JSC::Interpreter::cti_vm_dontLazyLinkCall): + (JSC::Interpreter::cti_vm_lazyLinkCall): + * jit/JIT.cpp: + (JSC::JIT::privateCompile): + * jit/JIT.h: + (JSC::): + * jit/JITCall.cpp: + (JSC::JIT::linkCall): + * jit/JITCode.h: Added. + (JSC::): + (JSC::JITCode::JITCode): + (JSC::JITCode::operator bool): + (JSC::JITCode::addressForCall): + (JSC::JITCode::offsetOf): + (JSC::JITCode::execute): + +2009-02-09 John Grabowski <jrg@chromium.org> + + Reviewed by Darin Adler. + + https://bugs.webkit.org/show_bug.cgi?id=23856 + Change the definition of "main thread" for Chromium on OSX. + It does not match the DARWIN definition. + + * wtf/ThreadingPthreads.cpp: + (WTF::initializeThreading): + (WTF::isMainThread): + +2009-02-09 Gavin Barraclough <barraclough@apple.com> + + Reviewed by Oliver Hunt. + + Minor bugfix, incorrect check meant that subtraction causing integer overflow + would be missed on x86-64 JIT. + + * jit/JITArithmetic.cpp: + (JSC::JIT::compileBinaryArithOp): + +2009-02-09 Gavin Barraclough <barraclough@apple.com> + + Reviewed by Oliver Hunt. + + A more sensible register allocation for x86-64. + + When WREC was ported to x86-64 it stuck with the same register allocation as x86. + This requires registers to be reordered on entry into WREC generated code, since + argument passing is different on x86-64 and x86 (regparm(3)). 
This patch switches + x86-64 to use a native register allocation, that does not require argument registers + to be reordered. + + * wrec/WRECGenerator.cpp: + (JSC::WREC::Generator::generateEnter): + (JSC::WREC::Generator::generateReturnSuccess): + (JSC::WREC::Generator::generateReturnFailure): + * wrec/WRECGenerator.h: + +2009-02-05 Adam Roben <aroben@apple.com> + + Build fix + + Rubberstamped by Sam Weinig. + + * wtf/TypeTraits.h: Include Platform.h, since this header uses macros + defined there. + +2009-02-05 Dimitri Glazkov <dglazkov@chromium.org> + + Reviewed by Eric Seidel. + + https://bugs.webkit.org/show_bug.cgi?id=23747 + Add Chromium threading-related files. + + * wtf/MainThread.cpp: Added platform guard to initializeMainThread. + * wtf/chromium/ChromiumThreading.h: Added. + * wtf/chromium/MainThreadChromium.cpp: Added. + (WTF::initializeMainThread): + (WTF::scheduleDispatchFunctionsOnMainThread): + +2009-02-05 David Levin <levin@chromium.org> + + Reviewed by Darin Adler. + + Bug 23713: COMPILE_ASSERTS should be moved out of TypeTraits.h and into .cpp file + <https://bugs.webkit.org/show_bug.cgi?id=23713> + + * GNUmakefile.am: + * JavaScriptCore.pri: + * JavaScriptCore.scons: + * JavaScriptCore.vcproj/WTF/WTF.vcproj: + * JavaScriptCore.xcodeproj/project.pbxproj: + * JavaScriptCoreSources.bkl: + + * wtf/HashTraits.h: + Remove unnecessary header file that I missed when moving out the type traits form this file. + + * wtf/TypeTraits.cpp: Added. + (WTF::): + * wtf/TypeTraits.h: + Moved the compile asserts into TypeTraits.cpp file. + +2009-02-04 Gavin Barraclough <barraclough@apple.com> + + Reviewed by Oliver 'the nun' Hunt. + + Add -e switch to jsc to enable evaluation of scripts passed on the command line. + + * jsc.cpp: + (Script::Script): + (runWithScripts): + (printUsageStatement): + (parseArguments): + (jscmain): + +2009-02-04 Gavin Barraclough <barraclough@apple.com> + + Rubber stamped by Sam 'Big Mac' Weinig. 
+ + * assembler/AbstractMacroAssembler.h: Copied from assembler/MacroAssembler.h. + * assembler/MacroAssemblerX86.h: Copied from assembler/MacroAssembler.h. + * assembler/MacroAssemblerX86Common.h: Copied from assembler/MacroAssembler.h. + * assembler/MacroAssemblerX86_64.h: Copied from assembler/MacroAssembler.h. + +2009-02-04 Gavin Barraclough <barraclough@apple.com> + + Reviewed by Sam Weinig. + + This patch tidies up the MacroAssembler, cleaning up the code and refactoring out the + platform-specific parts. The MacroAssembler gets split up like a beef burger, with the + platform-agnostic data types being the lower bun (in the form of the class AbstractMacroAssembler), + the plaform-specific code generation forming a big meaty patty of methods like 'add32', + 'branch32', etc (MacroAssemblerX86), and finally topped off with the bun-lid of the + MacroAssembler class itself, providing covenience methods such as the stack peek & poke, + and backwards branch methods, all of which can be described in a platform independent + way using methods from the base class. The AbstractMacroAssembler is templated on the + type of the assembler class that will be used for code generation, and the three layers + are held together with the cocktail stick of inheritance. + + The above description is a slight simplification since the MacroAssemblerX86 is actually + formed from two layers (in effect giving us a kind on bacon double cheeseburger) - with the + bulk of methods that are common between x86 & x86-64 implemented in MacroAssemblerX86Common, + which forms a base class for MacroAssemblerX86 and MacroAssemblerX86_64 (which add the methods + specific to the given platform). + + I'm landing these changes first without splitting the classes across multiple files, + I will follow up with a second patch to split up the file MacroAssembler.h. 
+ + * assembler/MacroAssembler.h: + (JSC::AbstractMacroAssembler::): + (JSC::AbstractMacroAssembler::DataLabelPtr::DataLabelPtr): + (JSC::AbstractMacroAssembler::DataLabelPtr::patch): + (JSC::AbstractMacroAssembler::DataLabel32::DataLabel32): + (JSC::AbstractMacroAssembler::DataLabel32::patch): + (JSC::AbstractMacroAssembler::Label::Label): + (JSC::AbstractMacroAssembler::Jump::Jump): + (JSC::AbstractMacroAssembler::Jump::link): + (JSC::AbstractMacroAssembler::Jump::linkTo): + (JSC::AbstractMacroAssembler::Jump::patch): + (JSC::AbstractMacroAssembler::JumpList::link): + (JSC::AbstractMacroAssembler::JumpList::linkTo): + (JSC::AbstractMacroAssembler::PatchBuffer::link): + (JSC::AbstractMacroAssembler::PatchBuffer::addressOf): + (JSC::AbstractMacroAssembler::PatchBuffer::setPtr): + (JSC::AbstractMacroAssembler::size): + (JSC::AbstractMacroAssembler::copyCode): + (JSC::AbstractMacroAssembler::label): + (JSC::AbstractMacroAssembler::align): + (JSC::AbstractMacroAssembler::differenceBetween): + (JSC::MacroAssemblerX86Common::xor32): + (JSC::MacroAssemblerX86Common::load32WithAddressOffsetPatch): + (JSC::MacroAssemblerX86Common::store32WithAddressOffsetPatch): + (JSC::MacroAssemblerX86Common::move): + (JSC::MacroAssemblerX86Common::swap): + (JSC::MacroAssemblerX86Common::signExtend32ToPtr): + (JSC::MacroAssemblerX86Common::zeroExtend32ToPtr): + (JSC::MacroAssemblerX86Common::branch32): + (JSC::MacroAssemblerX86Common::jump): + (JSC::MacroAssemblerX86_64::add32): + (JSC::MacroAssemblerX86_64::sub32): + (JSC::MacroAssemblerX86_64::load32): + (JSC::MacroAssemblerX86_64::store32): + (JSC::MacroAssemblerX86_64::addPtr): + (JSC::MacroAssemblerX86_64::andPtr): + (JSC::MacroAssemblerX86_64::orPtr): + (JSC::MacroAssemblerX86_64::rshiftPtr): + (JSC::MacroAssemblerX86_64::subPtr): + (JSC::MacroAssemblerX86_64::xorPtr): + (JSC::MacroAssemblerX86_64::loadPtr): + (JSC::MacroAssemblerX86_64::loadPtrWithAddressOffsetPatch): + (JSC::MacroAssemblerX86_64::storePtr): + 
(JSC::MacroAssemblerX86_64::storePtrWithAddressOffsetPatch): + (JSC::MacroAssemblerX86_64::branchPtr): + (JSC::MacroAssemblerX86_64::branchTestPtr): + (JSC::MacroAssemblerX86_64::branchAddPtr): + (JSC::MacroAssemblerX86_64::branchSubPtr): + (JSC::MacroAssemblerX86_64::branchPtrWithPatch): + (JSC::MacroAssemblerX86_64::storePtrWithPatch): + (JSC::MacroAssemblerX86::add32): + (JSC::MacroAssemblerX86::sub32): + (JSC::MacroAssemblerX86::load32): + (JSC::MacroAssemblerX86::store32): + (JSC::MacroAssemblerX86::branch32): + (JSC::MacroAssemblerX86::branchPtrWithPatch): + (JSC::MacroAssemblerX86::storePtrWithPatch): + (JSC::MacroAssembler::pop): + (JSC::MacroAssembler::peek): + (JSC::MacroAssembler::poke): + (JSC::MacroAssembler::branchPtr): + (JSC::MacroAssembler::branch32): + (JSC::MacroAssembler::branch16): + (JSC::MacroAssembler::branchTestPtr): + (JSC::MacroAssembler::addPtr): + (JSC::MacroAssembler::andPtr): + (JSC::MacroAssembler::orPtr): + (JSC::MacroAssembler::rshiftPtr): + (JSC::MacroAssembler::subPtr): + (JSC::MacroAssembler::xorPtr): + (JSC::MacroAssembler::loadPtr): + (JSC::MacroAssembler::loadPtrWithAddressOffsetPatch): + (JSC::MacroAssembler::storePtr): + (JSC::MacroAssembler::storePtrWithAddressOffsetPatch): + (JSC::MacroAssembler::branchAddPtr): + (JSC::MacroAssembler::branchSubPtr): + * jit/JITArithmetic.cpp: + (JSC::JIT::compileBinaryArithOp): + +2009-02-04 Alexey Proskuryakov <ap@webkit.org> + + Reviewed by Sam Weinig. + + https://bugs.webkit.org/show_bug.cgi?id=23681 + Worker tests crash in debug builds if run --singly + + The crash happened because worker threads continued running while debug-only static objects + were already being destroyed on main thread. + + * runtime/Structure.cpp: Create static debug-only sets in heap, so that they don't get + destroyed. + + * wtf/ThreadingPthreads.cpp: Changed assertions to conventional form. + +2009-02-03 Gavin Barraclough <barraclough@apple.com> + + Reviewed by Geoff Garen. 
+ + https://bugs.webkit.org/show_bug.cgi?id=23715 + + Simplify MacroAssembler interface, by combining comparison methods. + Seprate operations are combined as follows: + jz32/jnz32/jzPtr/jnzPtr -> branchTest32/branchTestPtr, + j*(Add|Mul|Sub)32/j*(Add|Mul|Sub)Ptr -> branch(Add|Mul|Sub)32/branch(Add|Mul|Sub)Ptr + j*32/j*Ptr (all other two op combparisons) -> branch32/brnachPtr + set*32 -> set32 + + Also, represent the Scale of BaseIndex addresses as a plain enum (0,1,2,3), + instead of as multiplicands (1,2,4,8). + + This patch singificantly reduces replication of code, and increases functionality supported + by the MacroAssembler. No performance impact. + + * assembler/MacroAssembler.h: + (JSC::MacroAssembler::): + (JSC::MacroAssembler::branchPtr): + (JSC::MacroAssembler::branchPtrWithPatch): + (JSC::MacroAssembler::branch32): + (JSC::MacroAssembler::branch16): + (JSC::MacroAssembler::branchTestPtr): + (JSC::MacroAssembler::branchTest32): + (JSC::MacroAssembler::branchAddPtr): + (JSC::MacroAssembler::branchAdd32): + (JSC::MacroAssembler::branchMul32): + (JSC::MacroAssembler::branchSubPtr): + (JSC::MacroAssembler::branchSub32): + (JSC::MacroAssembler::set32): + (JSC::MacroAssembler::setTest32): + * assembler/X86Assembler.h: + (JSC::X86Assembler::): + (JSC::X86Assembler::jccRel32): + (JSC::X86Assembler::setccOpcode): + (JSC::X86Assembler::cmpq_mr): + (JSC::X86Assembler::setcc_r): + (JSC::X86Assembler::sete_r): + (JSC::X86Assembler::setne_r): + (JSC::X86Assembler::jne): + (JSC::X86Assembler::je): + (JSC::X86Assembler::jl): + (JSC::X86Assembler::jb): + (JSC::X86Assembler::jle): + (JSC::X86Assembler::jbe): + (JSC::X86Assembler::jge): + (JSC::X86Assembler::jg): + (JSC::X86Assembler::ja): + (JSC::X86Assembler::jae): + (JSC::X86Assembler::jo): + (JSC::X86Assembler::jp): + (JSC::X86Assembler::js): + (JSC::X86Assembler::jcc): + (JSC::X86Assembler::X86InstructionFormatter::putModRmSib): + * jit/JIT.cpp: + (JSC::JIT::compileOpStrictEq): + (JSC::JIT::emitSlowScriptCheck): + 
(JSC::JIT::privateCompileMainPass): + (JSC::JIT::privateCompileSlowCases): + (JSC::JIT::privateCompile): + (JSC::JIT::privateCompileCTIMachineTrampolines): + * jit/JITArithmetic.cpp: + (JSC::JIT::compileFastArith_op_lshift): + (JSC::JIT::compileFastArith_op_mod): + (JSC::JIT::compileFastArith_op_post_inc): + (JSC::JIT::compileFastArith_op_post_dec): + (JSC::JIT::compileFastArith_op_pre_inc): + (JSC::JIT::compileFastArith_op_pre_dec): + (JSC::JIT::compileBinaryArithOp): + (JSC::JIT::compileFastArith_op_add): + (JSC::JIT::compileFastArith_op_mul): + * jit/JITCall.cpp: + (JSC::JIT::compileOpCall): + (JSC::JIT::compileOpCallSlowCase): + * jit/JITInlineMethods.h: + (JSC::JIT::checkStructure): + (JSC::JIT::emitJumpIfJSCell): + (JSC::JIT::emitJumpIfNotJSCell): + (JSC::JIT::emitJumpIfImmediateNumber): + (JSC::JIT::emitJumpIfNotImmediateNumber): + (JSC::JIT::emitJumpIfImmediateInteger): + (JSC::JIT::emitJumpIfNotImmediateInteger): + (JSC::JIT::emitFastArithDeTagImmediateJumpIfZero): + * jit/JITPropertyAccess.cpp: + (JSC::JIT::compileGetByIdHotPath): + (JSC::JIT::compilePutByIdHotPath): + (JSC::JIT::privateCompilePutByIdTransition): + (JSC::JIT::privateCompilePatchGetArrayLength): + (JSC::JIT::privateCompileGetByIdProto): + (JSC::JIT::privateCompileGetByIdProtoList): + (JSC::JIT::privateCompileGetByIdChainList): + (JSC::JIT::privateCompileGetByIdChain): + * runtime/RegExp.cpp: + (JSC::RegExp::match): + * wrec/WRECGenerator.cpp: + (JSC::WREC::Generator::generateEnter): + (JSC::WREC::Generator::generateIncrementIndex): + (JSC::WREC::Generator::generateLoadCharacter): + (JSC::WREC::Generator::generateJumpIfNotEndOfInput): + (JSC::WREC::Generator::generateBackreferenceQuantifier): + (JSC::WREC::Generator::generateNonGreedyQuantifier): + (JSC::WREC::Generator::generateGreedyQuantifier): + (JSC::WREC::Generator::generatePatternCharacterPair): + (JSC::WREC::Generator::generatePatternCharacter): + (JSC::WREC::Generator::generateCharacterClassInvertedRange): + 
(JSC::WREC::Generator::generateCharacterClassInverted): + (JSC::WREC::Generator::generateAssertionBOL): + (JSC::WREC::Generator::generateAssertionEOL): + (JSC::WREC::Generator::generateAssertionWordBoundary): + (JSC::WREC::Generator::generateBackreference): + +2009-02-03 David Hyatt <hyatt@apple.com> + + Fix a bug in Vector's shrinkCapacity method. It did not properly copy elements into the inline buffer + when shrinking down from a size that was greater than the inline capacity. + + Reviewed by Maciej + + * wtf/Vector.h: + (WTF::VectorBuffer::VectorBuffer): + (WTF::VectorBuffer::allocateBuffer): + +2009-02-03 Simon Hausmann <simon.hausmann@nokia.com> + + Reviewed by Tor Arne Vestbø. + + Added accessor for JSByteArray storage. + + * runtime/JSByteArray.h: + (JSC::JSByteArray::storage): + +2009-02-03 Dmitry Titov <dimich@chromium.org> + + Reviewed by Alexey Proskuryakov. + + https://bugs.webkit.org/show_bug.cgi?id=23560 + Implement SharedTimer on WorkerRunLoop + + * JavaScriptCore.exp: + Forgot to expose ThreadCondition::timedWait() in one of previous patches. + +2009-02-02 Oliver Hunt <oliver@apple.com> Reviewed by Gavin Barraclough. @@ -229,11 +2597,103 @@ * wrec/WRECGenerator.cpp: (JSC::WREC::Generator::generateCharacterClassInvertedRange): -2009-02-03 Mark Rowe <mrowe@apple.com> +2009-02-02 Darin Adler <darin@apple.com> + + Reviewed by Dave Hyatt. + + Bug 23676: Speed up uses of reserveCapacity on new vectors by adding a new reserveInitialCapacity + https://bugs.webkit.org/show_bug.cgi?id=23676 + + * API/JSObjectRef.cpp: + (JSObjectCopyPropertyNames): Use reserveInitialCapacity. + * parser/Lexer.cpp: + (JSC::Lexer::Lexer): Ditto. + (JSC::Lexer::clear): Ditto. + + * wtf/Vector.h: Added reserveInitialCapacity, a more efficient version of + reserveCapacity for use when the vector is brand new (still size 0 with no + capacity other than the inline capacity). + +2009-01-30 Mark Rowe <mrowe@apple.com> + + Rubber-stamped by Oliver Hunt. 
+ + <rdar://problem/6391501> Enable the JIT on Mac OS X x86_64 as it passes all tests. + + * wtf/Platform.h: + +2009-01-30 Oliver Hunt <oliver@apple.com> + + Reviewed by Mark Rowe and Sam Weinig. + + Finally fix load() to propagate exceptions correctly. + + * jsc.cpp: + (functionLoad): + +2009-01-30 David Levin <levin@chromium.org> + + Reviewed by Darin Adler. + + https://bugs.webkit.org/show_bug.cgi?id=23618 + Templated worker tasks should be more error proof to use. + Fix Chromium build. - Merge r40397. + * wtf/TypeTraits.h: + (WTF::IsConvertibleToInteger::IsConvertibleToDouble): + Avoid "possible loss of data" warning when using Microsoft's C++ compiler + by avoiding an implicit conversion of int types to doubles. - 2009-01-29 Stephanie Lewis <slewis@apple.com> +2009-01-30 Laszlo Gombos <laszlo.1.gombos@nokia.com> + + Reviewed by Simon Hausmann. + + Bug 23580: GNU mode RVCT compilation support + <https://bugs.webkit.org/show_bug.cgi?id=23580> + + * pcre/pcre_exec.cpp: Use COMPILER(GCC) instead of __GNUC__. + * wtf/FastMalloc.cpp: Ditto. + (WTF::TCMallocStats::): + * wtf/Platform.h: Don't define COMPILER(GCC) with RVCT --gnu. + +2009-01-30 David Levin <levin@chromium.org> + + Reviewed by Alexey Proskuryakov. + + Bug 23618: Templated worker tasks should be more error proof to use + <https://bugs.webkit.org/show_bug.cgi?id=23618> + + Add the type traits needed for the generic worker tasks + and compile asserts for them. + + Add a summary header to the TypeTraits.h file to explain what is in there. + + Add a note to explain IsPod's deficiencies. + + * wtf/TypeTraits.h: + +2009-01-30 David Levin <levin@chromium.org> + + Reviewed by Alexey Proskuryakov. + + Bug 23616: Various "template helpers" should be consolidated from isolated files in JavaScriptCore. + <https://bugs.webkit.org/show_bug.cgi?id=23616> + + * wtf/TypeTraits.h: Moved RemovePointer, IsPod, IsInteger to this file. + + * wtf/OwnPtr.h: Use RemovePointer from TypeTraits.h. + * wtf/RetainPtr.h: Ditto. 
+ + * wtf/HashTraits.h: Use IsInteger from TypeTraits.h. + + * wtf/VectorTraits.h: Use IsPod from TypeTraits.h. + + * GNUmakefile.am: + * JavaScriptCore.vcproj/WTF/WTF.vcproj: + * JavaScriptCore.xcodeproj/project.pbxproj: + Added TypeTraits.h. + +2009-01-29 Stephanie Lewis <slewis@apple.com> RS by Oliver Hunt. @@ -241,11 +2701,7 @@ * JavaScriptCore.order: -2009-02-03 Mark Rowe <mrowe@apple.com> - - Merge r40396. - - 2009-01-29 Cameron Zwarich <cwzwarich@uwaterloo.ca> +2009-01-29 Cameron Zwarich <cwzwarich@uwaterloo.ca> Reviewed by Oliver Hunt. @@ -266,11 +2722,7 @@ * interpreter/Interpreter.cpp: (JSC::Interpreter::execute): -2009-02-03 Mark Rowe <mrowe@apple.com> - - Merge r40345. - - 2009-01-28 Sam Weinig <sam@webkit.org> +2009-01-28 Sam Weinig <sam@webkit.org> Reviewed by Gavin Barraclough. @@ -279,11 +2731,7 @@ * JavaScriptCore.exp: Export JSGlobalData::sharedInstance. -2009-02-03 Mark Rowe <mrowe@apple.com> - - Merge r40339. - - 2009-01-28 Sam Weinig <sam@webkit.org> +2009-01-28 Sam Weinig <sam@webkit.org> Reviewed by Geoff Garen. @@ -302,11 +2750,7 @@ the array. (JSC::JSArray::increaseVectorLength): Ditto. -2009-02-03 Mark Rowe <mrowe@apple.com> - - Merge r40332. - - 2009-01-28 Sam Weinig <sam@webkit.org> +2009-01-28 Sam Weinig <sam@webkit.org> Reviewed by Geoff Garen. @@ -339,6 +2783,25 @@ * runtime/JSObject.h: (JSC::JSObject::isActivationObject): Added. +2009-01-28 David Kilzer <ddkilzer@apple.com> + + Bug 23490: Remove initialRefCount argument from RefCounted class + + <https://bugs.webkit.org/show_bug.cgi?id=23490> + + Reviewed by Darin Adler. + + RefCountedBase now always starts with a ref count of 1, so there + is no need to pass the initialRefCount into the class anymore. + + * wtf/ByteArray.h: + (WTF::ByteArray::ByteArray): Removed call to RefCounted(1). + * wtf/RefCounted.h: + (WTF::RefCountedBase::RefCountedBase): Changed to start with a + ref count of 1. 
+ (WTF::RefCounted::RefCounted): Removed initialRefCount argument + and removed call to RefCounted(1). + 2009-01-26 Adele Peterson <adele@apple.com> Build fix. diff --git a/JavaScriptCore/Configurations/Base.xcconfig b/JavaScriptCore/Configurations/Base.xcconfig index 4154cb8..b639dad 100644 --- a/JavaScriptCore/Configurations/Base.xcconfig +++ b/JavaScriptCore/Configurations/Base.xcconfig @@ -12,7 +12,7 @@ GCC_INLINES_ARE_PRIVATE_EXTERN = YES; GCC_MODEL_TUNING = G5; GCC_OBJC_CALL_CXX_CDTORS = YES; GCC_PRECOMPILE_PREFIX_HEADER = YES; -GCC_PREPROCESSOR_DEFINITIONS = $(DEBUG_DEFINES) WEBKIT_VERSION_MIN_REQUIRED=WEBKIT_VERSION_LATEST $(GCC_PREPROCESSOR_DEFINITIONS); +GCC_PREPROCESSOR_DEFINITIONS = $(DEBUG_DEFINES) HAVE_DTRACE=$(HAVE_DTRACE) WEBKIT_VERSION_MIN_REQUIRED=WEBKIT_VERSION_LATEST $(GCC_PREPROCESSOR_DEFINITIONS); GCC_STRICT_ALIASING = YES; GCC_THREADSAFE_STATICS = NO; GCC_TREAT_WARNINGS_AS_ERRORS = YES; @@ -34,6 +34,11 @@ WARNING_CFLAGS_x86_64 = $(WARNING_CFLAGS_BASE); HEADER_SEARCH_PATHS = . icu $(HEADER_SEARCH_PATHS); +REAL_PLATFORM_NAME = $(REAL_PLATFORM_NAME_$(PLATFORM_NAME)); +REAL_PLATFORM_NAME_ = $(REAL_PLATFORM_NAME_macosx); +REAL_PLATFORM_NAME_macosx = macosx; + + // DEBUG_DEFINES, GCC_OPTIMIZATION_LEVEL, STRIP_INSTALLED_PRODUCT and DEAD_CODE_STRIPPING vary between the debug and normal variants. // We set up the values for each variant here, and have the Debug configuration in the Xcode project use the _debug variant. 
DEBUG_DEFINES_debug = ; @@ -57,6 +62,15 @@ GCC_VERSION = $(GCC_VERSION_$(XCODE_VERSION_ACTUAL)); GCC_VERSION_0310 = 4.2; +// HAVE_DTRACE is disabled on Leopard due to <rdar://problem/5628149> +HAVE_DTRACE = $(HAVE_DTRACE_$(REAL_PLATFORM_NAME)); +HAVE_DTRACE_macosx = $(HAVE_DTRACE_macosx_$(MAC_OS_X_VERSION_MAJOR)); +HAVE_DTRACE_macosx_ = $(HAVE_DTRACE_macosx_1040); +HAVE_DTRACE_macosx_1040 = 0; +HAVE_DTRACE_macosx_1050 = 0; +HAVE_DTRACE_macosx_1060 = 1; + + // <rdar://problem/5488678>: Production builds on 10.4 PowerPC need to have debugging symbols disabled to prevent a huge STABS section being generated. // Xcode on 10.4 does not define MAC_OS_X_VERSION_MAJOR, so the default Mac OS X version is treated as 10.4. GCC_GENERATE_DEBUGGING_SYMBOLS = $(GCC_GENERATE_DEBUGGING_SYMBOLS_$(CURRENT_ARCH)); diff --git a/JavaScriptCore/Configurations/DebugRelease.xcconfig b/JavaScriptCore/Configurations/DebugRelease.xcconfig index cbb2933..a9c39aa 100644 --- a/JavaScriptCore/Configurations/DebugRelease.xcconfig +++ b/JavaScriptCore/Configurations/DebugRelease.xcconfig @@ -1,5 +1,12 @@ #include "Base.xcconfig" -ARCHS = $(NATIVE_ARCH); + +ARCHS = $(ARCHS_$(MAC_OS_X_VERSION_MAJOR)); +ARCHS_ = $(ARCHS_1040); +ARCHS_1040 = $(NATIVE_ARCH); +ARCHS_1050 = $(NATIVE_ARCH); +ARCHS_1060 = $(ARCHS_STANDARD_32_64_BIT); + +ONLY_ACTIVE_ARCH = YES; MACOSX_DEPLOYMENT_TARGET = $(MACOSX_DEPLOYMENT_TARGET_$(MAC_OS_X_VERSION_MAJOR)); MACOSX_DEPLOYMENT_TARGET_ = 10.4; diff --git a/JavaScriptCore/Configurations/JavaScriptCore.xcconfig b/JavaScriptCore/Configurations/JavaScriptCore.xcconfig index ef199d2..0976c16 100644 --- a/JavaScriptCore/Configurations/JavaScriptCore.xcconfig +++ b/JavaScriptCore/Configurations/JavaScriptCore.xcconfig @@ -12,8 +12,14 @@ INFOPLIST_FILE = Info.plist; INSTALL_PATH = $(SYSTEM_LIBRARY_DIR)/Frameworks; PRODUCT_NAME = JavaScriptCore; -// This needs to be kept sorted, and in sync with FEATURE_DEFINES in WebCore.xcconfig, WebKit.xcconfig and the default settings of 
build-webkit. -FEATURE_DEFINES = ENABLE_DATABASE ENABLE_DOM_STORAGE ENABLE_ICONDATABASE ENABLE_OFFLINE_WEB_APPLICATIONS ENABLE_SVG ENABLE_SVG_ANIMATION ENABLE_SVG_AS_IMAGE ENABLE_SVG_FONTS ENABLE_SVG_FOREIGN_OBJECT ENABLE_SVG_USE ENABLE_VIDEO ENABLE_WORKERS ENABLE_XPATH ENABLE_XSLT; +// This needs to be kept sorted, and in sync with FEATURE_DEFINES in WebCore.xcconfig, WebKit.xcconfig and +// the default settings of build-webkit to prevent needless rebuilding when using both Xcode and build-webkit. +FEATURE_DEFINES = $(FEATURE_DEFINES_$(MAC_OS_X_VERSION_MAJOR)); +FEATURE_DEFINES_BASE = ENABLE_DATABASE ENABLE_DOM_STORAGE ENABLE_ICONDATABASE ENABLE_OFFLINE_WEB_APPLICATIONS ENABLE_SVG ENABLE_SVG_ANIMATION ENABLE_SVG_AS_IMAGE ENABLE_SVG_FONTS ENABLE_SVG_FOREIGN_OBJECT ENABLE_SVG_USE ENABLE_VIDEO ENABLE_WORKERS ENABLE_XPATH ENABLE_XSLT; +FEATURE_DEFINES_ = $(FEATURE_DEFINES_1040); +FEATURE_DEFINES_1040 = $(FEATURE_DEFINES_BASE); +FEATURE_DEFINES_1050 = $(FEATURE_DEFINES_BASE); +FEATURE_DEFINES_1060 = $(FEATURE_DEFINES_BASE) ENABLE_GEOLOCATION; OTHER_CFLAGS = $(OTHER_CFLAGS_$(CONFIGURATION)_$(CURRENT_VARIANT)); OTHER_CFLAGS_Release_normal = $(OTHER_CFLAGS_normal_$(XCODE_VERSION_ACTUAL)); diff --git a/JavaScriptCore/Configurations/Version.xcconfig b/JavaScriptCore/Configurations/Version.xcconfig index 16a349c..ab0aa9b 100644 --- a/JavaScriptCore/Configurations/Version.xcconfig +++ b/JavaScriptCore/Configurations/Version.xcconfig @@ -1,5 +1,5 @@ -MAJOR_VERSION = 528; -MINOR_VERSION = 15; +MAJOR_VERSION = 530; +MINOR_VERSION = 5; TINY_VERSION = 0; FULL_VERSION = $(MAJOR_VERSION).$(MINOR_VERSION); diff --git a/JavaScriptCore/GNUmakefile.am b/JavaScriptCore/GNUmakefile.am index 3d90470..b9feada 100644 --- a/JavaScriptCore/GNUmakefile.am +++ b/JavaScriptCore/GNUmakefile.am @@ -76,12 +76,15 @@ javascriptcore_sources += \ JavaScriptCore/jit/ExecutableAllocator.h \ JavaScriptCore/jit/JIT.cpp \ JavaScriptCore/jit/JITCall.cpp \ + JavaScriptCore/jit/JITCode.h \ 
JavaScriptCore/jit/JITPropertyAccess.cpp \ JavaScriptCore/jit/JITArithmetic.cpp \ JavaScriptCore/jit/ExecutableAllocator.cpp \ JavaScriptCore/jit/ExecutableAllocatorPosix.cpp \ JavaScriptCore/jit/JIT.h \ JavaScriptCore/jit/JITInlineMethods.h \ + JavaScriptCore/jit/JITStubs.cpp \ + JavaScriptCore/jit/JITStubs.h \ JavaScriptCore/bytecode/StructureStubInfo.cpp \ JavaScriptCore/bytecode/StructureStubInfo.h \ JavaScriptCore/bytecode/CodeBlock.cpp \ @@ -130,8 +133,11 @@ javascriptcore_sources += \ JavaScriptCore/icu/unicode/utypes.h \ JavaScriptCore/icu/unicode/uversion.h \ JavaScriptCore/assembler/X86Assembler.h \ + JavaScriptCore/assembler/AbstractMacroAssembler.h \ JavaScriptCore/assembler/AssemblerBuffer.h \ JavaScriptCore/assembler/MacroAssembler.h \ + JavaScriptCore/assembler/MacroAssemblerX86.h \ + JavaScriptCore/assembler/MacroAssemblerX86Common.h \ JavaScriptCore/os-win32/stdbool.h \ JavaScriptCore/os-win32/stdint.h \ JavaScriptCore/pcre/pcre.h \ @@ -157,6 +163,8 @@ javascriptcore_sources += \ JavaScriptCore/profiler/TreeProfile.h \ JavaScriptCore/interpreter/CallFrame.cpp \ JavaScriptCore/interpreter/CallFrame.h \ + JavaScriptCore/runtime/TimeoutChecker.cpp \ + JavaScriptCore/runtime/TimeoutChecker.h \ JavaScriptCore/runtime/InitializeThreading.cpp \ JavaScriptCore/runtime/InitializeThreading.h \ JavaScriptCore/runtime/JSActivation.cpp \ @@ -247,6 +255,8 @@ javascriptcore_sources += \ JavaScriptCore/wtf/Threading.cpp \ JavaScriptCore/wtf/ThreadingGtk.cpp \ JavaScriptCore/wtf/ThreadingPthreads.cpp \ + JavaScriptCore/wtf/TypeTraits.cpp \ + JavaScriptCore/wtf/TypeTraits.h \ JavaScriptCore/wtf/UnusedParam.h \ JavaScriptCore/wtf/Vector.h \ JavaScriptCore/wtf/VectorTraits.h \ diff --git a/JavaScriptCore/JavaScriptCore.exp b/JavaScriptCore/JavaScriptCore.exp index 5e1bb78..993ebe7 100644 --- a/JavaScriptCore/JavaScriptCore.exp +++ b/JavaScriptCore/JavaScriptCore.exp @@ -111,7 +111,7 @@ __ZN3JSC12DateInstance4infoE __ZN3JSC12JSGlobalData10ClientDataD2Ev 
__ZN3JSC12JSGlobalData12createLeakedEv __ZN3JSC12JSGlobalData14sharedInstanceEv -__ZN3JSC12JSGlobalData6createEv +__ZN3JSC12JSGlobalData6createEb __ZN3JSC12JSGlobalDataD1Ev __ZN3JSC12SamplingTool13notifyOfScopeEPNS_9ScopeNodeE __ZN3JSC12SamplingTool4dumpEPNS_9ExecStateE @@ -132,15 +132,13 @@ __ZN3JSC13jsOwnedStringEPNS_12JSGlobalDataERKNS_7UStringE __ZN3JSC14JSGlobalObject10globalExecEv __ZN3JSC14JSGlobalObject12defineGetterEPNS_9ExecStateERKNS_10IdentifierEPNS_8JSObjectE __ZN3JSC14JSGlobalObject12defineSetterEPNS_9ExecStateERKNS_10IdentifierEPNS_8JSObjectE -__ZN3JSC14JSGlobalObject14setTimeoutTimeEj -__ZN3JSC14JSGlobalObject16stopTimeoutCheckEv __ZN3JSC14JSGlobalObject17putWithAttributesEPNS_9ExecStateERKNS_10IdentifierENS_10JSValuePtrEj -__ZN3JSC14JSGlobalObject17startTimeoutCheckEv __ZN3JSC14JSGlobalObject3putEPNS_9ExecStateERKNS_10IdentifierENS_10JSValuePtrERNS_15PutPropertySlotE __ZN3JSC14JSGlobalObject4initEPNS_8JSObjectE __ZN3JSC14JSGlobalObject4markEv __ZN3JSC14JSGlobalObjectD2Ev __ZN3JSC14JSGlobalObjectnwEmPNS_12JSGlobalDataE +__ZN3JSC14TimeoutChecker5resetEv __ZN3JSC14constructArrayEPNS_9ExecStateERKNS_7ArgListE __ZN3JSC15JSWrapperObject4markEv __ZN3JSC15toInt32SlowCaseEdRb @@ -284,6 +282,7 @@ __ZN3WTF13tryFastCallocEmm __ZN3WTF15ThreadCondition4waitERNS_5MutexE __ZN3WTF15ThreadCondition6signalEv __ZN3WTF15ThreadCondition9broadcastEv +__ZN3WTF15ThreadCondition9timedWaitERNS_5MutexEd __ZN3WTF15ThreadConditionC1Ev __ZN3WTF15ThreadConditionD1Ev __ZN3WTF16callOnMainThreadEPFvPvES0_ diff --git a/JavaScriptCore/JavaScriptCore.pri b/JavaScriptCore/JavaScriptCore.pri index 6aee0aa..eb4bab3 100644 --- a/JavaScriptCore/JavaScriptCore.pri +++ b/JavaScriptCore/JavaScriptCore.pri @@ -14,18 +14,20 @@ win32-* { } # Disable the JIT due to numerous observed miscompilations :( -#CONFIG(release):isEqual(QT_ARCH,i386) { -# JIT_DEFINES = ENABLE_JIT ENABLE_WREC ENABLE_JIT_OPTIMIZE_CALL ENABLE_JIT_OPTIMIZE_PROPERTY_ACCESS ENABLE_JIT_OPTIMIZE_ARITHMETIC -# # gcc <= 4.1 is known 
to miscompile, so require >= 4.2, written as major > 3 and minor > 1 -# linux-g++*:greaterThan(QT_GCC_MAJOR_VERSION,3):greaterThan(QT_GCC_MINOR_VERSION,1) { -# DEFINES += $$JIT_DEFINES -# SOURCES += wtf/TCSystemAlloc.cpp -# DEFINES -= USE_SYSTEM_MALLOC -# } -# win32-msvc* { -# DEFINES += $$JIT_DEFINES -# } -#} +!contains(DEFINES, ENABLE_JIT=.) { + CONFIG(release):isEqual(QT_ARCH,i386) { + JIT_DEFINES = ENABLE_JIT ENABLE_WREC ENABLE_JIT_OPTIMIZE_CALL ENABLE_JIT_OPTIMIZE_PROPERTY_ACCESS ENABLE_JIT_OPTIMIZE_ARITHMETIC + # Require gcc >= 4.1 + linux-g++*:greaterThan(QT_GCC_MAJOR_VERSION,3):greaterThan(QT_GCC_MINOR_VERSION,0) { + DEFINES += $$JIT_DEFINES WTF_USE_JIT_STUB_ARGUMENT_VA_LIST + QMAKE_CXXFLAGS += -fno-stack-protector + QMAKE_CFLAGS += -fno-stack-protector + } + win32-msvc* { + DEFINES += $$JIT_DEFINES WTF_USE_JIT_STUB_ARGUMENT_REGISTER + } + } +} include(pcre/pcre.pri) @@ -51,6 +53,7 @@ SOURCES += \ wtf/MainThread.cpp \ wtf/RandomNumber.cpp \ wtf/RefCountedLeakCounter.cpp \ + wtf/TypeTraits.cpp \ wtf/unicode/CollatorDefault.cpp \ wtf/unicode/icu/CollatorICU.cpp \ wtf/unicode/UTF8.cpp \ @@ -71,6 +74,7 @@ SOURCES += \ runtime/JSVariableObject.cpp \ runtime/JSActivation.cpp \ runtime/JSNotAnObject.cpp \ + runtime/TimeoutChecker.cpp \ bytecode/CodeBlock.cpp \ bytecode/StructureStubInfo.cpp \ bytecode/JumpTable.cpp \ @@ -79,6 +83,7 @@ SOURCES += \ jit/JITArithmetic.cpp \ jit/JITPropertyAccess.cpp \ jit/ExecutableAllocator.cpp \ + jit/JITStubs.cpp \ bytecompiler/BytecodeGenerator.cpp \ runtime/ExceptionHelpers.cpp \ runtime/JSPropertyNameIterator.cpp \ diff --git a/JavaScriptCore/JavaScriptCore.scons b/JavaScriptCore/JavaScriptCore.scons index 24e5003..30665cf 100644 --- a/JavaScriptCore/JavaScriptCore.scons +++ b/JavaScriptCore/JavaScriptCore.scons @@ -114,6 +114,7 @@ sources['runtime'] = [ 'runtime/StringPrototype.cpp', 'runtime/Structure.cpp', 'runtime/StructureChain.cpp', + 'runtime/TimeoutChecker.cpp', 'runtime/UString.cpp', ] sources['bytecode'] = [ @@ -149,6 
+150,7 @@ sources['wtf'] = [ 'wtf/RandomNumber.cpp', 'wtf/RefCountedLeakCounter.cpp', 'wtf/Threading.cpp', + 'wtf/TypeTraits.cpp', 'wtf/dtoa.cpp', ] sources['wtf/unicode'] = [ diff --git a/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj b/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj index e28adab..a19b310 100644 --- a/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj +++ b/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj @@ -505,10 +505,6 @@ >
</File>
<File
- RelativePath="..\..\runtime\Interpreter.h"
- >
- </File>
- <File
RelativePath="..\..\runtime\JSActivation.cpp"
>
</File>
@@ -1290,6 +1286,22 @@ >
</File>
<File
+ RelativePath="..\..\runtime\TimeoutChecker.cpp"
+ >
+ </File>
+ <File
+ RelativePath="..\..\runtime\TimeoutChecker.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\jit\JITStubs.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\jit\JITStubs.cpp"
+ >
+ </File>
+ <File
RelativePath="..\..\runtime\JSPropertyNameIterator.cpp"
>
</File>
diff --git a/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/build-generated-files.sh b/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/build-generated-files.sh index 4f44ddc..6d6b588 100755 --- a/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/build-generated-files.sh +++ b/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/build-generated-files.sh @@ -29,7 +29,7 @@ export SDKROOT export BUILT_PRODUCTS_DIR="$XDSTROOT/obj/JavaScriptCore" -mkdir -p "${BUILT_PRODUCTS_DIR}/DerivedSources" +mkdir -p "${BUILT_PRODUCTS_DIR}/DerivedSources/docs" cd "${BUILT_PRODUCTS_DIR}/DerivedSources" export JavaScriptCore="${XSRCROOT}" diff --git a/JavaScriptCore/JavaScriptCore.vcproj/WTF/WTF.vcproj b/JavaScriptCore/JavaScriptCore.vcproj/WTF/WTF.vcproj index 3934b15..76c76ff 100644 --- a/JavaScriptCore/JavaScriptCore.vcproj/WTF/WTF.vcproj +++ b/JavaScriptCore/JavaScriptCore.vcproj/WTF/WTF.vcproj @@ -459,6 +459,14 @@ >
</File>
<File
+ RelativePath="..\..\wtf\TypeTraits.cpp"
+ >
+ </File>
+ <File
+ RelativePath="..\..\wtf\TypeTraits.h"
+ >
+ </File>
+ <File
RelativePath="..\..\wtf\unicode\Unicode.h"
>
</File>
diff --git a/JavaScriptCore/JavaScriptCore.vcproj/jsc/jsc.vcproj b/JavaScriptCore/JavaScriptCore.vcproj/jsc/jsc.vcproj index 0b3a006..5ca2306 100644 --- a/JavaScriptCore/JavaScriptCore.vcproj/jsc/jsc.vcproj +++ b/JavaScriptCore/JavaScriptCore.vcproj/jsc/jsc.vcproj @@ -39,7 +39,7 @@ />
<Tool
Name="VCCLCompilerTool"
- AdditionalIncludeDirectories=""$(WebKitOutputDir)\include";"$(WebKitOutputDir)\obj\JavaScriptCore\$(ConfigurationName)\DerivedSources\";../../;"../../os-win32/";../../pcre/;../../assembler/;../../wrec/;../../parser/;../../runtime/;../../VM/;../../bytecode/;../../interpreter/;../../wtf/;../../debugger/;../../bytecompiler/;"$(WebKitLibrariesDir)\include\icu";"$(WebKitLibrariesDir)\include\pthreads";../../../icu/include;"$(WebKitLibrariesDir)\include";../../jit/"
+ AdditionalIncludeDirectories=""$(WebKitOutputDir)\include";"$(WebKitOutputDir)\obj\JavaScriptCore\$(ConfigurationName)\DerivedSources\";../../;"../../os-win32/";../../pcre/;../../assembler/;../../wrec/;../../parser/;../../runtime/;../../VM/;../../bytecode/;../../interpreter/;../../wtf/;../../debugger/;../../bytecompiler/;../../profiler;"$(WebKitLibrariesDir)\include\icu";"$(WebKitLibrariesDir)\include\pthreads";../../../icu/include;"$(WebKitLibrariesDir)\include";../../jit/"
PreprocessorDefinitions="__STD_C"
/>
<Tool
@@ -109,7 +109,7 @@ />
<Tool
Name="VCCLCompilerTool"
- AdditionalIncludeDirectories=""$(WebKitOutputDir)\include";"$(WebKitOutputDir)\obj\JavaScriptCore\$(ConfigurationName)\DerivedSources\";../../;"../../os-win32/";../../pcre/;../../assembler/;../../wrec/;../../parser/;../../runtime/;../../VM/;../../bytecode/;../../interpreter/;../../wtf/;../../debugger/;../../bytecompiler/;"$(WebKitLibrariesDir)\include\icu";"$(WebKitLibrariesDir)\include\pthreads";../../../icu/include;"$(WebKitLibrariesDir)\include";../../jit/"
+ AdditionalIncludeDirectories=""$(WebKitOutputDir)\include";"$(WebKitOutputDir)\obj\JavaScriptCore\$(ConfigurationName)\DerivedSources\";../../;"../../os-win32/";../../pcre/;../../assembler/;../../wrec/;../../parser/;../../runtime/;../../VM/;../../bytecode/;../../interpreter/;../../wtf/;../../debugger/;../../bytecompiler/;../../profiler;"$(WebKitLibrariesDir)\include\icu";"$(WebKitLibrariesDir)\include\pthreads";../../../icu/include;"$(WebKitLibrariesDir)\include";../../jit/"
PreprocessorDefinitions="__STD_C"
/>
<Tool
@@ -178,7 +178,7 @@ />
<Tool
Name="VCCLCompilerTool"
- AdditionalIncludeDirectories=""$(WebKitOutputDir)\include";"$(WebKitOutputDir)\obj\JavaScriptCore\$(ConfigurationName)\DerivedSources\";../../;"../../os-win32/";../../pcre/;../../assembler/;../../wrec/;../../parser/;../../runtime/;../../VM/;../../bytecode/;../../interpreter/;../../wtf/;../../debugger/;../../bytecompiler/;"$(WebKitLibrariesDir)\include\icu";"$(WebKitLibrariesDir)\include\pthreads";../../../icu/include;"$(WebKitLibrariesDir)\include";../../jit/"
+ AdditionalIncludeDirectories=""$(WebKitOutputDir)\include";"$(WebKitOutputDir)\obj\JavaScriptCore\$(ConfigurationName)\DerivedSources\";../../;"../../os-win32/";../../pcre/;../../assembler/;../../wrec/;../../parser/;../../runtime/;../../VM/;../../bytecode/;../../interpreter/;../../wtf/;../../debugger/;../../bytecompiler/;../../profiler;"$(WebKitLibrariesDir)\include\icu";"$(WebKitLibrariesDir)\include\pthreads";../../../icu/include;"$(WebKitLibrariesDir)\include";../../jit/"
PreprocessorDefinitions="__STD_C"
/>
<Tool
diff --git a/JavaScriptCore/JavaScriptCore.vcproj/testapi/testapi.vcproj b/JavaScriptCore/JavaScriptCore.vcproj/testapi/testapi.vcproj index d4d9966..d13ed56 100644 --- a/JavaScriptCore/JavaScriptCore.vcproj/testapi/testapi.vcproj +++ b/JavaScriptCore/JavaScriptCore.vcproj/testapi/testapi.vcproj @@ -89,7 +89,7 @@ />
<Tool
Name="VCPostBuildEventTool"
- CommandLine="if exist "$(WebKitOutputDir)\buildfailed" del "$(WebKitOutputDir)\buildfailed""
+ CommandLine="if exist "$(WebKitOutputDir)\buildfailed" del "$(WebKitOutputDir)\buildfailed"

xcopy /y /d "$(ProjectDir)\..\..\API\tests\testapi.js" "$(OutDir)""
/>
</Configuration>
<Configuration
@@ -166,7 +166,7 @@ />
<Tool
Name="VCPostBuildEventTool"
- CommandLine="if exist "$(WebKitOutputDir)\buildfailed" del "$(WebKitOutputDir)\buildfailed""
+ CommandLine="if exist "$(WebKitOutputDir)\buildfailed" del "$(WebKitOutputDir)\buildfailed"

xcopy /y /d "$(ProjectDir)\..\..\API\tests\testapi.js" "$(OutDir)""
/>
</Configuration>
<Configuration
@@ -242,7 +242,7 @@ />
<Tool
Name="VCPostBuildEventTool"
- CommandLine="if exist "$(WebKitOutputDir)\buildfailed" del "$(WebKitOutputDir)\buildfailed""
+ CommandLine="if exist "$(WebKitOutputDir)\buildfailed" del "$(WebKitOutputDir)\buildfailed"

xcopy /y /d "$(ProjectDir)\..\..\API\tests\testapi.js" "$(OutDir)""
/>
</Configuration>
</Configurations>
diff --git a/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj b/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj index 3c02898..e10ec7b 100644 --- a/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj +++ b/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj @@ -40,6 +40,8 @@ 088FA5BC0EF76D4300578E6F /* RandomNumber.h in Headers */ = {isa = PBXBuildFile; fileRef = 088FA5BA0EF76D4300578E6F /* RandomNumber.h */; settings = {ATTRIBUTES = (Private, ); }; }; 08E279E90EF83B10007DB523 /* RandomNumberSeed.h in Headers */ = {isa = PBXBuildFile; fileRef = 08E279E80EF83B10007DB523 /* RandomNumberSeed.h */; }; 0B1F921D0F1753500036468E /* PtrAndFlags.h in Headers */ = {isa = PBXBuildFile; fileRef = 0B1F921B0F17502D0036468E /* PtrAndFlags.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 0B330C270F38C62300692DE3 /* TypeTraits.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0B330C260F38C62300692DE3 /* TypeTraits.cpp */; }; + 0B4D7E630F319AC800AD7E58 /* TypeTraits.h in Headers */ = {isa = PBXBuildFile; fileRef = 0B4D7E620F319AC800AD7E58 /* TypeTraits.h */; settings = {ATTRIBUTES = (Private, ); }; }; 140B7D1D0DC69AF7009C42B8 /* JSActivation.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 14DA818F0D99FD2000B0A4FB /* JSActivation.cpp */; }; 140D17D70E8AD4A9000CD17D /* JSBasePrivate.h in Headers */ = {isa = PBXBuildFile; fileRef = 140D17D60E8AD4A9000CD17D /* JSBasePrivate.h */; settings = {ATTRIBUTES = (Private, ); }; }; 141211310A48794D00480255 /* JavaScriptCore.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 932F5BD90822A1C700736975 /* JavaScriptCore.framework */; }; @@ -82,11 +84,15 @@ 1482B74E0A43032800517CFC /* JSStringRef.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1482B74C0A43032800517CFC /* JSStringRef.cpp */; }; 1482B7E40A43076000517CFC /* JSObjectRef.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1482B7E20A43076000517CFC /* JSObjectRef.cpp */; }; 149559EE0DDCDDF700648087 /* DebuggerCallFrame.cpp in Sources */ = {isa = 
PBXBuildFile; fileRef = 149559ED0DDCDDF700648087 /* DebuggerCallFrame.cpp */; }; + 14A23D750F4E1ABB0023CDAD /* JITStubs.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 14A23D6C0F4E19CE0023CDAD /* JITStubs.cpp */; }; + 14A42E3F0F4F60EE00599099 /* TimeoutChecker.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 14A42E3D0F4F60EE00599099 /* TimeoutChecker.cpp */; }; + 14A42E400F4F60EE00599099 /* TimeoutChecker.h in Headers */ = {isa = PBXBuildFile; fileRef = 14A42E3E0F4F60EE00599099 /* TimeoutChecker.h */; settings = {ATTRIBUTES = (Private, ); }; }; 14ABDF600A437FEF00ECCA01 /* JSCallbackObject.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 14ABDF5E0A437FEF00ECCA01 /* JSCallbackObject.cpp */; }; 14B8EC720A5652090062BE54 /* CoreFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 6560A4CF04B3B3E7008AE952 /* CoreFoundation.framework */; }; 14BD59C50A3E8F9F00BAF59C /* JavaScriptCore.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 932F5BD90822A1C700736975 /* JavaScriptCore.framework */; }; 14BD5A300A3E91F600BAF59C /* JSContextRef.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 14BD5A290A3E91F600BAF59C /* JSContextRef.cpp */; }; 14BD5A320A3E91F600BAF59C /* JSValueRef.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 14BD5A2B0A3E91F600BAF59C /* JSValueRef.cpp */; }; + 14C5242B0F5355E900BA3D04 /* JITStubs.h in Headers */ = {isa = PBXBuildFile; fileRef = 14A6581A0F4E36F4000150FD /* JITStubs.h */; settings = {ATTRIBUTES = (Private, ); }; }; 14F3488F0E95EF8A003648BC /* CollectorHeapIterator.h in Headers */ = {isa = PBXBuildFile; fileRef = 14F3488E0E95EF8A003648BC /* CollectorHeapIterator.h */; settings = {ATTRIBUTES = (Private, ); }; }; 180B9B080F16D94F009BDBC5 /* CurrentTime.h in Headers */ = {isa = PBXBuildFile; fileRef = 180B9AF00F16C569009BDBC5 /* CurrentTime.h */; settings = {ATTRIBUTES = (Private, ); }; }; 180B9BFE0F16E94D009BDBC5 /* CurrentTime.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 180B9AEF0F16C569009BDBC5 /* 
CurrentTime.cpp */; }; @@ -106,6 +112,10 @@ 7E4EE7090EBB7963005934AA /* StructureChain.h in Headers */ = {isa = PBXBuildFile; fileRef = 7E4EE7080EBB7963005934AA /* StructureChain.h */; settings = {ATTRIBUTES = (Private, ); }; }; 7E4EE70F0EBB7A5B005934AA /* StructureChain.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 7E4EE70E0EBB7A5B005934AA /* StructureChain.cpp */; }; 7EFF00640EC05A9A00AA7C93 /* NodeInfo.h in Headers */ = {isa = PBXBuildFile; fileRef = 7EFF00630EC05A9A00AA7C93 /* NodeInfo.h */; }; + 860161E30F3A83C100F84710 /* AbstractMacroAssembler.h in Headers */ = {isa = PBXBuildFile; fileRef = 860161DF0F3A83C100F84710 /* AbstractMacroAssembler.h */; }; + 860161E40F3A83C100F84710 /* MacroAssemblerX86.h in Headers */ = {isa = PBXBuildFile; fileRef = 860161E00F3A83C100F84710 /* MacroAssemblerX86.h */; }; + 860161E50F3A83C100F84710 /* MacroAssemblerX86_64.h in Headers */ = {isa = PBXBuildFile; fileRef = 860161E10F3A83C100F84710 /* MacroAssemblerX86_64.h */; }; + 860161E60F3A83C100F84710 /* MacroAssemblerX86Common.h in Headers */ = {isa = PBXBuildFile; fileRef = 860161E20F3A83C100F84710 /* MacroAssemblerX86Common.h */; }; 869083150E6518D7000D36ED /* WREC.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 869083130E6518D7000D36ED /* WREC.cpp */; }; 869083160E6518D7000D36ED /* WREC.h in Headers */ = {isa = PBXBuildFile; fileRef = 869083140E6518D7000D36ED /* WREC.h */; settings = {ATTRIBUTES = (Private, ); }; }; 869EBCB70E8C6D4A008722CC /* ResultType.h in Headers */ = {isa = PBXBuildFile; fileRef = 869EBCB60E8C6D4A008722CC /* ResultType.h */; settings = {ATTRIBUTES = (Private, ); }; }; @@ -114,6 +124,7 @@ 86CC85A10EE79A4700288682 /* JITInlineMethods.h in Headers */ = {isa = PBXBuildFile; fileRef = 86CC85A00EE79A4700288682 /* JITInlineMethods.h */; }; 86CC85A30EE79B7400288682 /* JITCall.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 86CC85A20EE79B7400288682 /* JITCall.cpp */; }; 86CC85C40EE7A89400288682 /* JITPropertyAccess.cpp in Sources */ = {isa = 
PBXBuildFile; fileRef = 86CC85C30EE7A89400288682 /* JITPropertyAccess.cpp */; }; + 86CCEFDE0F413F8900FD7F9E /* JITCode.h in Headers */ = {isa = PBXBuildFile; fileRef = 86CCEFDD0F413F8900FD7F9E /* JITCode.h */; }; 905B02AE0E28640F006DF882 /* RefCountedLeakCounter.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 905B02AD0E28640F006DF882 /* RefCountedLeakCounter.cpp */; }; 90D3469C0E285280009492EE /* RefCountedLeakCounter.h in Headers */ = {isa = PBXBuildFile; fileRef = 90D3469B0E285280009492EE /* RefCountedLeakCounter.h */; settings = {ATTRIBUTES = (Private, ); }; }; 930754C108B0F68000AB3056 /* pcre_compile.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 930754BF08B0F68000AB3056 /* pcre_compile.cpp */; }; @@ -425,6 +436,8 @@ 088FA5BA0EF76D4300578E6F /* RandomNumber.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RandomNumber.h; sourceTree = "<group>"; }; 08E279E80EF83B10007DB523 /* RandomNumberSeed.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RandomNumberSeed.h; sourceTree = "<group>"; }; 0B1F921B0F17502D0036468E /* PtrAndFlags.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PtrAndFlags.h; sourceTree = "<group>"; }; + 0B330C260F38C62300692DE3 /* TypeTraits.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = TypeTraits.cpp; sourceTree = "<group>"; }; + 0B4D7E620F319AC800AD7E58 /* TypeTraits.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TypeTraits.h; sourceTree = "<group>"; }; 140D17D60E8AD4A9000CD17D /* JSBasePrivate.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSBasePrivate.h; sourceTree = "<group>"; }; 141211020A48780900480255 /* minidom.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; name = minidom.c; path = tests/minidom.c; sourceTree = "<group>"; 
}; 1412110D0A48788700480255 /* minidom.js */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.javascript; name = minidom.js; path = tests/minidom.js; sourceTree = "<group>"; }; @@ -483,7 +496,11 @@ 148A1ECD0D10C23B0069A47C /* RefPtrHashMap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RefPtrHashMap.h; sourceTree = "<group>"; }; 149559ED0DDCDDF700648087 /* DebuggerCallFrame.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = DebuggerCallFrame.cpp; sourceTree = "<group>"; }; 149B24FF0D8AF6D1009CB8C7 /* Register.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Register.h; sourceTree = "<group>"; }; + 14A23D6C0F4E19CE0023CDAD /* JITStubs.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JITStubs.cpp; sourceTree = "<group>"; }; 14A396A60CD2933100B5B4FF /* SymbolTable.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SymbolTable.h; sourceTree = "<group>"; }; + 14A42E3D0F4F60EE00599099 /* TimeoutChecker.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = TimeoutChecker.cpp; sourceTree = "<group>"; }; + 14A42E3E0F4F60EE00599099 /* TimeoutChecker.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TimeoutChecker.h; sourceTree = "<group>"; }; + 14A6581A0F4E36F4000150FD /* JITStubs.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JITStubs.h; sourceTree = "<group>"; }; 14ABB36E099C076400E2A24F /* JSValue.h */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.c.h; path = JSValue.h; sourceTree = "<group>"; }; 14ABB454099C2A0F00E2A24F /* JSType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSType.h; sourceTree = "<group>"; }; 
14ABDF5D0A437FEF00ECCA01 /* JSCallbackObject.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSCallbackObject.h; sourceTree = "<group>"; }; @@ -575,6 +592,10 @@ 7E4EE7080EBB7963005934AA /* StructureChain.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = StructureChain.h; sourceTree = "<group>"; }; 7E4EE70E0EBB7A5B005934AA /* StructureChain.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = StructureChain.cpp; sourceTree = "<group>"; }; 7EFF00630EC05A9A00AA7C93 /* NodeInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = NodeInfo.h; sourceTree = "<group>"; }; + 860161DF0F3A83C100F84710 /* AbstractMacroAssembler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AbstractMacroAssembler.h; sourceTree = "<group>"; }; + 860161E00F3A83C100F84710 /* MacroAssemblerX86.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MacroAssemblerX86.h; sourceTree = "<group>"; }; + 860161E10F3A83C100F84710 /* MacroAssemblerX86_64.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MacroAssemblerX86_64.h; sourceTree = "<group>"; }; + 860161E20F3A83C100F84710 /* MacroAssemblerX86Common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MacroAssemblerX86Common.h; sourceTree = "<group>"; }; 869083130E6518D7000D36ED /* WREC.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = WREC.cpp; sourceTree = "<group>"; }; 869083140E6518D7000D36ED /* WREC.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WREC.h; sourceTree = "<group>"; }; 869EBCB60E8C6D4A008722CC /* ResultType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = 
ResultType.h; sourceTree = "<group>"; }; @@ -583,6 +604,7 @@ 86CC85A00EE79A4700288682 /* JITInlineMethods.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JITInlineMethods.h; sourceTree = "<group>"; }; 86CC85A20EE79B7400288682 /* JITCall.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JITCall.cpp; sourceTree = "<group>"; }; 86CC85C30EE7A89400288682 /* JITPropertyAccess.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JITPropertyAccess.cpp; sourceTree = "<group>"; }; + 86CCEFDD0F413F8900FD7F9E /* JITCode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JITCode.h; sourceTree = "<group>"; }; 905B02AD0E28640F006DF882 /* RefCountedLeakCounter.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = RefCountedLeakCounter.cpp; sourceTree = "<group>"; }; 90D3469B0E285280009492EE /* RefCountedLeakCounter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RefCountedLeakCounter.h; sourceTree = "<group>"; }; 9303F567099118FA00AD71B8 /* OwnPtr.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = OwnPtr.h; sourceTree = "<group>"; }; @@ -960,6 +982,9 @@ 1429D92C0ED22D7000B89619 /* jit */ = { isa = PBXGroup; children = ( + 14A6581A0F4E36F4000150FD /* JITStubs.h */, + 14A23D6C0F4E19CE0023CDAD /* JITStubs.cpp */, + 86CCEFDD0F413F8900FD7F9E /* JITCode.h */, A7B48DB60EE74CFC00DCBDB6 /* ExecutableAllocator.cpp */, A7B48DB50EE74CFC00DCBDB6 /* ExecutableAllocator.h */, A782F1A40EEC9FA20036273F /* ExecutableAllocatorPosix.cpp */, @@ -1082,6 +1107,7 @@ 65162EF108E6A21C007556CD /* wtf */ = { isa = PBXGroup; children = ( + 0B330C260F38C62300692DE3 /* TypeTraits.cpp */, 06D358A00DAAD9C4003B174E /* mac */, E195678D09E7CF1200B89D13 /* unicode */, 938C4F690CA06BC700D9310A /* ASCIICType.h 
*/, @@ -1142,6 +1168,7 @@ 5D6A566A0F05995500266145 /* Threading.cpp */, E1EE79220D6C95CD00FEA3BA /* Threading.h */, E1EE793C0D6C9B9200FEA3BA /* ThreadingPthreads.cpp */, + 0B4D7E620F319AC800AD7E58 /* TypeTraits.h */, 935AF46B09E9D9DB00ACD1D8 /* UnusedParam.h */, 6592C316098B7DE10003D4F6 /* Vector.h */, 6592C317098B7DE10003D4F6 /* VectorTraits.h */, @@ -1359,6 +1386,8 @@ 7E4EE7080EBB7963005934AA /* StructureChain.h */, BC9041470EB9250900FE26FA /* StructureTransitionTable.h */, 14A396A60CD2933100B5B4FF /* SymbolTable.h */, + 14A42E3D0F4F60EE00599099 /* TimeoutChecker.cpp */, + 14A42E3E0F4F60EE00599099 /* TimeoutChecker.h */, 5D53726D0E1C546B0021E549 /* Tracing.d */, 5D53726E0E1C54880021E549 /* Tracing.h */, 6507D2970E871E4A00D7D896 /* TypeInfo.h */, @@ -1425,6 +1454,10 @@ 9688CB120ED12B4E001D649F /* assembler */ = { isa = PBXGroup; children = ( + 860161DF0F3A83C100F84710 /* AbstractMacroAssembler.h */, + 860161E00F3A83C100F84710 /* MacroAssemblerX86.h */, + 860161E10F3A83C100F84710 /* MacroAssemblerX86_64.h */, + 860161E20F3A83C100F84710 /* MacroAssemblerX86Common.h */, 9688CB130ED12B4E001D649F /* AssemblerBuffer.h */, 86C36EE90EE1289D00B3DF59 /* MacroAssembler.h */, 9688CB140ED12B4E001D649F /* X86Assembler.h */, @@ -1492,6 +1525,7 @@ isa = PBXHeadersBuildPhase; buildActionMask = 2147483647; files = ( + 14C5242B0F5355E900BA3D04 /* JITStubs.h in Headers */, BC18C3E50E16F5CD00B34460 /* APICast.h in Headers */, BC18C3E90E16F5CD00B34460 /* ASCIICType.h in Headers */, BC18C3EB0E16F5CD00B34460 /* AVLTree.h in Headers */, @@ -1684,6 +1718,7 @@ 5D53726F0E1C54880021E549 /* Tracing.h in Headers */, 95CD41B40E1BF6560085358E /* TreeProfile.h in Headers */, 6507D29E0E871E5E00D7D896 /* TypeInfo.h in Headers */, + 0B4D7E630F319AC800AD7E58 /* TypeTraits.h in Headers */, BC18C4760E16F5CD00B34460 /* UString.h in Headers */, BC18C4770E16F5CD00B34460 /* UTF8.h in Headers */, BC18C4730E16F5CD00B34460 /* Unicode.h in Headers */, @@ -1707,6 +1742,12 @@ BC18C4720E16F5CD00B34460 /* 
ucpinternal.h in Headers */, A7A1F7AD0F252B3C00E184E2 /* ByteArray.h in Headers */, BC3135640F302FA3003DFD3A /* DebuggerActivation.h in Headers */, + 860161E30F3A83C100F84710 /* AbstractMacroAssembler.h in Headers */, + 860161E40F3A83C100F84710 /* MacroAssemblerX86.h in Headers */, + 860161E50F3A83C100F84710 /* MacroAssemblerX86_64.h in Headers */, + 860161E60F3A83C100F84710 /* MacroAssemblerX86Common.h in Headers */, + 86CCEFDE0F413F8900FD7F9E /* JITCode.h in Headers */, + 14A42E400F4F60EE00599099 /* TimeoutChecker.h in Headers */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -1904,7 +1945,7 @@ ); runOnlyForDeploymentPostprocessing = 0; shellPath = /bin/sh; - shellScript = "TRACING_D=\"$SRCROOT/runtime/Tracing.d\";\nTRACING_H=\"$BUILT_PRODUCTS_DIR/DerivedSources/JavaScriptCore/TracingDtrace.h\";\n\nif [[ \"$MACOSX_DEPLOYMENT_TARGET\" > \"10.5\" ]];\nthen\n\tdtrace -h -o \"$TRACING_H\" -s \"$TRACING_D\";\nfi;\n"; + shellScript = "TRACING_D=\"$SRCROOT/runtime/Tracing.d\";\nTRACING_H=\"$BUILT_PRODUCTS_DIR/DerivedSources/JavaScriptCore/TracingDtrace.h\";\n\nif [[ \"$HAVE_DTRACE\" = \"1\" && \"$TRACING_D\" -nt \"$TRACING_H\" ]];\nthen\n\tdtrace -h -o \"$TRACING_H\" -s \"$TRACING_D\";\nfi;\n"; }; 5D5D8ABF0E0D0B0300F9C692 /* Fix Framework Reference */ = { isa = PBXShellScriptBuildPhase; @@ -2011,6 +2052,7 @@ 86A90ED00EE7D51F00AB350D /* JITArithmetic.cpp in Sources */, 86CC85A30EE79B7400288682 /* JITCall.cpp in Sources */, 86CC85C40EE7A89400288682 /* JITPropertyAccess.cpp in Sources */, + 14A23D750F4E1ABB0023CDAD /* JITStubs.cpp in Sources */, 140B7D1D0DC69AF7009C42B8 /* JSActivation.cpp in Sources */, 1421359B0A677F4F00A8195E /* JSBase.cpp in Sources */, A791EF290F11E07900AE1F68 /* JSByteArray.cpp in Sources */, @@ -2059,6 +2101,8 @@ 93E26BD408B1514100F85226 /* pcre_xclass.cpp in Sources */, A7A1F7AC0F252B3C00E184E2 /* ByteArray.cpp in Sources */, BC3135650F302FA3003DFD3A /* DebuggerActivation.cpp in Sources */, + 0B330C270F38C62300692DE3 /* TypeTraits.cpp in Sources 
*/, + 14A42E3F0F4F60EE00599099 /* TimeoutChecker.cpp in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; diff --git a/JavaScriptCore/JavaScriptCorePrefix.h b/JavaScriptCore/JavaScriptCorePrefix.h index e71c8a8..13b21bb 100644 --- a/JavaScriptCore/JavaScriptCorePrefix.h +++ b/JavaScriptCore/JavaScriptCorePrefix.h @@ -25,15 +25,6 @@ #endif -#if defined(__APPLE__) -#import <AvailabilityMacros.h> -#if MAC_OS_X_VERSION_MAX_ALLOWED <= MAC_OS_X_VERSION_10_4 -#define BUILDING_ON_TIGER 1 -#elif MAC_OS_X_VERSION_MAX_ALLOWED <= MAC_OS_X_VERSION_10_5 -#define BUILDING_ON_LEOPARD 1 -#endif -#endif - #ifdef __cplusplus #define new ("if you use new/delete make sure to include config.h at the top of the file"()) #define delete ("if you use new/delete make sure to include config.h at the top of the file"()) diff --git a/JavaScriptCore/JavaScriptCoreSources.bkl b/JavaScriptCore/JavaScriptCoreSources.bkl index 7ba3e09..b3a461b 100644 --- a/JavaScriptCore/JavaScriptCoreSources.bkl +++ b/JavaScriptCore/JavaScriptCoreSources.bkl @@ -152,6 +152,7 @@ Source files for JSCore. bytecode/StructureStubInfo.cpp bytecode/JumpTable.cpp runtime/ExceptionHelpers.cpp + runtime/TimeoutChecker.cpp interpreter/Interpreter.cpp bytecode/Opcode.cpp bytecode/SamplingTool.cpp @@ -169,6 +170,7 @@ Source files for JSCore. wtf/TCSystemAlloc.cpp wtf/Threading.cpp wtf/ThreadingNone.cpp + wtf/TypeTraits.cpp wtf/wx/MainThreadWx.cpp wtf/unicode/CollatorDefault.cpp wtf/unicode/icu/CollatorICU.cpp diff --git a/JavaScriptCore/assembler/AbstractMacroAssembler.h b/JavaScriptCore/assembler/AbstractMacroAssembler.h new file mode 100644 index 0000000..851b6d5 --- /dev/null +++ b/JavaScriptCore/assembler/AbstractMacroAssembler.h @@ -0,0 +1,841 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef AbstractMacroAssembler_h +#define AbstractMacroAssembler_h + +#include <wtf/Platform.h> + +#if ENABLE(ASSEMBLER) + +namespace JSC { + +template <class AssemblerType> +class AbstractMacroAssembler { +public: + class Jump; + class PatchBuffer; + class CodeLocationLabel; + class CodeLocationJump; + class CodeLocationCall; + class CodeLocationDataLabel32; + class CodeLocationDataLabelPtr; + + typedef typename AssemblerType::RegisterID RegisterID; + typedef typename AssemblerType::JmpSrc JmpSrc; + typedef typename AssemblerType::JmpDst JmpDst; + + + // Section 1: MacroAssembler operand types + // + // The following types are used as operands to MacroAssembler operations, + // describing immediate and memory operands to the instructions to be planted. 
+ + + enum Scale { + TimesOne, + TimesTwo, + TimesFour, + TimesEight, + }; + + // Address: + // + // Describes a simple base-offset address. + struct Address { + explicit Address(RegisterID base, int32_t offset = 0) + : base(base) + , offset(offset) + { + } + + RegisterID base; + int32_t offset; + }; + + // ImplicitAddress: + // + // This class is used for explicit 'load' and 'store' operations + // (as opposed to situations in which a memory operand is provided + // to a generic operation, such as an integer arithmetic instruction). + // + // In the case of a load (or store) operation we want to permit + // addresses to be implicitly constructed, e.g. the two calls: + // + // load32(Address(addrReg), destReg); + // load32(addrReg, destReg); + // + // Are equivalent, and the explicit wrapping of the Address in the former + // is unnecessary. + struct ImplicitAddress { + ImplicitAddress(RegisterID base) + : base(base) + , offset(0) + { + } + + ImplicitAddress(Address address) + : base(address.base) + , offset(address.offset) + { + } + + RegisterID base; + int32_t offset; + }; + + // BaseIndex: + // + // Describes a complex addressing mode. + struct BaseIndex { + BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0) + : base(base) + , index(index) + , scale(scale) + , offset(offset) + { + } + + RegisterID base; + RegisterID index; + Scale scale; + int32_t offset; + }; + + // AbsoluteAddress: + // + // Describes an memory operand given by a pointer. For regular load & store + // operations an unwrapped void* will be used, rather than using this. 
+ struct AbsoluteAddress { + explicit AbsoluteAddress(void* ptr) + : m_ptr(ptr) + { + } + + void* m_ptr; + }; + + // ImmPtr: + // + // A pointer sized immediate operand to an instruction - this is wrapped + // in a class requiring explicit construction in order to differentiate + // from pointers used as absolute addresses to memory operations + struct ImmPtr { + explicit ImmPtr(void* value) + : m_value(value) + { + } + + intptr_t asIntptr() + { + return reinterpret_cast<intptr_t>(m_value); + } + + void* m_value; + }; + + // Imm32: + // + // A 32bit immediate operand to an instruction - this is wrapped in a + // class requiring explicit construction in order to prevent RegisterIDs + // (which are implemented as an enum) from accidentally being passed as + // immediate values. + struct Imm32 { + explicit Imm32(int32_t value) + : m_value(value) + { + } + +#if !PLATFORM(X86_64) + explicit Imm32(ImmPtr ptr) + : m_value(ptr.asIntptr()) + { + } +#endif + + int32_t m_value; + }; + + + // Section 2: MacroAssembler code buffer handles + // + // The following types are used to reference items in the code buffer + // during JIT code generation. For example, the type Jump is used to + // track the location of a jump instruction so that it may later be + // linked to a label marking its destination. + + + // Label: + // + // A Label records a point in the generated instruction stream, typically such that + // it may be used as a destination for a jump. 
+ class Label { + friend class Jump; + template<class AssemblerType_T> + friend class AbstractMacroAssembler; + friend class PatchBuffer; + public: + Label() + { + } + + Label(AbstractMacroAssembler<AssemblerType>* masm) + : m_label(masm->m_assembler.label()) + { + } + + bool isUsed() const { return m_label.isUsed(); } + void used() { m_label.used(); } + private: + JmpDst m_label; + }; + + // DataLabelPtr: + // + // A DataLabelPtr is used to refer to a location in the code containing a pointer to be + // patched after the code has been generated. + class DataLabelPtr { + template<class AssemblerType_T> + friend class AbstractMacroAssembler; + friend class PatchBuffer; + public: + DataLabelPtr() + { + } + + DataLabelPtr(AbstractMacroAssembler<AssemblerType>* masm) + : m_label(masm->m_assembler.label()) + { + } + + private: + JmpDst m_label; + }; + + // DataLabel32: + // + // A DataLabelPtr is used to refer to a location in the code containing a pointer to be + // patched after the code has been generated. + class DataLabel32 { + template<class AssemblerType_T> + friend class AbstractMacroAssembler; + friend class PatchBuffer; + public: + DataLabel32() + { + } + + DataLabel32(AbstractMacroAssembler<AssemblerType>* masm) + : m_label(masm->m_assembler.label()) + { + } + + private: + JmpDst m_label; + }; + + // Call: + // + // A Call object is a reference to a call instruction that has been planted + // into the code buffer - it is typically used to link the call, setting the + // relative offset such that when executed it will call to the desired + // destination. 
+ class Call { + friend class PatchBuffer; + template<class AssemblerType_T> + friend class AbstractMacroAssembler; + public: + enum Flags { + None = 0x0, + Linkable = 0x1, + Near = 0x2, + LinkableNear = 0x3, + }; + + Call() + : m_flags(None) + { + } + + Call(JmpSrc jmp, Flags flags) + : m_jmp(jmp) + , m_flags(flags) + { + } + + bool isFlagSet(Flags flag) + { + return m_flags & flag; + } + + static Call fromTailJump(Jump jump) + { + return Call(jump.m_jmp, Linkable); + } + + private: + JmpSrc m_jmp; + Flags m_flags; + }; + + // Jump: + // + // A jump object is a reference to a jump instruction that has been planted + // into the code buffer - it is typically used to link the jump, setting the + // relative offset such that when executed it will jump to the desired + // destination. + class Jump { + friend class PatchBuffer; + template<class AssemblerType_T> + friend class AbstractMacroAssembler; + friend class Call; + public: + Jump() + { + } + + Jump(JmpSrc jmp) + : m_jmp(jmp) + { + } + + void link(AbstractMacroAssembler<AssemblerType>* masm) + { + masm->m_assembler.linkJump(m_jmp, masm->m_assembler.label()); + } + + void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm) + { + masm->m_assembler.linkJump(m_jmp, label.m_label); + } + + private: + JmpSrc m_jmp; + }; + + // JumpList: + // + // A JumpList is a set of Jump objects. + // All jumps in the set will be linked to the same destination. 
+ class JumpList { + friend class PatchBuffer; + + public: + void link(AbstractMacroAssembler<AssemblerType>* masm) + { + size_t size = m_jumps.size(); + for (size_t i = 0; i < size; ++i) + m_jumps[i].link(masm); + m_jumps.clear(); + } + + void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm) + { + size_t size = m_jumps.size(); + for (size_t i = 0; i < size; ++i) + m_jumps[i].linkTo(label, masm); + m_jumps.clear(); + } + + void append(Jump jump) + { + m_jumps.append(jump); + } + + void append(JumpList& other) + { + m_jumps.append(other.m_jumps.begin(), other.m_jumps.size()); + } + + bool empty() + { + return !m_jumps.size(); + } + + private: + Vector<Jump, 16> m_jumps; + }; + + + // Section 3: MacroAssembler JIT instruction stream handles. + // + // The MacroAssembler supported facilities to modify a JIT generated + // instruction stream after it has been generated (relinking calls and + // jumps, and repatching data values). The following types are used + // to store handles into the underlying instruction stream, the type + // providing semantic information as to what it is that is in the + // instruction stream at this point, and thus what operations may be + // performed on it. + + + // CodeLocationCommon: + // + // Base type for other CodeLocation* types. A postion in the JIT genertaed + // instruction stream, without any semantic information. + class CodeLocationCommon { + public: + CodeLocationCommon() + : m_location(0) + { + } + + // In order to avoid the need to store multiple handles into the + // instructions stream, where the code generation is deterministic + // and the labels will always be a fixed distance apart, these + // methods may be used to recover a handle that has nopw been + // retained, based on a known fixed relative offset from one that has. 
+ CodeLocationLabel labelAtOffset(int offset); + CodeLocationJump jumpAtOffset(int offset); + CodeLocationCall callAtOffset(int offset); + CodeLocationDataLabelPtr dataLabelPtrAtOffset(int offset); + CodeLocationDataLabel32 dataLabel32AtOffset(int offset); + + operator bool() { return m_location; } + void reset() { m_location = 0; } + + protected: + explicit CodeLocationCommon(void* location) + : m_location(location) + { + } + + void* m_location; + }; + + // CodeLocationLabel: + // + // A point in the JIT code maked with a label. + class CodeLocationLabel : public CodeLocationCommon { + friend class CodeLocationCommon; + friend class CodeLocationJump; + friend class PatchBuffer; + public: + CodeLocationLabel() + { + } + + void* addressForSwitch() { return this->m_location; } + void* addressForExceptionHandler() { return this->m_location; } + void* addressForJSR() { return this->m_location; } + + private: + explicit CodeLocationLabel(void* location) + : CodeLocationCommon(location) + { + } + + void* getJumpDestination() { return this->m_location; } + }; + + // CodeLocationJump: + // + // A point in the JIT code at which there is a jump instruction. + class CodeLocationJump : public CodeLocationCommon { + friend class CodeLocationCommon; + friend class PatchBuffer; + public: + CodeLocationJump() + { + } + + void relink(CodeLocationLabel destination) + { + AssemblerType::patchJump(reinterpret_cast<intptr_t>(this->m_location), destination.m_location); + } + + private: + explicit CodeLocationJump(void* location) + : CodeLocationCommon(location) + { + } + }; + + // CodeLocationCall: + // + // A point in the JIT code at which there is a call instruction. 
+ class CodeLocationCall : public CodeLocationCommon { + friend class CodeLocationCommon; + friend class PatchBuffer; + public: + CodeLocationCall() + { + } + + template<typename FunctionSig> + void relink(FunctionSig* function) + { + AssemblerType::patchMacroAssemblerCall(reinterpret_cast<intptr_t>(this->m_location), reinterpret_cast<void*>(function)); + } + + // This methods returns the value that will be set as the return address + // within a function that has been called from this call instruction. + void* calleeReturnAddressValue() + { + return this->m_location; + } + + private: + explicit CodeLocationCall(void* location) + : CodeLocationCommon(location) + { + } + }; + + // CodeLocationNearCall: + // + // A point in the JIT code at which there is a call instruction with near linkage. + class CodeLocationNearCall : public CodeLocationCommon { + friend class CodeLocationCommon; + friend class PatchBuffer; + public: + CodeLocationNearCall() + { + } + + template<typename FunctionSig> + void relink(FunctionSig* function) + { + AssemblerType::patchCall(reinterpret_cast<intptr_t>(this->m_location), reinterpret_cast<void*>(function)); + } + + // This methods returns the value that will be set as the return address + // within a function that has been called from this call instruction. + void* calleeReturnAddressValue() + { + return this->m_location; + } + + private: + explicit CodeLocationNearCall(void* location) + : CodeLocationCommon(location) + { + } + }; + + // CodeLocationDataLabel32: + // + // A point in the JIT code at which there is an int32_t immediate that may be repatched. 
+ class CodeLocationDataLabel32 : public CodeLocationCommon { + friend class CodeLocationCommon; + friend class PatchBuffer; + public: + CodeLocationDataLabel32() + { + } + + void repatch(int32_t value) + { + AssemblerType::patchImmediate(reinterpret_cast<intptr_t>(this->m_location), value); + } + + private: + explicit CodeLocationDataLabel32(void* location) + : CodeLocationCommon(location) + { + } + }; + + // CodeLocationDataLabelPtr: + // + // A point in the JIT code at which there is a void* immediate that may be repatched. + class CodeLocationDataLabelPtr : public CodeLocationCommon { + friend class CodeLocationCommon; + friend class PatchBuffer; + public: + CodeLocationDataLabelPtr() + { + } + + void repatch(void* value) + { + AssemblerType::patchPointer(reinterpret_cast<intptr_t>(this->m_location), reinterpret_cast<intptr_t>(value)); + } + + private: + explicit CodeLocationDataLabelPtr(void* location) + : CodeLocationCommon(location) + { + } + }; + + // ProcessorReturnAddress: + // + // This class can be used to relink a call identified by its return address. + class ProcessorReturnAddress { + public: + ProcessorReturnAddress(void* location) + : m_location(location) + { + } + + template<typename FunctionSig> + void relinkCallerToFunction(FunctionSig* newCalleeFunction) + { + AssemblerType::patchMacroAssemblerCall(reinterpret_cast<intptr_t>(this->m_location), reinterpret_cast<void*>(newCalleeFunction)); + } + + template<typename FunctionSig> + void relinkNearCallerToFunction(FunctionSig* newCalleeFunction) + { + AssemblerType::patchCall(reinterpret_cast<intptr_t>(this->m_location), reinterpret_cast<void*>(newCalleeFunction)); + } + + operator void*() + { + return m_location; + } + + private: + void* m_location; + }; + + + // Section 4: The patch buffer - utility to finalize code generation. 
+ + + // PatchBuffer: + // + // This class assists in linking code generated by the macro assembler, once code generation + // has been completed, and the code has been copied to is final location in memory. At this + // time pointers to labels within the code may be resolved, and relative offsets to external + // addresses may be fixed. + // + // Specifically: + // * Jump objects may be linked to external targets, + // * The address of Jump objects may taken, such that it can later be relinked. + // * The return address of a Jump object representing a call may be acquired. + // * The address of a Label pointing into the code may be resolved. + // * The value referenced by a DataLabel may be fixed. + // + // FIXME: distinguish between Calls & Jumps (make a specific call to obtain the return + // address of calls, as opposed to a point that can be used to later relink a Jump - + // possibly wrap the later up in an object that can do just that). + class PatchBuffer { + public: + PatchBuffer(void* code) + : m_code(code) + { + } + + CodeLocationLabel entry() + { + return CodeLocationLabel(m_code); + } + + void* trampolineAt(Label label) + { + return AssemblerType::getRelocatedAddress(m_code, label.m_label); + } + + // These methods are used to link or set values at code generation time. 
+ + template<typename FunctionSig> + void link(Call call, FunctionSig* function) + { + ASSERT(call.isFlagSet(Call::Linkable)); +#if PLATFORM(X86_64) + if (call.isFlagSet(Call::Near)) { + AssemblerType::linkCall(m_code, call.m_jmp, reinterpret_cast<void*>(function)); + } else { + intptr_t callLocation = reinterpret_cast<intptr_t>(AssemblerType::getRelocatedAddress(m_code, call.m_jmp)); + AssemblerType::patchMacroAssemblerCall(callLocation, reinterpret_cast<void*>(function)); + } +#else + AssemblerType::linkCall(m_code, call.m_jmp, reinterpret_cast<void*>(function)); +#endif + } + + template<typename FunctionSig> + void linkTailRecursive(Jump jump, FunctionSig* function) + { + AssemblerType::linkJump(m_code, jump.m_jmp, reinterpret_cast<void*>(function)); + } + + template<typename FunctionSig> + void linkTailRecursive(JumpList list, FunctionSig* function) + { + for (unsigned i = 0; i < list.m_jumps.size(); ++i) { + AssemblerType::linkJump(m_code, list.m_jumps[i].m_jmp, reinterpret_cast<void*>(function)); + } + } + + void link(Jump jump, CodeLocationLabel label) + { + AssemblerType::linkJump(m_code, jump.m_jmp, label.m_location); + } + + void link(JumpList list, CodeLocationLabel label) + { + for (unsigned i = 0; i < list.m_jumps.size(); ++i) + AssemblerType::linkJump(m_code, list.m_jumps[i].m_jmp, label.m_location); + } + + void patch(DataLabelPtr label, void* value) + { + AssemblerType::patchAddress(m_code, label.m_label, value); + } + + // These methods are used to obtain handles to allow the code to be relinked / repatched later. 
+ + CodeLocationCall locationOf(Call call) + { + ASSERT(call.isFlagSet(Call::Linkable)); + ASSERT(!call.isFlagSet(Call::Near)); + return CodeLocationCall(AssemblerType::getRelocatedAddress(m_code, call.m_jmp)); + } + + CodeLocationNearCall locationOfNearCall(Call call) + { + ASSERT(call.isFlagSet(Call::Linkable)); + ASSERT(call.isFlagSet(Call::Near)); + return CodeLocationNearCall(AssemblerType::getRelocatedAddress(m_code, call.m_jmp)); + } + + CodeLocationLabel locationOf(Label label) + { + return CodeLocationLabel(AssemblerType::getRelocatedAddress(m_code, label.m_label)); + } + + CodeLocationDataLabelPtr locationOf(DataLabelPtr label) + { + return CodeLocationDataLabelPtr(AssemblerType::getRelocatedAddress(m_code, label.m_label)); + } + + CodeLocationDataLabel32 locationOf(DataLabel32 label) + { + return CodeLocationDataLabel32(AssemblerType::getRelocatedAddress(m_code, label.m_label)); + } + + // This method obtains the return address of the call, given as an offset from + // the start of the code. 
+ unsigned returnAddressOffset(Call call) + { + return AssemblerType::getCallReturnOffset(call.m_jmp); + } + + private: + void* m_code; + }; + + + // Section 5: Misc admin methods + + size_t size() + { + return m_assembler.size(); + } + + void* copyCode(ExecutablePool* allocator) + { + return m_assembler.executableCopy(allocator); + } + + Label label() + { + return Label(this); + } + + Label align() + { + m_assembler.align(16); + return Label(this); + } + + ptrdiff_t differenceBetween(Label from, Jump to) + { + return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp); + } + + ptrdiff_t differenceBetween(Label from, Call to) + { + return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp); + } + + ptrdiff_t differenceBetween(Label from, Label to) + { + return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label); + } + + ptrdiff_t differenceBetween(Label from, DataLabelPtr to) + { + return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label); + } + + ptrdiff_t differenceBetween(Label from, DataLabel32 to) + { + return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label); + } + + ptrdiff_t differenceBetween(DataLabelPtr from, Jump to) + { + return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp); + } + + ptrdiff_t differenceBetween(DataLabelPtr from, Call to) + { + return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp); + } + +protected: + AssemblerType m_assembler; +}; + + +template <class AssemblerType> +typename AbstractMacroAssembler<AssemblerType>::CodeLocationLabel AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::labelAtOffset(int offset) +{ + return typename AbstractMacroAssembler::CodeLocationLabel(reinterpret_cast<char*>(m_location) + offset); +} + +template <class AssemblerType> +typename AbstractMacroAssembler<AssemblerType>::CodeLocationJump AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::jumpAtOffset(int offset) +{ + return 
typename AbstractMacroAssembler::CodeLocationJump(reinterpret_cast<char*>(m_location) + offset); +} + +template <class AssemblerType> +typename AbstractMacroAssembler<AssemblerType>::CodeLocationCall AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::callAtOffset(int offset) +{ + return typename AbstractMacroAssembler::CodeLocationCall(reinterpret_cast<char*>(m_location) + offset); +} + +template <class AssemblerType> +typename AbstractMacroAssembler<AssemblerType>::CodeLocationDataLabelPtr AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::dataLabelPtrAtOffset(int offset) +{ + return typename AbstractMacroAssembler::CodeLocationDataLabelPtr(reinterpret_cast<char*>(m_location) + offset); +} + +template <class AssemblerType> +typename AbstractMacroAssembler<AssemblerType>::CodeLocationDataLabel32 AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::dataLabel32AtOffset(int offset) +{ + return typename AbstractMacroAssembler::CodeLocationDataLabel32(reinterpret_cast<char*>(m_location) + offset); +} + + +} // namespace JSC + +#endif // ENABLE(ASSEMBLER) + +#endif // AbstractMacroAssembler_h diff --git a/JavaScriptCore/assembler/MacroAssembler.h b/JavaScriptCore/assembler/MacroAssembler.h index 9d24653..71ac1f6 100644 --- a/JavaScriptCore/assembler/MacroAssembler.h +++ b/JavaScriptCore/assembler/MacroAssembler.h @@ -30,1986 +30,301 @@ #if ENABLE(ASSEMBLER) -#include "X86Assembler.h" - -namespace JSC { +#if PLATFORM(X86) +#include "MacroAssemblerX86.h" +namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; }; -class MacroAssembler { -protected: - X86Assembler m_assembler; +#elif PLATFORM(X86_64) +#include "MacroAssemblerX86_64.h" +namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; }; -#if PLATFORM(X86_64) - static const X86::RegisterID scratchRegister = X86::r11; +#else +#error "The MacroAssembler is not supported on this platform." 
#endif + +namespace JSC { + +class MacroAssembler : public MacroAssemblerBase { public: - typedef X86::RegisterID RegisterID; - - // Note: do not rely on values in this enum, these will change (to 0..3). - enum Scale { - TimesOne = 1, - TimesTwo = 2, - TimesFour = 4, - TimesEight = 8, -#if PLATFORM(X86) - ScalePtr = TimesFour -#endif + + using MacroAssemblerBase::pop; + using MacroAssemblerBase::jump; + using MacroAssemblerBase::branch32; + using MacroAssemblerBase::branch16; #if PLATFORM(X86_64) - ScalePtr = TimesEight + using MacroAssemblerBase::branchPtr; + using MacroAssemblerBase::branchTestPtr; #endif - }; - MacroAssembler() + + // Platform agnostic onvenience functions, + // described in terms of other macro assembly methods. + void pop() { + addPtr(Imm32(sizeof(void*)), stackPointerRegister); } - size_t size() { return m_assembler.size(); } - void* copyCode(ExecutablePool* allocator) + void peek(RegisterID dest, int index = 0) { - return m_assembler.executableCopy(allocator); + loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest); } - - // Address: - // - // Describes a simple base-offset address. - struct Address { - explicit Address(RegisterID base, int32_t offset = 0) - : base(base) - , offset(offset) - { - } - - RegisterID base; - int32_t offset; - }; - - // ImplicitAddress: - // - // This class is used for explicit 'load' and 'store' operations - // (as opposed to situations in which a memory operand is provided - // to a generic operation, such as an integer arithmetic instruction). - // - // In the case of a load (or store) operation we want to permit - // addresses to be implicitly constructed, e.g. the two calls: - // - // load32(Address(addrReg), destReg); - // load32(addrReg, destReg); - // - // Are equivalent, and the explicit wrapping of the Address in the former - // is unnecessary. 
- struct ImplicitAddress { - ImplicitAddress(RegisterID base) - : base(base) - , offset(0) - { - } - - ImplicitAddress(Address address) - : base(address.base) - , offset(address.offset) - { - } - - RegisterID base; - int32_t offset; - }; - - // BaseIndex: - // - // Describes a complex addressing mode. - struct BaseIndex { - BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0) - : base(base) - , index(index) - , scale(scale) - , offset(offset) - { - } - - RegisterID base; - RegisterID index; - Scale scale; - int32_t offset; - }; - - // AbsoluteAddress: - // - // Describes an memory operand given by a pointer. For regular load & store - // operations an unwrapped void* will be used, rather than using this. - struct AbsoluteAddress { - explicit AbsoluteAddress(void* ptr) - : m_ptr(ptr) - { - } - - void* m_ptr; - }; - - - class Jump; - class PatchBuffer; - - // DataLabelPtr: - // - // A DataLabelPtr is used to refer to a location in the code containing a pointer to be - // patched after the code has been generated. - class DataLabelPtr { - friend class MacroAssembler; - friend class PatchBuffer; - - public: - DataLabelPtr() - { - } - - DataLabelPtr(MacroAssembler* masm) - : m_label(masm->m_assembler.label()) - { - } - - static void patch(void* address, void* value) - { - X86Assembler::patchPointer(reinterpret_cast<intptr_t>(address), reinterpret_cast<intptr_t>(value)); - } - - private: - X86Assembler::JmpDst m_label; - }; - - // DataLabel32: - // - // A DataLabelPtr is used to refer to a location in the code containing a pointer to be - // patched after the code has been generated. 
- class DataLabel32 { - friend class MacroAssembler; - friend class PatchBuffer; - - public: - DataLabel32() - { - } - - DataLabel32(MacroAssembler* masm) - : m_label(masm->m_assembler.label()) - { - } - - static void patch(void* address, int32_t value) - { - X86Assembler::patchImmediate(reinterpret_cast<intptr_t>(address), value); - } - - private: - X86Assembler::JmpDst m_label; - }; - - // Label: - // - // A Label records a point in the generated instruction stream, typically such that - // it may be used as a destination for a jump. - class Label { - friend class Jump; - friend class MacroAssembler; - friend class PatchBuffer; - - public: - Label() - { - } - - Label(MacroAssembler* masm) - : m_label(masm->m_assembler.label()) - { - } - - // FIXME: transitionary method, while we replace JmpSrces with Jumps. - operator X86Assembler::JmpDst() - { - return m_label; - } - - private: - X86Assembler::JmpDst m_label; - }; - - - // Jump: - // - // A jump object is a reference to a jump instruction that has been planted - // into the code buffer - it is typically used to link the jump, setting the - // relative offset such that when executed it will jump to the desired - // destination. - // - // Jump objects retain a pointer to the assembler for syntactic purposes - - // to allow the jump object to be able to link itself, e.g.: - // - // Jump forwardsBranch = jne32(Imm32(0), reg1); - // // ... - // forwardsBranch.link(); - // - // Jumps may also be linked to a Label. - class Jump { - friend class PatchBuffer; - friend class MacroAssembler; - - public: - Jump() - { - } - - // FIXME: transitionary method, while we replace JmpSrces with Jumps. 
- Jump(X86Assembler::JmpSrc jmp) - : m_jmp(jmp) - { - } - - void link(MacroAssembler* masm) - { - masm->m_assembler.link(m_jmp, masm->m_assembler.label()); - } - - void linkTo(Label label, MacroAssembler* masm) - { - masm->m_assembler.link(m_jmp, label.m_label); - } - - // FIXME: transitionary method, while we replace JmpSrces with Jumps. - operator X86Assembler::JmpSrc() - { - return m_jmp; - } - - static void patch(void* address, void* destination) - { - X86Assembler::patchBranchOffset(reinterpret_cast<intptr_t>(address), destination); - } - - private: - X86Assembler::JmpSrc m_jmp; - }; - - // JumpList: - // - // A JumpList is a set of Jump objects. - // All jumps in the set will be linked to the same destination. - class JumpList { - friend class PatchBuffer; - - public: - void link(MacroAssembler* masm) - { - size_t size = m_jumps.size(); - for (size_t i = 0; i < size; ++i) - m_jumps[i].link(masm); - m_jumps.clear(); - } - - void linkTo(Label label, MacroAssembler* masm) - { - size_t size = m_jumps.size(); - for (size_t i = 0; i < size; ++i) - m_jumps[i].linkTo(label, masm); - m_jumps.clear(); - } - - void append(Jump jump) - { - m_jumps.append(jump); - } - - void append(JumpList& other) - { - m_jumps.append(other.m_jumps.begin(), other.m_jumps.size()); - } - - bool empty() - { - return !m_jumps.size(); - } - - private: - Vector<Jump, 16> m_jumps; - }; - - - // PatchBuffer: - // - // This class assists in linking code generated by the macro assembler, once code generation - // has been completed, and the code has been copied to is final location in memory. At this - // time pointers to labels within the code may be resolved, and relative offsets to external - // addresses may be fixed. - // - // Specifically: - // * Jump objects may be linked to external targets, - // * The address of Jump objects may taken, such that it can later be relinked. - // * The return address of a Jump object representing a call may be acquired. 
- // * The address of a Label pointing into the code may be resolved. - // * The value referenced by a DataLabel may be fixed. - // - // FIXME: distinguish between Calls & Jumps (make a specific call to obtain the return - // address of calls, as opposed to a point that can be used to later relink a Jump - - // possibly wrap the later up in an object that can do just that). - class PatchBuffer { - public: - PatchBuffer(void* code) - : m_code(code) - { - } - - void link(Jump jump, void* target) - { - X86Assembler::link(m_code, jump.m_jmp, target); - } - - void link(JumpList list, void* target) - { - for (unsigned i = 0; i < list.m_jumps.size(); ++i) - X86Assembler::link(m_code, list.m_jumps[i], target); - } - - void* addressOf(Jump jump) - { - return X86Assembler::getRelocatedAddress(m_code, jump.m_jmp); - } - - void* addressOf(Label label) - { - return X86Assembler::getRelocatedAddress(m_code, label.m_label); - } - - void* addressOf(DataLabelPtr label) - { - return X86Assembler::getRelocatedAddress(m_code, label.m_label); - } - - void* addressOf(DataLabel32 label) - { - return X86Assembler::getRelocatedAddress(m_code, label.m_label); - } - - void setPtr(DataLabelPtr label, void* value) - { - X86Assembler::patchAddress(m_code, label.m_label, value); - } - - private: - void* m_code; - }; - - - // ImmPtr: - // - // A pointer sized immediate operand to an instruction - this is wrapped - // in a class requiring explicit construction in order to differentiate - // from pointers used as absolute addresses to memory operations - struct ImmPtr { - explicit ImmPtr(void* value) - : m_value(value) - { - } - - intptr_t asIntptr() - { - return reinterpret_cast<intptr_t>(m_value); - } - - void* m_value; - }; - - - // Imm32: - // - // A 32bit immediate operand to an instruction - this is wrapped in a - // class requiring explicit construction in order to prevent RegisterIDs - // (which are implemented as an enum) from accidentally being passed as - // immediate values. 
- struct Imm32 { - explicit Imm32(int32_t value) - : m_value(value) - { - } - -#if PLATFORM(X86) - explicit Imm32(ImmPtr ptr) - : m_value(ptr.asIntptr()) - { - } -#endif - - int32_t m_value; - }; - - // Integer arithmetic operations: - // - // Operations are typically two operand - operation(source, srcDst) - // For many operations the source may be an Imm32, the srcDst operand - // may often be a memory location (explictly described using an Address - // object). - - void addPtr(RegisterID src, RegisterID dest) + void poke(RegisterID src, int index = 0) { -#if PLATFORM(X86_64) - m_assembler.addq_rr(src, dest); -#else - add32(src, dest); -#endif + storePtr(src, Address(stackPointerRegister, (index * sizeof(void*)))); } - void addPtr(Imm32 imm, RegisterID srcDest) + void poke(Imm32 value, int index = 0) { -#if PLATFORM(X86_64) - m_assembler.addq_ir(imm.m_value, srcDest); -#else - add32(imm, srcDest); -#endif + store32(value, Address(stackPointerRegister, (index * sizeof(void*)))); } - void addPtr(ImmPtr imm, RegisterID dest) + void poke(ImmPtr imm, int index = 0) { -#if PLATFORM(X86_64) - move(imm, scratchRegister); - m_assembler.addq_rr(scratchRegister, dest); -#else - add32(Imm32(imm), dest); -#endif + storePtr(imm, Address(stackPointerRegister, (index * sizeof(void*)))); } - void addPtr(Imm32 imm, RegisterID src, RegisterID dest) + + // Backwards banches, these are currently all implemented using existing forwards branch mechanisms. 
+ void branchPtr(Condition cond, RegisterID op1, ImmPtr imm, Label target) { - m_assembler.leal_mr(imm.m_value, src, dest); + branchPtr(cond, op1, imm).linkTo(target, this); } - void add32(RegisterID src, RegisterID dest) + void branch32(Condition cond, RegisterID op1, RegisterID op2, Label target) { - m_assembler.addl_rr(src, dest); + branch32(cond, op1, op2).linkTo(target, this); } - void add32(Imm32 imm, Address address) + void branch32(Condition cond, RegisterID op1, Imm32 imm, Label target) { - m_assembler.addl_im(imm.m_value, address.offset, address.base); + branch32(cond, op1, imm).linkTo(target, this); } - void add32(Imm32 imm, RegisterID dest) + void branch32(Condition cond, RegisterID left, Address right, Label target) { - m_assembler.addl_ir(imm.m_value, dest); + branch32(cond, left, right).linkTo(target, this); } - - void add32(Imm32 imm, AbsoluteAddress address) - { -#if PLATFORM(X86_64) - move(ImmPtr(address.m_ptr), scratchRegister); - add32(imm, Address(scratchRegister)); -#else - m_assembler.addl_im(imm.m_value, address.m_ptr); -#endif - } - - void add32(Address src, RegisterID dest) + + void branch16(Condition cond, BaseIndex left, RegisterID right, Label target) { - m_assembler.addl_mr(src.offset, src.base, dest); + branch16(cond, left, right).linkTo(target, this); } - void andPtr(RegisterID src, RegisterID dest) + void branchTestPtr(Condition cond, RegisterID reg, Label target) { -#if PLATFORM(X86_64) - m_assembler.andq_rr(src, dest); -#else - and32(src, dest); -#endif + branchTestPtr(cond, reg).linkTo(target, this); } - void andPtr(Imm32 imm, RegisterID srcDest) + void jump(Label target) { -#if PLATFORM(X86_64) - m_assembler.andq_ir(imm.m_value, srcDest); -#else - and32(imm, srcDest); -#endif + jump().linkTo(target, this); } - void and32(RegisterID src, RegisterID dest) - { - m_assembler.andl_rr(src, dest); - } - void and32(Imm32 imm, RegisterID dest) + // Ptr methods + // On 32-bit platforms (i.e. 
x86), these methods directly map onto their 32-bit equivalents. +#if !PLATFORM(X86_64) + void addPtr(RegisterID src, RegisterID dest) { - m_assembler.andl_ir(imm.m_value, dest); + add32(src, dest); } - void lshift32(Imm32 imm, RegisterID dest) - { - m_assembler.shll_i8r(imm.m_value, dest); - } - - void lshift32(RegisterID shift_amount, RegisterID dest) + void addPtr(Imm32 imm, RegisterID srcDest) { - // On x86 we can only shift by ecx; if asked to shift by another register we'll - // need rejig the shift amount into ecx first, and restore the registers afterwards. - if (shift_amount != X86::ecx) { - swap(shift_amount, X86::ecx); - - // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx" - if (dest == shift_amount) - m_assembler.shll_CLr(X86::ecx); - // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx" - else if (dest == X86::ecx) - m_assembler.shll_CLr(shift_amount); - // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx" - else - m_assembler.shll_CLr(dest); - - swap(shift_amount, X86::ecx); - } else - m_assembler.shll_CLr(dest); + add32(imm, srcDest); } - - // Take the value from dividend, divide it by divisor, and put the remainder in remainder. - // For now, this operation has specific register requirements, and the three register must - // be unique. It is unfortunate to expose this in the MacroAssembler interface, however - // given the complexity to fix, the fact that it is not uncommmon for processors to have - // specific register requirements on this operation (e.g. 
Mips result in 'hi'), or to not - // support a hardware divide at all, it may not be - void mod32(RegisterID divisor, RegisterID dividend, RegisterID remainder) - { -#ifdef NDEBUG -#pragma unused(dividend,remainder) -#else - ASSERT((dividend == X86::eax) && (remainder == X86::edx)); - ASSERT((dividend != divisor) && (remainder != divisor)); -#endif - m_assembler.cdq(); - m_assembler.idivl_r(divisor); + void addPtr(ImmPtr imm, RegisterID dest) + { + add32(Imm32(imm), dest); } - void mul32(RegisterID src, RegisterID dest) + void addPtr(Imm32 imm, RegisterID src, RegisterID dest) { - m_assembler.imull_rr(src, dest); + add32(imm, src, dest); } - - void mul32(Imm32 imm, RegisterID src, RegisterID dest) + + void andPtr(RegisterID src, RegisterID dest) { - m_assembler.imull_i32r(src, imm.m_value, dest); + and32(src, dest); } - - void not32(RegisterID srcDest) + + void andPtr(Imm32 imm, RegisterID srcDest) { - m_assembler.notl_r(srcDest); + and32(imm, srcDest); } - + void orPtr(RegisterID src, RegisterID dest) { -#if PLATFORM(X86_64) - m_assembler.orq_rr(src, dest); -#else or32(src, dest); -#endif } void orPtr(ImmPtr imm, RegisterID dest) { -#if PLATFORM(X86_64) - move(imm, scratchRegister); - m_assembler.orq_rr(scratchRegister, dest); -#else or32(Imm32(imm), dest); -#endif } void orPtr(Imm32 imm, RegisterID dest) { -#if PLATFORM(X86_64) - m_assembler.orq_ir(imm.m_value, dest); -#else or32(imm, dest); -#endif - } - - void or32(RegisterID src, RegisterID dest) - { - m_assembler.orl_rr(src, dest); - } - - void or32(Imm32 imm, RegisterID dest) - { - m_assembler.orl_ir(imm.m_value, dest); } void rshiftPtr(RegisterID shift_amount, RegisterID dest) { -#if PLATFORM(X86_64) - // On x86 we can only shift by ecx; if asked to shift by another register we'll - // need rejig the shift amount into ecx first, and restore the registers afterwards. - if (shift_amount != X86::ecx) { - swap(shift_amount, X86::ecx); - - // E.g. 
transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx" - if (dest == shift_amount) - m_assembler.sarq_CLr(X86::ecx); - // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx" - else if (dest == X86::ecx) - m_assembler.sarq_CLr(shift_amount); - // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx" - else - m_assembler.sarq_CLr(dest); - - swap(shift_amount, X86::ecx); - } else - m_assembler.sarq_CLr(dest); -#else rshift32(shift_amount, dest); -#endif } void rshiftPtr(Imm32 imm, RegisterID dest) { -#if PLATFORM(X86_64) - m_assembler.sarq_i8r(imm.m_value, dest); -#else rshift32(imm, dest); -#endif - } - - void rshift32(RegisterID shift_amount, RegisterID dest) - { - // On x86 we can only shift by ecx; if asked to shift by another register we'll - // need rejig the shift amount into ecx first, and restore the registers afterwards. - if (shift_amount != X86::ecx) { - swap(shift_amount, X86::ecx); - - // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx" - if (dest == shift_amount) - m_assembler.sarl_CLr(X86::ecx); - // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx" - else if (dest == X86::ecx) - m_assembler.sarl_CLr(shift_amount); - // E.g. 
transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx" - else - m_assembler.sarl_CLr(dest); - - swap(shift_amount, X86::ecx); - } else - m_assembler.sarl_CLr(dest); - } - - void rshift32(Imm32 imm, RegisterID dest) - { - m_assembler.sarl_i8r(imm.m_value, dest); } void subPtr(RegisterID src, RegisterID dest) { -#if PLATFORM(X86_64) - m_assembler.subq_rr(src, dest); -#else sub32(src, dest); -#endif } void subPtr(Imm32 imm, RegisterID dest) { -#if PLATFORM(X86_64) - m_assembler.subq_ir(imm.m_value, dest); -#else sub32(imm, dest); -#endif } void subPtr(ImmPtr imm, RegisterID dest) { -#if PLATFORM(X86_64) - move(imm, scratchRegister); - m_assembler.subq_rr(scratchRegister, dest); -#else sub32(Imm32(imm), dest); -#endif - } - - void sub32(RegisterID src, RegisterID dest) - { - m_assembler.subl_rr(src, dest); - } - - void sub32(Imm32 imm, RegisterID dest) - { - m_assembler.subl_ir(imm.m_value, dest); - } - - void sub32(Imm32 imm, Address address) - { - m_assembler.subl_im(imm.m_value, address.offset, address.base); - } - - void sub32(Imm32 imm, AbsoluteAddress address) - { -#if PLATFORM(X86_64) - move(ImmPtr(address.m_ptr), scratchRegister); - sub32(imm, Address(scratchRegister)); -#else - m_assembler.subl_im(imm.m_value, address.m_ptr); -#endif - } - - void sub32(Address src, RegisterID dest) - { - m_assembler.subl_mr(src.offset, src.base, dest); } void xorPtr(RegisterID src, RegisterID dest) { -#if PLATFORM(X86_64) - m_assembler.xorq_rr(src, dest); -#else xor32(src, dest); -#endif } void xorPtr(Imm32 imm, RegisterID srcDest) { -#if PLATFORM(X86_64) - m_assembler.xorq_ir(imm.m_value, srcDest); -#else xor32(imm, srcDest); -#endif } - void xor32(RegisterID src, RegisterID dest) - { - m_assembler.xorl_rr(src, dest); - } - - void xor32(Imm32 imm, RegisterID srcDest) - { - m_assembler.xorl_ir(imm.m_value, srcDest); - } - - - // Memory access operations: - // - // Loads are of the form load(address, destination) and stores of the form - // 
store(source, address). The source for a store may be an Imm32. Address - // operand objects to loads and store will be implicitly constructed if a - // register is passed. void loadPtr(ImplicitAddress address, RegisterID dest) { -#if PLATFORM(X86_64) - m_assembler.movq_mr(address.offset, address.base, dest); -#else load32(address, dest); -#endif - } - - DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest) - { -#if PLATFORM(X86_64) - m_assembler.movq_mr_disp32(address.offset, address.base, dest); - return DataLabel32(this); -#else - m_assembler.movl_mr_disp32(address.offset, address.base, dest); - return DataLabel32(this); -#endif } void loadPtr(BaseIndex address, RegisterID dest) { -#if PLATFORM(X86_64) - m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest); -#else load32(address, dest); -#endif } void loadPtr(void* address, RegisterID dest) { -#if PLATFORM(X86_64) - if (dest == X86::eax) - m_assembler.movq_mEAX(address); - else { - move(X86::eax, dest); - m_assembler.movq_mEAX(address); - swap(X86::eax, dest); - } -#else load32(address, dest); -#endif - } - - void load32(ImplicitAddress address, RegisterID dest) - { - m_assembler.movl_mr(address.offset, address.base, dest); } - void load32(BaseIndex address, RegisterID dest) + DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest) { - m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest); + return load32WithAddressOffsetPatch(address, dest); } - void load32(void* address, RegisterID dest) + void setPtr(Condition cond, RegisterID left, Imm32 right, RegisterID dest) { -#if PLATFORM(X86_64) - if (dest == X86::eax) - m_assembler.movl_mEAX(address); - else { - move(X86::eax, dest); - m_assembler.movl_mEAX(address); - swap(X86::eax, dest); - } -#else - m_assembler.movl_mr(address, dest); -#endif - } - - void load16(BaseIndex address, RegisterID dest) - { - m_assembler.movzwl_mr(address.offset, address.base, 
address.index, address.scale, dest); + set32(cond, left, right, dest); } void storePtr(RegisterID src, ImplicitAddress address) { -#if PLATFORM(X86_64) - m_assembler.movq_rm(src, address.offset, address.base); -#else store32(src, address); -#endif - } - - DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address) - { -#if PLATFORM(X86_64) - m_assembler.movq_rm_disp32(src, address.offset, address.base); - return DataLabel32(this); -#else - m_assembler.movl_rm_disp32(src, address.offset, address.base); - return DataLabel32(this); -#endif } void storePtr(RegisterID src, BaseIndex address) { -#if PLATFORM(X86_64) - m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale); -#else store32(src, address); -#endif } void storePtr(ImmPtr imm, ImplicitAddress address) { -#if PLATFORM(X86_64) - move(imm, scratchRegister); - storePtr(scratchRegister, address); -#else - m_assembler.movl_i32m(imm.asIntptr(), address.offset, address.base); -#endif + store32(Imm32(imm), address); } -#if !PLATFORM(X86_64) void storePtr(ImmPtr imm, void* address) { store32(Imm32(imm), address); } -#endif - - DataLabelPtr storePtrWithPatch(Address address) - { -#if PLATFORM(X86_64) - m_assembler.movq_i64r(0, scratchRegister); - DataLabelPtr label(this); - storePtr(scratchRegister, address); - return label; -#else - m_assembler.movl_i32m(0, address.offset, address.base); - return DataLabelPtr(this); -#endif - } - - void store32(RegisterID src, ImplicitAddress address) - { - m_assembler.movl_rm(src, address.offset, address.base); - } - - void store32(RegisterID src, BaseIndex address) - { - m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale); - } - - void store32(Imm32 imm, ImplicitAddress address) - { - m_assembler.movl_i32m(imm.m_value, address.offset, address.base); - } - - void store32(Imm32 imm, void* address) - { -#if PLATFORM(X86_64) - move(X86::eax, scratchRegister); - move(imm, X86::eax); - 
m_assembler.movl_EAXm(address); - move(scratchRegister, X86::eax); -#else - m_assembler.movl_i32m(imm.m_value, address); -#endif - } - - - // Stack manipulation operations: - // - // The ABI is assumed to provide a stack abstraction to memory, - // containing machine word sized units of data. Push and pop - // operations add and remove a single register sized unit of data - // to or from the stack. Peek and poke operations read or write - // values on the stack, without moving the current stack position. - - void pop(RegisterID dest) - { - m_assembler.pop_r(dest); - } - - void push(RegisterID src) - { - m_assembler.push_r(src); - } - - void push(Address address) - { - m_assembler.push_m(address.offset, address.base); - } - - void push(Imm32 imm) - { - m_assembler.push_i32(imm.m_value); - } - - void pop() - { - addPtr(Imm32(sizeof(void*)), X86::esp); - } - - void peek(RegisterID dest, int index = 0) - { - loadPtr(Address(X86::esp, (index * sizeof(void *))), dest); - } - - void poke(RegisterID src, int index = 0) - { - storePtr(src, Address(X86::esp, (index * sizeof(void *)))); - } - - void poke(Imm32 value, int index = 0) - { - store32(value, Address(X86::esp, (index * sizeof(void *)))); - } - - void poke(ImmPtr imm, int index = 0) - { - storePtr(imm, Address(X86::esp, (index * sizeof(void *)))); - } - - // Register move operations: - // - // Move values in registers. - - void move(Imm32 imm, RegisterID dest) - { - // Note: on 64-bit the Imm32 value is zero extended into the register, it - // may be useful to have a separate version that sign extends the value? - if (!imm.m_value) - m_assembler.xorl_rr(dest, dest); - else - m_assembler.movl_i32r(imm.m_value, dest); - } - - void move(RegisterID src, RegisterID dest) - { - // Note: on 64-bit this is is a full register move; perhaps it would be - // useful to have separate move32 & movePtr, with move32 zero extending? 
-#if PLATFORM(X86_64) - m_assembler.movq_rr(src, dest); -#else - m_assembler.movl_rr(src, dest); -#endif - } - - void move(ImmPtr imm, RegisterID dest) - { -#if PLATFORM(X86_64) - if (CAN_SIGN_EXTEND_U32_64(imm.asIntptr())) - m_assembler.movl_i32r(static_cast<int32_t>(imm.asIntptr()), dest); - else - m_assembler.movq_i64r(imm.asIntptr(), dest); -#else - m_assembler.movl_i32r(imm.asIntptr(), dest); -#endif - } - - void swap(RegisterID reg1, RegisterID reg2) - { -#if PLATFORM(X86_64) - m_assembler.xchgq_rr(reg1, reg2); -#else - m_assembler.xchgl_rr(reg1, reg2); -#endif - } - - void signExtend32ToPtr(RegisterID src, RegisterID dest) - { -#if PLATFORM(X86_64) - m_assembler.movsxd_rr(src, dest); -#else - if (src != dest) - move(src, dest); -#endif - } - - void zeroExtend32ToPtr(RegisterID src, RegisterID dest) - { -#if PLATFORM(X86_64) - m_assembler.movl_rr(src, dest); -#else - if (src != dest) - move(src, dest); -#endif - } - - - // Forwards / external control flow operations: - // - // This set of jump and conditional branch operations return a Jump - // object which may linked at a later point, allow forwards jump, - // or jumps that will require external linkage (after the code has been - // relocated). - // - // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge - // respecitvely, for unsigned comparisons the names b, a, be, and ae are - // used (representing the names 'below' and 'above'). - // - // Operands to the comparision are provided in the expected order, e.g. - // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when - // treated as a signed 32bit value, is less than or equal to 5. - // - // jz and jnz test whether the first operand is equal to zero, and take - // an optional second operand of a mask under which to perform the test. 
- -private: - void compareImm32ForBranch(RegisterID left, int32_t right) - { - m_assembler.cmpl_ir(right, left); - } - - void compareImm32ForBranchEquality(RegisterID reg, int32_t imm) - { - if (!imm) - m_assembler.testl_rr(reg, reg); - else - m_assembler.cmpl_ir(imm, reg); - } - - void compareImm32ForBranchEquality(Address address, int32_t imm) - { - m_assembler.cmpl_im(imm, address.offset, address.base); - } - - void testImm32(RegisterID reg, Imm32 mask) - { - // if we are only interested in the low seven bits, this can be tested with a testb - if (mask.m_value == -1) - m_assembler.testl_rr(reg, reg); - else if ((mask.m_value & ~0x7f) == 0) - m_assembler.testb_i8r(mask.m_value, reg); - else - m_assembler.testl_i32r(mask.m_value, reg); - } - - void testImm32(Address address, Imm32 mask) - { - if (mask.m_value == -1) - m_assembler.cmpl_im(0, address.offset, address.base); - else - m_assembler.testl_i32m(mask.m_value, address.offset, address.base); - } - - void testImm32(BaseIndex address, Imm32 mask) - { - if (mask.m_value == -1) - m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale); - else - m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale); - } - -#if PLATFORM(X86_64) - void compareImm64ForBranch(RegisterID left, int32_t right) - { - m_assembler.cmpq_ir(right, left); - } - - void compareImm64ForBranchEquality(RegisterID reg, int32_t imm) - { - if (!imm) - m_assembler.testq_rr(reg, reg); - else - m_assembler.cmpq_ir(imm, reg); - } - - void testImm64(RegisterID reg, Imm32 mask) - { - // if we are only interested in the low seven bits, this can be tested with a testb - if (mask.m_value == -1) - m_assembler.testq_rr(reg, reg); - else if ((mask.m_value & ~0x7f) == 0) - m_assembler.testb_i8r(mask.m_value, reg); - else - m_assembler.testq_i32r(mask.m_value, reg); - } - - void testImm64(Address address, Imm32 mask) - { - if (mask.m_value == -1) - m_assembler.cmpq_im(0, address.offset, 
address.base); - else - m_assembler.testq_i32m(mask.m_value, address.offset, address.base); - } - - void testImm64(BaseIndex address, Imm32 mask) - { - if (mask.m_value == -1) - m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale); - else - m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale); - } -#endif - -public: - Jump ja32(RegisterID left, Imm32 right) - { - compareImm32ForBranch(left, right.m_value); - return Jump(m_assembler.ja()); - } - - Jump jaePtr(RegisterID left, RegisterID right) - { -#if PLATFORM(X86_64) - m_assembler.cmpq_rr(right, left); - return Jump(m_assembler.jae()); -#else - return jae32(left, right); -#endif - } - - Jump jaePtr(RegisterID reg, ImmPtr ptr) - { -#if PLATFORM(X86_64) - intptr_t imm = ptr.asIntptr(); - if (CAN_SIGN_EXTEND_32_64(imm)) { - compareImm64ForBranch(reg, imm); - return Jump(m_assembler.jae()); - } else { - move(ptr, scratchRegister); - return jaePtr(reg, scratchRegister); - } -#else - return jae32(reg, Imm32(ptr)); -#endif - } - - Jump jae32(RegisterID left, RegisterID right) - { - m_assembler.cmpl_rr(right, left); - return Jump(m_assembler.jae()); - } - - Jump jae32(RegisterID left, Imm32 right) - { - compareImm32ForBranch(left, right.m_value); - return Jump(m_assembler.jae()); - } - - Jump jae32(RegisterID left, Address right) - { - m_assembler.cmpl_mr(right.offset, right.base, left); - return Jump(m_assembler.jae()); - } - - Jump jae32(Address left, RegisterID right) - { - m_assembler.cmpl_rm(right, left.offset, left.base); - return Jump(m_assembler.jae()); - } - - Jump jbPtr(RegisterID left, RegisterID right) - { -#if PLATFORM(X86_64) - m_assembler.cmpq_rr(right, left); - return Jump(m_assembler.jb()); -#else - return jb32(left, right); -#endif - } - - Jump jbPtr(RegisterID reg, ImmPtr ptr) - { -#if PLATFORM(X86_64) - intptr_t imm = ptr.asIntptr(); - if (CAN_SIGN_EXTEND_32_64(imm)) { - compareImm64ForBranch(reg, imm); - return 
Jump(m_assembler.jb()); - } else { - move(ptr, scratchRegister); - return jbPtr(reg, scratchRegister); - } -#else - return jb32(reg, Imm32(ptr)); -#endif - } - - Jump jb32(RegisterID left, RegisterID right) - { - m_assembler.cmpl_rr(right, left); - return Jump(m_assembler.jb()); - } - - Jump jb32(RegisterID left, Imm32 right) - { - compareImm32ForBranch(left, right.m_value); - return Jump(m_assembler.jb()); - } - - Jump jb32(RegisterID left, Address right) - { - m_assembler.cmpl_mr(right.offset, right.base, left); - return Jump(m_assembler.jb()); - } - - Jump jePtr(RegisterID op1, RegisterID op2) - { -#if PLATFORM(X86_64) - m_assembler.cmpq_rr(op1, op2); - return Jump(m_assembler.je()); -#else - return je32(op1, op2); -#endif - } - - Jump jePtr(RegisterID reg, Address address) - { -#if PLATFORM(X86_64) - m_assembler.cmpq_rm(reg, address.offset, address.base); -#else - m_assembler.cmpl_rm(reg, address.offset, address.base); -#endif - return Jump(m_assembler.je()); - } - - Jump jePtr(RegisterID reg, ImmPtr ptr) - { -#if PLATFORM(X86_64) - intptr_t imm = ptr.asIntptr(); - if (CAN_SIGN_EXTEND_32_64(imm)) { - compareImm64ForBranchEquality(reg, imm); - return Jump(m_assembler.je()); - } else { - move(ptr, scratchRegister); - return jePtr(scratchRegister, reg); - } -#else - return je32(reg, Imm32(ptr)); -#endif - } - - Jump jePtr(Address address, ImmPtr imm) - { -#if PLATFORM(X86_64) - move(imm, scratchRegister); - return jePtr(scratchRegister, address); -#else - return je32(address, Imm32(imm)); -#endif - } - - Jump je32(RegisterID op1, RegisterID op2) - { - m_assembler.cmpl_rr(op1, op2); - return Jump(m_assembler.je()); - } - - Jump je32(Address op1, RegisterID op2) - { - m_assembler.cmpl_mr(op1.offset, op1.base, op2); - return Jump(m_assembler.je()); - } - - Jump je32(RegisterID reg, Imm32 imm) - { - compareImm32ForBranchEquality(reg, imm.m_value); - return Jump(m_assembler.je()); - } - - Jump je32(Address address, Imm32 imm) - { - 
compareImm32ForBranchEquality(address, imm.m_value); - return Jump(m_assembler.je()); - } - - Jump je16(RegisterID op1, BaseIndex op2) - { - m_assembler.cmpw_rm(op1, op2.offset, op2.base, op2.index, op2.scale); - return Jump(m_assembler.je()); - } - - Jump jg32(RegisterID left, RegisterID right) - { - m_assembler.cmpl_rr(right, left); - return Jump(m_assembler.jg()); - } - - Jump jg32(RegisterID reg, Address address) - { - m_assembler.cmpl_mr(address.offset, address.base, reg); - return Jump(m_assembler.jg()); - } - - Jump jgePtr(RegisterID left, RegisterID right) - { -#if PLATFORM(X86_64) - m_assembler.cmpq_rr(right, left); - return Jump(m_assembler.jge()); -#else - return jge32(left, right); -#endif - } - - Jump jgePtr(RegisterID reg, ImmPtr ptr) - { -#if PLATFORM(X86_64) - intptr_t imm = ptr.asIntptr(); - if (CAN_SIGN_EXTEND_32_64(imm)) { - compareImm64ForBranch(reg, imm); - return Jump(m_assembler.jge()); - } else { - move(ptr, scratchRegister); - return jgePtr(reg, scratchRegister); - } -#else - return jge32(reg, Imm32(ptr)); -#endif - } - - Jump jge32(RegisterID left, RegisterID right) - { - m_assembler.cmpl_rr(right, left); - return Jump(m_assembler.jge()); - } - - Jump jge32(RegisterID left, Imm32 right) - { - compareImm32ForBranch(left, right.m_value); - return Jump(m_assembler.jge()); - } - - Jump jlPtr(RegisterID left, RegisterID right) - { -#if PLATFORM(X86_64) - m_assembler.cmpq_rr(right, left); - return Jump(m_assembler.jl()); -#else - return jl32(left, right); -#endif - } - - Jump jlPtr(RegisterID reg, ImmPtr ptr) - { -#if PLATFORM(X86_64) - intptr_t imm = ptr.asIntptr(); - if (CAN_SIGN_EXTEND_32_64(imm)) { - compareImm64ForBranch(reg, imm); - return Jump(m_assembler.jl()); - } else { - move(ptr, scratchRegister); - return jlPtr(reg, scratchRegister); - } -#else - return jl32(reg, Imm32(ptr)); -#endif - } - - Jump jl32(RegisterID left, RegisterID right) - { - m_assembler.cmpl_rr(right, left); - return Jump(m_assembler.jl()); - } - - Jump 
jl32(RegisterID left, Imm32 right) - { - compareImm32ForBranch(left, right.m_value); - return Jump(m_assembler.jl()); - } - - Jump jlePtr(RegisterID left, RegisterID right) - { -#if PLATFORM(X86_64) - m_assembler.cmpq_rr(right, left); - return Jump(m_assembler.jle()); -#else - return jle32(left, right); -#endif - } - - Jump jlePtr(RegisterID reg, ImmPtr ptr) - { -#if PLATFORM(X86_64) - intptr_t imm = ptr.asIntptr(); - if (CAN_SIGN_EXTEND_32_64(imm)) { - compareImm64ForBranch(reg, imm); - return Jump(m_assembler.jle()); - } else { - move(ptr, scratchRegister); - return jlePtr(reg, scratchRegister); - } -#else - return jle32(reg, Imm32(ptr)); -#endif - } - - Jump jle32(RegisterID left, RegisterID right) - { - m_assembler.cmpl_rr(right, left); - return Jump(m_assembler.jle()); - } - - Jump jle32(RegisterID left, Imm32 right) - { - compareImm32ForBranch(left, right.m_value); - return Jump(m_assembler.jle()); - } - - Jump jnePtr(RegisterID op1, RegisterID op2) - { -#if PLATFORM(X86_64) - m_assembler.cmpq_rr(op1, op2); - return Jump(m_assembler.jne()); -#else - return jne32(op1, op2); -#endif - } - Jump jnePtr(RegisterID reg, Address address) - { -#if PLATFORM(X86_64) - m_assembler.cmpq_rm(reg, address.offset, address.base); -#else - m_assembler.cmpl_rm(reg, address.offset, address.base); -#endif - return Jump(m_assembler.jne()); - } - - Jump jnePtr(RegisterID reg, AbsoluteAddress address) - { -#if PLATFORM(X86_64) - move(ImmPtr(address.m_ptr), scratchRegister); - return jnePtr(reg, Address(scratchRegister)); -#else - m_assembler.cmpl_rm(reg, address.m_ptr); - return Jump(m_assembler.jne()); -#endif - } - - Jump jnePtr(RegisterID reg, ImmPtr ptr) - { -#if PLATFORM(X86_64) - intptr_t imm = ptr.asIntptr(); - if (CAN_SIGN_EXTEND_32_64(imm)) { - compareImm64ForBranchEquality(reg, imm); - return Jump(m_assembler.jne()); - } else { - move(ptr, scratchRegister); - return jnePtr(scratchRegister, reg); - } -#else - return jne32(reg, Imm32(ptr)); -#endif - } - - Jump 
jnePtr(Address address, ImmPtr imm) - { -#if PLATFORM(X86_64) - move(imm, scratchRegister); - return jnePtr(scratchRegister, address); -#else - return jne32(address, Imm32(imm)); -#endif - } - -#if !PLATFORM(X86_64) - Jump jnePtr(AbsoluteAddress address, ImmPtr imm) + DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address) { - m_assembler.cmpl_im(imm.asIntptr(), address.m_ptr); - return Jump(m_assembler.jne()); + return store32WithAddressOffsetPatch(src, address); } -#endif - Jump jnePtrWithPatch(RegisterID reg, DataLabelPtr& dataLabel, ImmPtr initialValue = ImmPtr(0)) - { -#if PLATFORM(X86_64) - m_assembler.movq_i64r(initialValue.asIntptr(), scratchRegister); - dataLabel = DataLabelPtr(this); - return jnePtr(scratchRegister, reg); -#else - m_assembler.cmpl_ir_force32(initialValue.asIntptr(), reg); - dataLabel = DataLabelPtr(this); - return Jump(m_assembler.jne()); -#endif - } - Jump jnePtrWithPatch(Address address, DataLabelPtr& dataLabel, ImmPtr initialValue = ImmPtr(0)) + Jump branchPtr(Condition cond, RegisterID left, RegisterID right) { -#if PLATFORM(X86_64) - m_assembler.movq_i64r(initialValue.asIntptr(), scratchRegister); - dataLabel = DataLabelPtr(this); - return jnePtr(scratchRegister, address); -#else - m_assembler.cmpl_im_force32(initialValue.asIntptr(), address.offset, address.base); - dataLabel = DataLabelPtr(this); - return Jump(m_assembler.jne()); -#endif + return branch32(cond, left, right); } - Jump jne32(RegisterID op1, RegisterID op2) + Jump branchPtr(Condition cond, RegisterID left, ImmPtr right) { - m_assembler.cmpl_rr(op1, op2); - return Jump(m_assembler.jne()); + return branch32(cond, left, Imm32(right)); } - Jump jne32(RegisterID reg, Imm32 imm) + Jump branchPtr(Condition cond, RegisterID left, Address right) { - compareImm32ForBranchEquality(reg, imm.m_value); - return Jump(m_assembler.jne()); + return branch32(cond, left, right); } - Jump jne32(Address address, Imm32 imm) - { - compareImm32ForBranchEquality(address, 
imm.m_value); - return Jump(m_assembler.jne()); - } - - Jump jne32(Address address, RegisterID reg) + Jump branchPtr(Condition cond, Address left, RegisterID right) { - m_assembler.cmpl_rm(reg, address.offset, address.base); - return Jump(m_assembler.jne()); - } - - Jump jnzPtr(RegisterID reg, RegisterID mask) - { -#if PLATFORM(X86_64) - m_assembler.testq_rr(reg, mask); - return Jump(m_assembler.jne()); -#else - return jnz32(reg, mask); -#endif + return branch32(cond, left, right); } - Jump jnzPtr(RegisterID reg, Imm32 mask = Imm32(-1)) + Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right) { -#if PLATFORM(X86_64) - testImm64(reg, mask); - return Jump(m_assembler.jne()); -#else - return jnz32(reg, mask); -#endif + return branch32(cond, left, right); } - Jump jnzPtr(RegisterID reg, ImmPtr mask) + Jump branchPtr(Condition cond, Address left, ImmPtr right) { -#if PLATFORM(X86_64) - move(mask, scratchRegister); - m_assembler.testq_rr(scratchRegister, reg); - return Jump(m_assembler.jne()); -#else - return jnz32(reg, Imm32(mask)); -#endif + return branch32(cond, left, Imm32(right)); } - Jump jnzPtr(Address address, Imm32 mask = Imm32(-1)) + Jump branchPtr(Condition cond, AbsoluteAddress left, ImmPtr right) { -#if PLATFORM(X86_64) - testImm64(address, mask); - return Jump(m_assembler.jne()); -#else - return jnz32(address, mask); -#endif + return branch32(cond, left, Imm32(right)); } - Jump jnz32(RegisterID reg, RegisterID mask) + Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask) { - m_assembler.testl_rr(reg, mask); - return Jump(m_assembler.jne()); + return branchTest32(cond, reg, mask); } - Jump jnz32(RegisterID reg, Imm32 mask = Imm32(-1)) + Jump branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1)) { - testImm32(reg, mask); - return Jump(m_assembler.jne()); + return branchTest32(cond, reg, mask); } - Jump jnz32(Address address, Imm32 mask = Imm32(-1)) + Jump branchTestPtr(Condition cond, Address address, Imm32 mask = 
Imm32(-1)) { - testImm32(address, mask); - return Jump(m_assembler.jne()); + return branchTest32(cond, address, mask); } - Jump jzPtr(RegisterID reg, RegisterID mask) + Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1)) { -#if PLATFORM(X86_64) - m_assembler.testq_rr(reg, mask); - return Jump(m_assembler.je()); -#else - return jz32(reg, mask); -#endif + return branchTest32(cond, address, mask); } - Jump jzPtr(RegisterID reg, Imm32 mask = Imm32(-1)) - { -#if PLATFORM(X86_64) - testImm64(reg, mask); - return Jump(m_assembler.je()); -#else - return jz32(reg, mask); -#endif - } - Jump jzPtr(RegisterID reg, ImmPtr mask) + Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest) { -#if PLATFORM(X86_64) - move(mask, scratchRegister); - m_assembler.testq_rr(scratchRegister, reg); - return Jump(m_assembler.je()); -#else - return jz32(reg, Imm32(mask)); -#endif + return branchAdd32(cond, src, dest); } - Jump jzPtr(Address address, Imm32 mask = Imm32(-1)) + Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest) { -#if PLATFORM(X86_64) - testImm64(address, mask); - return Jump(m_assembler.je()); -#else - return jz32(address, mask); -#endif + return branchSub32(cond, imm, dest); } - - Jump jzPtr(BaseIndex address, Imm32 mask = Imm32(-1)) - { -#if PLATFORM(X86_64) - testImm64(address, mask); - return Jump(m_assembler.je()); -#else - return jz32(address, mask); #endif - } - - Jump jz32(RegisterID reg, RegisterID mask) - { - m_assembler.testl_rr(reg, mask); - return Jump(m_assembler.je()); - } - - Jump jz32(RegisterID reg, Imm32 mask = Imm32(-1)) - { - testImm32(reg, mask); - return Jump(m_assembler.je()); - } - - Jump jz32(Address address, Imm32 mask = Imm32(-1)) - { - testImm32(address, mask); - return Jump(m_assembler.je()); - } - - Jump jz32(BaseIndex address, Imm32 mask = Imm32(-1)) - { - testImm32(address, mask); - return Jump(m_assembler.je()); - } - - Jump jump() - { - return Jump(m_assembler.jmp()); - } - - - // Backwards, local 
control flow operations: - // - // These operations provide a shorter notation for local - // backwards branches, which may be both more convenient - // for the user, and for the programmer, and for the - // assembler (allowing shorter values to be used in - // relative offsets). - // - // The code sequence: - // - // Label topOfLoop(this); - // // ... - // jne32(reg1, reg2, topOfLoop); - // - // Is equivalent to the longer, potentially less efficient form: - // - // Label topOfLoop(this); - // // ... - // jne32(reg1, reg2).linkTo(topOfLoop); - - void jae32(RegisterID left, Address right, Label target) - { - jae32(left, right).linkTo(target, this); - } - - void je32(RegisterID op1, Imm32 imm, Label target) - { - je32(op1, imm).linkTo(target, this); - } - - void je16(RegisterID op1, BaseIndex op2, Label target) - { - je16(op1, op2).linkTo(target, this); - } - - void jl32(RegisterID left, Imm32 right, Label target) - { - jl32(left, right).linkTo(target, this); - } - - void jle32(RegisterID left, RegisterID right, Label target) - { - jle32(left, right).linkTo(target, this); - } - - void jnePtr(RegisterID op1, ImmPtr imm, Label target) - { - jnePtr(op1, imm).linkTo(target, this); - } - - void jne32(RegisterID op1, RegisterID op2, Label target) - { - jne32(op1, op2).linkTo(target, this); - } - - void jne32(RegisterID op1, Imm32 imm, Label target) - { - jne32(op1, imm).linkTo(target, this); - } - - void jzPtr(RegisterID reg, Label target) - { - jzPtr(reg).linkTo(target, this); - } - - void jump(Label target) - { - m_assembler.link(m_assembler.jmp(), target.m_label); - } - - void jump(RegisterID target) - { - m_assembler.jmp_r(target); - } - - // Address is a memory location containing the address to jump to - void jump(Address address) - { - m_assembler.jmp_m(address.offset, address.base); - } - - - // Arithmetic control flow operations: - // - // This set of conditional branch operations branch based - // on the result of an arithmetic operation. 
The operation - // is performed as normal, storing the result. - // - // * jz operations branch if the result is zero. - // * jo operations branch if the (signed) arithmetic - // operation caused an overflow to occur. - - Jump jnzSubPtr(Imm32 imm, RegisterID dest) - { - subPtr(imm, dest); - return Jump(m_assembler.jne()); - } - - Jump jnzSub32(Imm32 imm, RegisterID dest) - { - sub32(imm, dest); - return Jump(m_assembler.jne()); - } - - Jump joAddPtr(RegisterID src, RegisterID dest) - { - addPtr(src, dest); - return Jump(m_assembler.jo()); - } - - Jump joAdd32(RegisterID src, RegisterID dest) - { - add32(src, dest); - return Jump(m_assembler.jo()); - } - - Jump joAdd32(Imm32 imm, RegisterID dest) - { - add32(imm, dest); - return Jump(m_assembler.jo()); - } - - Jump joMul32(RegisterID src, RegisterID dest) - { - mul32(src, dest); - return Jump(m_assembler.jo()); - } - - Jump joMul32(Imm32 imm, RegisterID src, RegisterID dest) - { - mul32(imm, src, dest); - return Jump(m_assembler.jo()); - } - - Jump joSub32(RegisterID src, RegisterID dest) - { - sub32(src, dest); - return Jump(m_assembler.jo()); - } - - Jump joSub32(Imm32 imm, RegisterID dest) - { - sub32(imm, dest); - return Jump(m_assembler.jo()); - } - - Jump jzSubPtr(Imm32 imm, RegisterID dest) - { - subPtr(imm, dest); - return Jump(m_assembler.je()); - } - - Jump jzSub32(Imm32 imm, RegisterID dest) - { - sub32(imm, dest); - return Jump(m_assembler.je()); - } - - // Miscellaneous operations: - - void breakpoint() - { - m_assembler.int3(); - } - - Jump call() - { - return Jump(m_assembler.call()); - } - - // FIXME: why does this return a Jump object? - it can't be linked. - // This may be to get a reference to the return address of the call. - // - // This should probably be handled by a separate label type to a regular - // jump. Todo: add a CallLabel type, for the regular call - can be linked - // like a jump (possibly a subclass of jump?, or possibly casts to a Jump). 
- // Also add a CallReturnLabel type for this to return (just a more JmpDsty - // form of label, can get the void* after the code has been linked, but can't - // try to link it like a Jump object), and let the CallLabel be cast into a - // CallReturnLabel. - Jump call(RegisterID target) - { - return Jump(m_assembler.call(target)); - } - - Label label() - { - return Label(this); - } - - Label align() - { - m_assembler.align(16); - return Label(this); - } - - ptrdiff_t differenceBetween(Label from, Jump to) - { - return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_jmp); - } - - ptrdiff_t differenceBetween(Label from, Label to) - { - return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_label); - } - - ptrdiff_t differenceBetween(Label from, DataLabelPtr to) - { - return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_label); - } - - ptrdiff_t differenceBetween(Label from, DataLabel32 to) - { - return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_label); - } - - ptrdiff_t differenceBetween(DataLabelPtr from, Jump to) - { - return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_jmp); - } - - void ret() - { - m_assembler.ret(); - } - - void sete32(RegisterID src, RegisterID srcDest) - { - m_assembler.cmpl_rr(srcDest, src); - m_assembler.sete_r(srcDest); - m_assembler.movzbl_rr(srcDest, srcDest); - } - - void sete32(Imm32 imm, RegisterID srcDest) - { - compareImm32ForBranchEquality(srcDest, imm.m_value); - m_assembler.sete_r(srcDest); - m_assembler.movzbl_rr(srcDest, srcDest); - } - - void setne32(RegisterID src, RegisterID srcDest) - { - m_assembler.cmpl_rr(srcDest, src); - m_assembler.setne_r(srcDest); - m_assembler.movzbl_rr(srcDest, srcDest); - } - - void setne32(Imm32 imm, RegisterID srcDest) - { - compareImm32ForBranchEquality(srcDest, imm.m_value); - m_assembler.setne_r(srcDest); - m_assembler.movzbl_rr(srcDest, srcDest); - } - - // FIXME: - // The mask should be optional... 
paerhaps the argument order should be - // dest-src, operations always have a dest? ... possibly not true, considering - // asm ops like test, or pseudo ops like pop(). - void setnz32(Address address, Imm32 mask, RegisterID dest) - { - testImm32(address, mask); - m_assembler.setnz_r(dest); - m_assembler.movzbl_rr(dest, dest); - } - - void setz32(Address address, Imm32 mask, RegisterID dest) - { - testImm32(address, mask); - m_assembler.setz_r(dest); - m_assembler.movzbl_rr(dest, dest); - } }; } // namespace JSC diff --git a/JavaScriptCore/assembler/MacroAssemblerX86.h b/JavaScriptCore/assembler/MacroAssemblerX86.h new file mode 100644 index 0000000..b85b8b2 --- /dev/null +++ b/JavaScriptCore/assembler/MacroAssemblerX86.h @@ -0,0 +1,126 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef MacroAssemblerX86_h +#define MacroAssemblerX86_h + +#include <wtf/Platform.h> + +#if ENABLE(ASSEMBLER) && PLATFORM(X86) + +#include "MacroAssemblerX86Common.h" + +namespace JSC { + +class MacroAssemblerX86 : public MacroAssemblerX86Common { +public: + static const Scale ScalePtr = TimesFour; + + using MacroAssemblerX86Common::add32; + using MacroAssemblerX86Common::sub32; + using MacroAssemblerX86Common::load32; + using MacroAssemblerX86Common::store32; + using MacroAssemblerX86Common::branch32; + using MacroAssemblerX86Common::call; + + void add32(Imm32 imm, RegisterID src, RegisterID dest) + { + m_assembler.leal_mr(imm.m_value, src, dest); + } + + void add32(Imm32 imm, AbsoluteAddress address) + { + m_assembler.addl_im(imm.m_value, address.m_ptr); + } + + void sub32(Imm32 imm, AbsoluteAddress address) + { + m_assembler.subl_im(imm.m_value, address.m_ptr); + } + + void load32(void* address, RegisterID dest) + { + m_assembler.movl_mr(address, dest); + } + + void store32(Imm32 imm, void* address) + { + m_assembler.movl_i32m(imm.m_value, address); + } + + Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right) + { + m_assembler.cmpl_rm(right, left.m_ptr); + return Jump(m_assembler.jCC(cond)); + } + + Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right) + { + m_assembler.cmpl_im(right.m_value, left.m_ptr); + return Jump(m_assembler.jCC(cond)); + } + + Call call() + { + return Call(m_assembler.call(), Call::Linkable); + } + + Call 
tailRecursiveCall() + { + return Call::fromTailJump(jump()); + } + + Call makeTailRecursiveCall(Jump oldJump) + { + return Call::fromTailJump(oldJump); + } + + + Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0)) + { + m_assembler.cmpl_ir_force32(initialRightValue.asIntptr(), left); + dataLabel = DataLabelPtr(this); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0)) + { + m_assembler.cmpl_im_force32(initialRightValue.asIntptr(), left.offset, left.base); + dataLabel = DataLabelPtr(this); + return Jump(m_assembler.jCC(cond)); + } + + DataLabelPtr storePtrWithPatch(Address address) + { + m_assembler.movl_i32m(0, address.offset, address.base); + return DataLabelPtr(this); + } +}; + +} // namespace JSC + +#endif // ENABLE(ASSEMBLER) + +#endif // MacroAssemblerX86_h diff --git a/JavaScriptCore/assembler/MacroAssemblerX86Common.h b/JavaScriptCore/assembler/MacroAssemblerX86Common.h new file mode 100644 index 0000000..5fcd25d --- /dev/null +++ b/JavaScriptCore/assembler/MacroAssemblerX86Common.h @@ -0,0 +1,583 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef MacroAssemblerX86Common_h +#define MacroAssemblerX86Common_h + +#include <wtf/Platform.h> + +#if ENABLE(ASSEMBLER) + +#include "X86Assembler.h" +#include "AbstractMacroAssembler.h" + +namespace JSC { + +class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> { +public: + + typedef X86Assembler::Condition Condition; + static const Condition Equal = X86Assembler::ConditionE; + static const Condition NotEqual = X86Assembler::ConditionNE; + static const Condition Above = X86Assembler::ConditionA; + static const Condition AboveOrEqual = X86Assembler::ConditionAE; + static const Condition Below = X86Assembler::ConditionB; + static const Condition BelowOrEqual = X86Assembler::ConditionBE; + static const Condition GreaterThan = X86Assembler::ConditionG; + static const Condition GreaterThanOrEqual = X86Assembler::ConditionGE; + static const Condition LessThan = X86Assembler::ConditionL; + static const Condition LessThanOrEqual = X86Assembler::ConditionLE; + static const Condition Overflow = X86Assembler::ConditionO; + static const Condition Zero = X86Assembler::ConditionE; + static const Condition NonZero = X86Assembler::ConditionNE; + + static const RegisterID stackPointerRegister = X86::esp; + + // Integer arithmetic operations: + 
// + // Operations are typically two operand - operation(source, srcDst) + // For many operations the source may be an Imm32, the srcDst operand + // may often be a memory location (explictly described using an Address + // object). + + void add32(RegisterID src, RegisterID dest) + { + m_assembler.addl_rr(src, dest); + } + + void add32(Imm32 imm, Address address) + { + m_assembler.addl_im(imm.m_value, address.offset, address.base); + } + + void add32(Imm32 imm, RegisterID dest) + { + m_assembler.addl_ir(imm.m_value, dest); + } + + void add32(Address src, RegisterID dest) + { + m_assembler.addl_mr(src.offset, src.base, dest); + } + + void and32(RegisterID src, RegisterID dest) + { + m_assembler.andl_rr(src, dest); + } + + void and32(Imm32 imm, RegisterID dest) + { + m_assembler.andl_ir(imm.m_value, dest); + } + + void lshift32(Imm32 imm, RegisterID dest) + { + m_assembler.shll_i8r(imm.m_value, dest); + } + + void lshift32(RegisterID shift_amount, RegisterID dest) + { + // On x86 we can only shift by ecx; if asked to shift by another register we'll + // need rejig the shift amount into ecx first, and restore the registers afterwards. + if (shift_amount != X86::ecx) { + swap(shift_amount, X86::ecx); + + // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx" + if (dest == shift_amount) + m_assembler.shll_CLr(X86::ecx); + // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx" + else if (dest == X86::ecx) + m_assembler.shll_CLr(shift_amount); + // E.g. 
transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx" + else + m_assembler.shll_CLr(dest); + + swap(shift_amount, X86::ecx); + } else + m_assembler.shll_CLr(dest); + } + + void mul32(RegisterID src, RegisterID dest) + { + m_assembler.imull_rr(src, dest); + } + + void mul32(Imm32 imm, RegisterID src, RegisterID dest) + { + m_assembler.imull_i32r(src, imm.m_value, dest); + } + + void not32(RegisterID srcDest) + { + m_assembler.notl_r(srcDest); + } + + void or32(RegisterID src, RegisterID dest) + { + m_assembler.orl_rr(src, dest); + } + + void or32(Imm32 imm, RegisterID dest) + { + m_assembler.orl_ir(imm.m_value, dest); + } + + void rshift32(RegisterID shift_amount, RegisterID dest) + { + // On x86 we can only shift by ecx; if asked to shift by another register we'll + // need rejig the shift amount into ecx first, and restore the registers afterwards. + if (shift_amount != X86::ecx) { + swap(shift_amount, X86::ecx); + + // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx" + if (dest == shift_amount) + m_assembler.sarl_CLr(X86::ecx); + // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx" + else if (dest == X86::ecx) + m_assembler.sarl_CLr(shift_amount); + // E.g. 
transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx" + else + m_assembler.sarl_CLr(dest); + + swap(shift_amount, X86::ecx); + } else + m_assembler.sarl_CLr(dest); + } + + void rshift32(Imm32 imm, RegisterID dest) + { + m_assembler.sarl_i8r(imm.m_value, dest); + } + + void sub32(RegisterID src, RegisterID dest) + { + m_assembler.subl_rr(src, dest); + } + + void sub32(Imm32 imm, RegisterID dest) + { + m_assembler.subl_ir(imm.m_value, dest); + } + + void sub32(Imm32 imm, Address address) + { + m_assembler.subl_im(imm.m_value, address.offset, address.base); + } + + void sub32(Address src, RegisterID dest) + { + m_assembler.subl_mr(src.offset, src.base, dest); + } + + void xor32(RegisterID src, RegisterID dest) + { + m_assembler.xorl_rr(src, dest); + } + + void xor32(Imm32 imm, RegisterID srcDest) + { + m_assembler.xorl_ir(imm.m_value, srcDest); + } + + + // Memory access operations: + // + // Loads are of the form load(address, destination) and stores of the form + // store(source, address). The source for a store may be an Imm32. Address + // operand objects to loads and store will be implicitly constructed if a + // register is passed. 
+ + void load32(ImplicitAddress address, RegisterID dest) + { + m_assembler.movl_mr(address.offset, address.base, dest); + } + + void load32(BaseIndex address, RegisterID dest) + { + m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest); + } + + DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest) + { + m_assembler.movl_mr_disp32(address.offset, address.base, dest); + return DataLabel32(this); + } + + void load16(BaseIndex address, RegisterID dest) + { + m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest); + } + + DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address) + { + m_assembler.movl_rm_disp32(src, address.offset, address.base); + return DataLabel32(this); + } + + void store32(RegisterID src, ImplicitAddress address) + { + m_assembler.movl_rm(src, address.offset, address.base); + } + + void store32(RegisterID src, BaseIndex address) + { + m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale); + } + + void store32(Imm32 imm, ImplicitAddress address) + { + m_assembler.movl_i32m(imm.m_value, address.offset, address.base); + } + + + // Stack manipulation operations: + // + // The ABI is assumed to provide a stack abstraction to memory, + // containing machine word sized units of data. Push and pop + // operations add and remove a single register sized unit of data + // to or from the stack. Peek and poke operations read or write + // values on the stack, without moving the current stack position. + + void pop(RegisterID dest) + { + m_assembler.pop_r(dest); + } + + void push(RegisterID src) + { + m_assembler.push_r(src); + } + + void push(Address address) + { + m_assembler.push_m(address.offset, address.base); + } + + void push(Imm32 imm) + { + m_assembler.push_i32(imm.m_value); + } + + // Register move operations: + // + // Move values in registers. 
+ + void move(Imm32 imm, RegisterID dest) + { + // Note: on 64-bit the Imm32 value is zero extended into the register, it + // may be useful to have a separate version that sign extends the value? + if (!imm.m_value) + m_assembler.xorl_rr(dest, dest); + else + m_assembler.movl_i32r(imm.m_value, dest); + } + +#if PLATFORM(X86_64) + void move(RegisterID src, RegisterID dest) + { + // Note: on 64-bit this is is a full register move; perhaps it would be + // useful to have separate move32 & movePtr, with move32 zero extending? + m_assembler.movq_rr(src, dest); + } + + void move(ImmPtr imm, RegisterID dest) + { + if (CAN_SIGN_EXTEND_U32_64(imm.asIntptr())) + m_assembler.movl_i32r(static_cast<int32_t>(imm.asIntptr()), dest); + else + m_assembler.movq_i64r(imm.asIntptr(), dest); + } + + void swap(RegisterID reg1, RegisterID reg2) + { + m_assembler.xchgq_rr(reg1, reg2); + } + + void signExtend32ToPtr(RegisterID src, RegisterID dest) + { + m_assembler.movsxd_rr(src, dest); + } + + void zeroExtend32ToPtr(RegisterID src, RegisterID dest) + { + m_assembler.movl_rr(src, dest); + } +#else + void move(RegisterID src, RegisterID dest) + { + m_assembler.movl_rr(src, dest); + } + + void move(ImmPtr imm, RegisterID dest) + { + m_assembler.movl_i32r(imm.asIntptr(), dest); + } + + void swap(RegisterID reg1, RegisterID reg2) + { + m_assembler.xchgl_rr(reg1, reg2); + } + + void signExtend32ToPtr(RegisterID src, RegisterID dest) + { + if (src != dest) + move(src, dest); + } + + void zeroExtend32ToPtr(RegisterID src, RegisterID dest) + { + if (src != dest) + move(src, dest); + } +#endif + + + // Forwards / external control flow operations: + // + // This set of jump and conditional branch operations return a Jump + // object which may linked at a later point, allow forwards jump, + // or jumps that will require external linkage (after the code has been + // relocated). 
+ // + // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge + // respecitvely, for unsigned comparisons the names b, a, be, and ae are + // used (representing the names 'below' and 'above'). + // + // Operands to the comparision are provided in the expected order, e.g. + // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when + // treated as a signed 32bit value, is less than or equal to 5. + // + // jz and jnz test whether the first operand is equal to zero, and take + // an optional second operand of a mask under which to perform the test. + +public: + Jump branch32(Condition cond, RegisterID left, RegisterID right) + { + m_assembler.cmpl_rr(right, left); + return Jump(m_assembler.jCC(cond)); + } + + Jump branch32(Condition cond, RegisterID left, Imm32 right) + { + if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) + m_assembler.testl_rr(left, left); + else + m_assembler.cmpl_ir(right.m_value, left); + return Jump(m_assembler.jCC(cond)); + } + + Jump branch32(Condition cond, RegisterID left, Address right) + { + m_assembler.cmpl_mr(right.offset, right.base, left); + return Jump(m_assembler.jCC(cond)); + } + + Jump branch32(Condition cond, Address left, RegisterID right) + { + m_assembler.cmpl_rm(right, left.offset, left.base); + return Jump(m_assembler.jCC(cond)); + } + + Jump branch32(Condition cond, Address left, Imm32 right) + { + m_assembler.cmpl_im(right.m_value, left.offset, left.base); + return Jump(m_assembler.jCC(cond)); + } + + Jump branch16(Condition cond, BaseIndex left, RegisterID right) + { + m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask) + { + ASSERT((cond == Zero) || (cond == NonZero)); + m_assembler.testl_rr(reg, mask); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1)) + { + ASSERT((cond == 
Zero) || (cond == NonZero)); + // if we are only interested in the low seven bits, this can be tested with a testb + if (mask.m_value == -1) + m_assembler.testl_rr(reg, reg); + else if ((mask.m_value & ~0x7f) == 0) + m_assembler.testb_i8r(mask.m_value, reg); + else + m_assembler.testl_i32r(mask.m_value, reg); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1)) + { + ASSERT((cond == Zero) || (cond == NonZero)); + if (mask.m_value == -1) + m_assembler.cmpl_im(0, address.offset, address.base); + else + m_assembler.testl_i32m(mask.m_value, address.offset, address.base); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1)) + { + ASSERT((cond == Zero) || (cond == NonZero)); + if (mask.m_value == -1) + m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale); + else + m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale); + return Jump(m_assembler.jCC(cond)); + } + + Jump jump() + { + return Jump(m_assembler.jmp()); + } + + void jump(RegisterID target) + { + m_assembler.jmp_r(target); + } + + // Address is a memory location containing the address to jump to + void jump(Address address) + { + m_assembler.jmp_m(address.offset, address.base); + } + + + // Arithmetic control flow operations: + // + // This set of conditional branch operations branch based + // on the result of an arithmetic operation. The operation + // is performed as normal, storing the result. + // + // * jz operations branch if the result is zero. + // * jo operations branch if the (signed) arithmetic + // operation caused an overflow to occur. 
+ + Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest) + { + ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero)); + add32(src, dest); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest) + { + ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero)); + add32(imm, dest); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchMul32(Condition cond, RegisterID src, RegisterID dest) + { + ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero)); + mul32(src, dest); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest) + { + ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero)); + mul32(imm, src, dest); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchSub32(Condition cond, RegisterID src, RegisterID dest) + { + ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero)); + sub32(src, dest); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest) + { + ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero)); + sub32(imm, dest); + return Jump(m_assembler.jCC(cond)); + } + + + // Miscellaneous operations: + + void breakpoint() + { + m_assembler.int3(); + } + + Call nearCall() + { + return Call(m_assembler.call(), Call::LinkableNear); + } + + Call call(RegisterID target) + { + return Call(m_assembler.call(target), Call::None); + } + + void ret() + { + m_assembler.ret(); + } + + void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest) + { + m_assembler.cmpl_rr(right, left); + m_assembler.setCC_r(cond, dest); + m_assembler.movzbl_rr(dest, dest); + } + + void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest) + { + if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) + m_assembler.testl_rr(left, left); + else + m_assembler.cmpl_ir(right.m_value, left); + 
m_assembler.setCC_r(cond, dest); + m_assembler.movzbl_rr(dest, dest); + } + + // FIXME: + // The mask should be optional... paerhaps the argument order should be + // dest-src, operations always have a dest? ... possibly not true, considering + // asm ops like test, or pseudo ops like pop(). + void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest) + { + if (mask.m_value == -1) + m_assembler.cmpl_im(0, address.offset, address.base); + else + m_assembler.testl_i32m(mask.m_value, address.offset, address.base); + m_assembler.setCC_r(cond, dest); + m_assembler.movzbl_rr(dest, dest); + } +}; + +} // namespace JSC + +#endif // ENABLE(ASSEMBLER) + +#endif // MacroAssemblerX86Common_h diff --git a/JavaScriptCore/assembler/MacroAssemblerX86_64.h b/JavaScriptCore/assembler/MacroAssemblerX86_64.h new file mode 100644 index 0000000..971787b --- /dev/null +++ b/JavaScriptCore/assembler/MacroAssemblerX86_64.h @@ -0,0 +1,398 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef MacroAssemblerX86_64_h +#define MacroAssemblerX86_64_h + +#include <wtf/Platform.h> + +#if ENABLE(ASSEMBLER) && PLATFORM(X86_64) + +#include "MacroAssemblerX86Common.h" + +namespace JSC { + +class MacroAssemblerX86_64 : public MacroAssemblerX86Common { +protected: + static const X86::RegisterID scratchRegister = X86::r11; + +public: + static const Scale ScalePtr = TimesEight; + + using MacroAssemblerX86Common::add32; + using MacroAssemblerX86Common::sub32; + using MacroAssemblerX86Common::load32; + using MacroAssemblerX86Common::store32; + using MacroAssemblerX86Common::call; + + void add32(Imm32 imm, AbsoluteAddress address) + { + move(ImmPtr(address.m_ptr), scratchRegister); + add32(imm, Address(scratchRegister)); + } + + void sub32(Imm32 imm, AbsoluteAddress address) + { + move(ImmPtr(address.m_ptr), scratchRegister); + sub32(imm, Address(scratchRegister)); + } + + void load32(void* address, RegisterID dest) + { + if (dest == X86::eax) + m_assembler.movl_mEAX(address); + else { + move(X86::eax, dest); + m_assembler.movl_mEAX(address); + swap(X86::eax, dest); + } + } + + void store32(Imm32 imm, void* address) + { + move(X86::eax, scratchRegister); + move(imm, X86::eax); + m_assembler.movl_EAXm(address); + move(scratchRegister, X86::eax); + } + + Call call() + { + DataLabelPtr label = moveWithPatch(ImmPtr(0), scratchRegister); + Call result = Call(m_assembler.call(scratchRegister), Call::Linkable); + ASSERT(differenceBetween(label, result) == 
REPTACH_OFFSET_CALL_R11); + return result; + } + + Call tailRecursiveCall() + { + DataLabelPtr label = moveWithPatch(ImmPtr(0), scratchRegister); + Jump newJump = Jump(m_assembler.jmp_r(scratchRegister)); + ASSERT(differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11); + return Call::fromTailJump(newJump); + } + + Call makeTailRecursiveCall(Jump oldJump) + { + oldJump.link(this); + DataLabelPtr label = moveWithPatch(ImmPtr(0), scratchRegister); + Jump newJump = Jump(m_assembler.jmp_r(scratchRegister)); + ASSERT(differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11); + return Call::fromTailJump(newJump); + } + + + void addPtr(RegisterID src, RegisterID dest) + { + m_assembler.addq_rr(src, dest); + } + + void addPtr(Imm32 imm, RegisterID srcDest) + { + m_assembler.addq_ir(imm.m_value, srcDest); + } + + void addPtr(ImmPtr imm, RegisterID dest) + { + move(imm, scratchRegister); + m_assembler.addq_rr(scratchRegister, dest); + } + + void addPtr(Imm32 imm, RegisterID src, RegisterID dest) + { + m_assembler.leal_mr(imm.m_value, src, dest); + } + + void andPtr(RegisterID src, RegisterID dest) + { + m_assembler.andq_rr(src, dest); + } + + void andPtr(Imm32 imm, RegisterID srcDest) + { + m_assembler.andq_ir(imm.m_value, srcDest); + } + + void orPtr(RegisterID src, RegisterID dest) + { + m_assembler.orq_rr(src, dest); + } + + void orPtr(ImmPtr imm, RegisterID dest) + { + move(imm, scratchRegister); + m_assembler.orq_rr(scratchRegister, dest); + } + + void orPtr(Imm32 imm, RegisterID dest) + { + m_assembler.orq_ir(imm.m_value, dest); + } + + void rshiftPtr(RegisterID shift_amount, RegisterID dest) + { + // On x86 we can only shift by ecx; if asked to shift by another register we'll + // need rejig the shift amount into ecx first, and restore the registers afterwards. + if (shift_amount != X86::ecx) { + swap(shift_amount, X86::ecx); + + // E.g. 
transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx" + if (dest == shift_amount) + m_assembler.sarq_CLr(X86::ecx); + // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx" + else if (dest == X86::ecx) + m_assembler.sarq_CLr(shift_amount); + // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx" + else + m_assembler.sarq_CLr(dest); + + swap(shift_amount, X86::ecx); + } else + m_assembler.sarq_CLr(dest); + } + + void rshiftPtr(Imm32 imm, RegisterID dest) + { + m_assembler.sarq_i8r(imm.m_value, dest); + } + + void subPtr(RegisterID src, RegisterID dest) + { + m_assembler.subq_rr(src, dest); + } + + void subPtr(Imm32 imm, RegisterID dest) + { + m_assembler.subq_ir(imm.m_value, dest); + } + + void subPtr(ImmPtr imm, RegisterID dest) + { + move(imm, scratchRegister); + m_assembler.subq_rr(scratchRegister, dest); + } + + void xorPtr(RegisterID src, RegisterID dest) + { + m_assembler.xorq_rr(src, dest); + } + + void xorPtr(Imm32 imm, RegisterID srcDest) + { + m_assembler.xorq_ir(imm.m_value, srcDest); + } + + + void loadPtr(ImplicitAddress address, RegisterID dest) + { + m_assembler.movq_mr(address.offset, address.base, dest); + } + + void loadPtr(BaseIndex address, RegisterID dest) + { + m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest); + } + + void loadPtr(void* address, RegisterID dest) + { + if (dest == X86::eax) + m_assembler.movq_mEAX(address); + else { + move(X86::eax, dest); + m_assembler.movq_mEAX(address); + swap(X86::eax, dest); + } + } + + DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest) + { + m_assembler.movq_mr_disp32(address.offset, address.base, dest); + return DataLabel32(this); + } + + void storePtr(RegisterID src, ImplicitAddress address) + { + m_assembler.movq_rm(src, address.offset, address.base); + } + + void storePtr(RegisterID src, BaseIndex address) + { + m_assembler.movq_rm(src, 
address.offset, address.base, address.index, address.scale); + } + + void storePtr(ImmPtr imm, ImplicitAddress address) + { + intptr_t ptr = imm.asIntptr(); + if (CAN_SIGN_EXTEND_32_64(ptr)) + m_assembler.movq_i32m(static_cast<int>(ptr), address.offset, address.base); + else { + move(imm, scratchRegister); + storePtr(scratchRegister, address); + } + } + + DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address) + { + m_assembler.movq_rm_disp32(src, address.offset, address.base); + return DataLabel32(this); + } + + void setPtr(Condition cond, RegisterID left, Imm32 right, RegisterID dest) + { + if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) + m_assembler.testq_rr(left, left); + else + m_assembler.cmpq_ir(right.m_value, left); + m_assembler.setCC_r(cond, dest); + m_assembler.movzbl_rr(dest, dest); + } + + Jump branchPtr(Condition cond, RegisterID left, RegisterID right) + { + m_assembler.cmpq_rr(right, left); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchPtr(Condition cond, RegisterID left, ImmPtr right) + { + intptr_t imm = right.asIntptr(); + if (CAN_SIGN_EXTEND_32_64(imm)) { + if (!imm) + m_assembler.testq_rr(left, left); + else + m_assembler.cmpq_ir(imm, left); + return Jump(m_assembler.jCC(cond)); + } else { + move(right, scratchRegister); + return branchPtr(cond, left, scratchRegister); + } + } + + Jump branchPtr(Condition cond, RegisterID left, Address right) + { + m_assembler.cmpq_mr(right.offset, right.base, left); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right) + { + move(ImmPtr(left.m_ptr), scratchRegister); + return branchPtr(cond, Address(scratchRegister), right); + } + + Jump branchPtr(Condition cond, Address left, RegisterID right) + { + m_assembler.cmpq_rm(right, left.offset, left.base); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchPtr(Condition cond, Address left, ImmPtr right) + { + move(right, scratchRegister); + return 
branchPtr(cond, left, scratchRegister); + } + + Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask) + { + m_assembler.testq_rr(reg, mask); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1)) + { + // if we are only interested in the low seven bits, this can be tested with a testb + if (mask.m_value == -1) + m_assembler.testq_rr(reg, reg); + else if ((mask.m_value & ~0x7f) == 0) + m_assembler.testb_i8r(mask.m_value, reg); + else + m_assembler.testq_i32r(mask.m_value, reg); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1)) + { + if (mask.m_value == -1) + m_assembler.cmpq_im(0, address.offset, address.base); + else + m_assembler.testq_i32m(mask.m_value, address.offset, address.base); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1)) + { + if (mask.m_value == -1) + m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale); + else + m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale); + return Jump(m_assembler.jCC(cond)); + } + + + Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest) + { + ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero)); + addPtr(src, dest); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest) + { + ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero)); + subPtr(imm, dest); + return Jump(m_assembler.jCC(cond)); + } + + DataLabelPtr moveWithPatch(ImmPtr initialValue, RegisterID dest) + { + m_assembler.movq_i64r(initialValue.asIntptr(), dest); + return DataLabelPtr(this); + } + + Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0)) + { + dataLabel = moveWithPatch(initialRightValue, 
scratchRegister); + return branchPtr(cond, left, scratchRegister); + } + + Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0)) + { + dataLabel = moveWithPatch(initialRightValue, scratchRegister); + return branchPtr(cond, left, scratchRegister); + } + + DataLabelPtr storePtrWithPatch(Address address) + { + DataLabelPtr label = moveWithPatch(ImmPtr(0), scratchRegister); + storePtr(scratchRegister, address); + return label; + } +}; + +} // namespace JSC + +#endif // ENABLE(ASSEMBLER) + +#endif // MacroAssemblerX86_64_h diff --git a/JavaScriptCore/assembler/X86Assembler.h b/JavaScriptCore/assembler/X86Assembler.h index de23e45..bcafda1 100644 --- a/JavaScriptCore/assembler/X86Assembler.h +++ b/JavaScriptCore/assembler/X86Assembler.h @@ -41,6 +41,8 @@ inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(sign #if PLATFORM(X86_64) inline bool CAN_SIGN_EXTEND_32_64(intptr_t value) { return value == (intptr_t)(int32_t)value; } inline bool CAN_SIGN_EXTEND_U32_64(intptr_t value) { return value == (intptr_t)(uint32_t)value; } + +#define REPTACH_OFFSET_CALL_R11 3 #endif namespace X86 { @@ -84,6 +86,29 @@ public: typedef X86::XMMRegisterID XMMRegisterID; typedef enum { + ConditionO, + ConditionNO, + ConditionB, + ConditionAE, + ConditionE, + ConditionNE, + ConditionBE, + ConditionA, + ConditionS, + ConditionNS, + ConditionP, + ConditionNP, + ConditionL, + ConditionGE, + ConditionLE, + ConditionG, + + ConditionC = ConditionB, + ConditionNC = ConditionAE, + } Condition; + +private: + typedef enum { OP_ADD_EvGv = 0x01, OP_ADD_GvEv = 0x03, OP_OR_EvGv = 0x09, @@ -147,27 +172,24 @@ public: OP2_SUBSD_VsdWsd = 0x5C, OP2_MOVD_VdEd = 0x6E, OP2_MOVD_EdVd = 0x7E, - OP2_JO_rel32 = 0x80, - OP2_JB_rel32 = 0x82, - OP2_JAE_rel32 = 0x83, - OP2_JE_rel32 = 0x84, - OP2_JNE_rel32 = 0x85, - OP2_JBE_rel32 = 0x86, - OP2_JA_rel32 = 0x87, - OP2_JS_rel32 = 0x88, - OP2_JP_rel32 = 0x8A, - OP2_JL_rel32 = 0x8C, - 
OP2_JGE_rel32 = 0x8D, - OP2_JLE_rel32 = 0x8E, - OP2_JG_rel32 = 0x8F, - OP_SETE = 0x94, - OP_SETNE = 0x95, + OP2_JCC_rel32 = 0x80, + OP_SETCC = 0x90, OP2_IMUL_GvEv = 0xAF, OP2_MOVZX_GvEb = 0xB6, OP2_MOVZX_GvEw = 0xB7, OP2_PEXTRW_GdUdIb = 0xC5, } TwoByteOpcodeID; + TwoByteOpcodeID jccRel32(Condition cond) + { + return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond); + } + + TwoByteOpcodeID setccOpcode(Condition cond) + { + return (TwoByteOpcodeID)(OP_SETCC + cond); + } + typedef enum { GROUP1_OP_ADD = 0, GROUP1_OP_OR = 1, @@ -192,9 +214,6 @@ public: GROUP11_MOV = 0, } GroupOpcodeID; - // Opaque label types - -private: class X86InstructionFormatter; public: @@ -222,16 +241,22 @@ public: public: JmpDst() : m_offset(-1) + , m_used(false) { } + bool isUsed() const { return m_used; } + void used() { m_used = true; } private: JmpDst(int offset) : m_offset(offset) + , m_used(false) { + ASSERT(m_offset == offset); } - int m_offset; + int m_offset : 31; + bool m_used : 1; }; X86Assembler() @@ -640,6 +665,11 @@ public: m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset); } + void cmpq_mr(int offset, RegisterID base, RegisterID src) + { + m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset); + } + void cmpq_ir(int imm, RegisterID dst) { if (CAN_SIGN_EXTEND_8_32(imm)) { @@ -750,9 +780,14 @@ public: m_formatter.immediate8(imm); } + void setCC_r(Condition cond, RegisterID dst) + { + m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst); + } + void sete_r(RegisterID dst) { - m_formatter.twoByteOp8(OP_SETE, (GroupOpcodeID)0, dst); + m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst); } void setz_r(RegisterID dst) @@ -762,7 +797,7 @@ public: void setne_r(RegisterID dst) { - m_formatter.twoByteOp8(OP_SETNE, (GroupOpcodeID)0, dst); + m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst); } void setnz_r(RegisterID dst) @@ -898,6 +933,12 @@ public: m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset); } + void movq_i32m(int 
imm, int offset, RegisterID base) + { + m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset); + m_formatter.immediate32(imm); + } + void movq_i64r(int64_t imm, RegisterID dst) { m_formatter.oneByteOp64(OP_MOV_EAXIv, dst); @@ -969,9 +1010,13 @@ public: return m_formatter.immediateRel32(); } - void jmp_r(RegisterID dst) + // Return a JmpSrc so we have a label to the jump, so we can use this + // To make a tail recursive call on x86-64. The MacroAssembler + // really shouldn't wrap this as a Jump, since it can't be linked. :-/ + JmpSrc jmp_r(RegisterID dst) { m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst); + return JmpSrc(m_formatter.size()); } void jmp_m(int offset, RegisterID base) @@ -981,7 +1026,7 @@ public: JmpSrc jne() { - m_formatter.twoByteOp(OP2_JNE_rel32); + m_formatter.twoByteOp(jccRel32(ConditionNE)); return m_formatter.immediateRel32(); } @@ -992,73 +1037,79 @@ public: JmpSrc je() { - m_formatter.twoByteOp(OP2_JE_rel32); + m_formatter.twoByteOp(jccRel32(ConditionE)); return m_formatter.immediateRel32(); } JmpSrc jl() { - m_formatter.twoByteOp(OP2_JL_rel32); + m_formatter.twoByteOp(jccRel32(ConditionL)); return m_formatter.immediateRel32(); } JmpSrc jb() { - m_formatter.twoByteOp(OP2_JB_rel32); + m_formatter.twoByteOp(jccRel32(ConditionB)); return m_formatter.immediateRel32(); } JmpSrc jle() { - m_formatter.twoByteOp(OP2_JLE_rel32); + m_formatter.twoByteOp(jccRel32(ConditionLE)); return m_formatter.immediateRel32(); } JmpSrc jbe() { - m_formatter.twoByteOp(OP2_JBE_rel32); + m_formatter.twoByteOp(jccRel32(ConditionBE)); return m_formatter.immediateRel32(); } JmpSrc jge() { - m_formatter.twoByteOp(OP2_JGE_rel32); + m_formatter.twoByteOp(jccRel32(ConditionGE)); return m_formatter.immediateRel32(); } JmpSrc jg() { - m_formatter.twoByteOp(OP2_JG_rel32); + m_formatter.twoByteOp(jccRel32(ConditionG)); return m_formatter.immediateRel32(); } JmpSrc ja() { - m_formatter.twoByteOp(OP2_JA_rel32); + m_formatter.twoByteOp(jccRel32(ConditionA)); 
return m_formatter.immediateRel32(); } JmpSrc jae() { - m_formatter.twoByteOp(OP2_JAE_rel32); + m_formatter.twoByteOp(jccRel32(ConditionAE)); return m_formatter.immediateRel32(); } JmpSrc jo() { - m_formatter.twoByteOp(OP2_JO_rel32); + m_formatter.twoByteOp(jccRel32(ConditionO)); return m_formatter.immediateRel32(); } JmpSrc jp() { - m_formatter.twoByteOp(OP2_JP_rel32); + m_formatter.twoByteOp(jccRel32(ConditionP)); return m_formatter.immediateRel32(); } JmpSrc js() { - m_formatter.twoByteOp(OP2_JS_rel32); + m_formatter.twoByteOp(jccRel32(ConditionS)); + return m_formatter.immediateRel32(); + } + + JmpSrc jCC(Condition cond) + { + m_formatter.twoByteOp(jccRel32(cond)); return m_formatter.immediateRel32(); } @@ -1191,7 +1242,7 @@ public: // Linking & patching: - void link(JmpSrc from, JmpDst to) + void linkJump(JmpSrc from, JmpDst to) { ASSERT(to.m_offset != -1); ASSERT(from.m_offset != -1); @@ -1199,20 +1250,73 @@ public: reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(m_formatter.data()) + from.m_offset)[-1] = to.m_offset - from.m_offset; } - static void patchAddress(void* code, JmpDst position, void* value) + static void linkJump(void* code, JmpSrc from, void* to) { - ASSERT(position.m_offset != -1); + ASSERT(from.m_offset != -1); + ptrdiff_t linkOffset = reinterpret_cast<ptrdiff_t>(to) - (reinterpret_cast<ptrdiff_t>(code) + from.m_offset); + ASSERT(linkOffset == static_cast<int>(linkOffset)); + reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(code) + from.m_offset)[-1] = linkOffset; + } + + static void patchJump(intptr_t where, void* destination) + { + intptr_t offset = reinterpret_cast<intptr_t>(destination) - where; + ASSERT(offset == static_cast<int32_t>(offset)); + reinterpret_cast<int32_t*>(where)[-1] = static_cast<int32_t>(offset); + } + +#if PLATFORM(X86_64) + // FIXME: transition these functions out of here - the assembler + // shouldn't know that that this is mov/call pair using r11. 
:-/ + static void patchMacroAssemblerCall(intptr_t where, void* destination) + { + patchAddress(reinterpret_cast<void*>(where - REPTACH_OFFSET_CALL_R11), JmpDst(0), destination); + } +#else + static void patchMacroAssemblerCall(intptr_t where, void* destination) + { + intptr_t offset = reinterpret_cast<intptr_t>(destination) - where; + ASSERT(offset == static_cast<int32_t>(offset)); + reinterpret_cast<int32_t*>(where)[-1] = static_cast<int32_t>(offset); + } +#endif + + void linkCall(JmpSrc from, JmpDst to) + { + ASSERT(to.m_offset != -1); + ASSERT(from.m_offset != -1); - reinterpret_cast<void**>(reinterpret_cast<ptrdiff_t>(code) + position.m_offset)[-1] = value; + reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(m_formatter.data()) + from.m_offset)[-1] = to.m_offset - from.m_offset; } - static void link(void* code, JmpSrc from, void* to) + static void linkCall(void* code, JmpSrc from, void* to) { ASSERT(from.m_offset != -1); + ptrdiff_t linkOffset = reinterpret_cast<ptrdiff_t>(to) - (reinterpret_cast<ptrdiff_t>(code) + from.m_offset); + ASSERT(linkOffset == static_cast<int>(linkOffset)); + reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(code) + from.m_offset)[-1] = linkOffset; + } + + static void patchCall(intptr_t where, void* destination) + { + intptr_t offset = reinterpret_cast<intptr_t>(destination) - where; + ASSERT(offset == static_cast<int32_t>(offset)); + reinterpret_cast<int32_t*>(where)[-1] = static_cast<int32_t>(offset); + } + + static void patchAddress(void* code, JmpDst position, void* value) + { + ASSERT(position.m_offset != -1); - reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(code) + from.m_offset)[-1] = reinterpret_cast<ptrdiff_t>(to) - (reinterpret_cast<ptrdiff_t>(code) + from.m_offset); + reinterpret_cast<void**>(reinterpret_cast<ptrdiff_t>(code) + position.m_offset)[-1] = value; } + static unsigned getCallReturnOffset(JmpSrc call) + { + ASSERT(call.m_offset >= 0); + return call.m_offset; + } + static void* getRelocatedAddress(void* 
code, JmpSrc jump) { return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset); @@ -1250,13 +1354,6 @@ public: reinterpret_cast<intptr_t*>(where)[-1] = value; } - static void patchBranchOffset(intptr_t where, void* destination) - { - intptr_t offset = reinterpret_cast<intptr_t>(destination) - where; - ASSERT(offset == static_cast<int32_t>(offset)); - reinterpret_cast<int32_t*>(where)[-1] = static_cast<int32_t>(offset); - } - void* executableCopy(ExecutablePool* allocator) { void* copy = m_formatter.executableCopy(allocator); @@ -1601,13 +1698,8 @@ private: { ASSERT(mode != ModRmRegister); - // Encode sacle of (1,2,4,8) -> (0,1,2,3) - int shift = 0; - while (scale >>= 1) - shift++; - putModRm(mode, reg, hasSib); - m_buffer.putByteUnchecked((shift << 6) | ((index & 7) << 3) | (base & 7)); + m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7)); } void registerModRM(int reg, RegisterID rm) diff --git a/JavaScriptCore/bytecode/CodeBlock.h b/JavaScriptCore/bytecode/CodeBlock.h index e5d78d3..9bd4090 100644 --- a/JavaScriptCore/bytecode/CodeBlock.h +++ b/JavaScriptCore/bytecode/CodeBlock.h @@ -32,6 +32,7 @@ #include "EvalCodeCache.h" #include "Instruction.h" +#include "JITCode.h" #include "JSGlobalObject.h" #include "JumpTable.h" #include "Nodes.h" @@ -58,14 +59,14 @@ namespace JSC { uint32_t target; uint32_t scopeDepth; #if ENABLE(JIT) - void* nativeCode; + MacroAssembler::CodeLocationLabel nativeCode; #endif }; #if ENABLE(JIT) // The code, and the associated pool from which it was allocated. 
struct JITCodeRef { - void* code; + JITCode code; #ifndef NDEBUG unsigned codeSize; #endif @@ -117,19 +118,15 @@ namespace JSC { #if ENABLE(JIT) struct CallLinkInfo { CallLinkInfo() - : callReturnLocation(0) - , hotPathBegin(0) - , hotPathOther(0) - , coldPathOther(0) - , callee(0) + : callee(0) { } unsigned bytecodeIndex; - void* callReturnLocation; - void* hotPathBegin; - void* hotPathOther; - void* coldPathOther; + MacroAssembler::CodeLocationNearCall callReturnLocation; + MacroAssembler::CodeLocationDataLabelPtr hotPathBegin; + MacroAssembler::CodeLocationNearCall hotPathOther; + MacroAssembler::CodeLocationLabel coldPathOther; CodeBlock* callee; unsigned position; @@ -161,14 +158,18 @@ namespace JSC { unsigned bytecodeOffset; }; - struct PC { - PC(ptrdiff_t nativePCOffset, unsigned bytecodeIndex) - : nativePCOffset(nativePCOffset) + // This structure is used to map from a call return location + // (given as an offset in bytes into the JIT code) back to + // the bytecode index of the corresponding bytecode operation. + // This is then used to look up the corresponding handler. 
+ struct CallReturnOffsetToBytecodeIndex { + CallReturnOffsetToBytecodeIndex(unsigned callReturnOffset, unsigned bytecodeIndex) + : callReturnOffset(callReturnOffset) , bytecodeIndex(bytecodeIndex) { } - ptrdiff_t nativePCOffset; + unsigned callReturnOffset; unsigned bytecodeIndex; }; @@ -176,17 +177,17 @@ namespace JSC { inline void* getStructureStubInfoReturnLocation(StructureStubInfo* structureStubInfo) { - return structureStubInfo->callReturnLocation; + return structureStubInfo->callReturnLocation.calleeReturnAddressValue(); } inline void* getCallLinkInfoReturnLocation(CallLinkInfo* callLinkInfo) { - return callLinkInfo->callReturnLocation; + return callLinkInfo->callReturnLocation.calleeReturnAddressValue(); } - inline ptrdiff_t getNativePCOffset(PC* pc) + inline unsigned getCallReturnOffset(CallReturnOffsetToBytecodeIndex* pc) { - return pc->nativePCOffset; + return pc->callReturnOffset; } // Binary chop algorithm, calls valueAtPosition on pre-sorted elements in array, @@ -311,8 +312,7 @@ namespace JSC { unsigned getBytecodeIndex(CallFrame* callFrame, void* nativePC) { reparseForExceptionInfoIfNecessary(callFrame); - ptrdiff_t nativePCOffset = reinterpret_cast<void**>(nativePC) - reinterpret_cast<void**>(m_jitCode.code); - return binaryChop<PC, ptrdiff_t, getNativePCOffset>(m_exceptionInfo->m_pcVector.begin(), m_exceptionInfo->m_pcVector.size(), nativePCOffset)->bytecodeIndex; + return binaryChop<CallReturnOffsetToBytecodeIndex, unsigned, getCallReturnOffset>(m_exceptionInfo->m_callReturnIndexVector.begin(), m_exceptionInfo->m_callReturnIndexVector.size(), m_jitCode.code.offsetOf(nativePC))->bytecodeIndex; } bool functionRegisterForBytecodeOffset(unsigned bytecodeOffset, int& functionRegisterIndex); @@ -328,7 +328,7 @@ namespace JSC { #if ENABLE(JIT) void setJITCode(JITCodeRef& jitCode); - void* jitCode() { return m_jitCode.code; } + JITCode jitCode() { return m_jitCode.code; } ExecutablePool* executablePool() { return m_jitCode.executablePool.get(); } #endif 
@@ -393,7 +393,7 @@ namespace JSC { LineInfo& lastLineInfo() { ASSERT(m_exceptionInfo); return m_exceptionInfo->m_lineInfo.last(); } #if ENABLE(JIT) - Vector<PC>& pcVector() { ASSERT(m_exceptionInfo); return m_exceptionInfo->m_pcVector; } + Vector<CallReturnOffsetToBytecodeIndex>& callReturnIndexVector() { ASSERT(m_exceptionInfo); return m_exceptionInfo->m_callReturnIndexVector; } #endif // Constant Pool @@ -514,7 +514,7 @@ namespace JSC { Vector<GetByIdExceptionInfo> m_getByIdExceptionInfo; #if ENABLE(JIT) - Vector<PC> m_pcVector; + Vector<CallReturnOffsetToBytecodeIndex> m_callReturnIndexVector; #endif }; OwnPtr<ExceptionInfo> m_exceptionInfo; diff --git a/JavaScriptCore/bytecode/Instruction.h b/JavaScriptCore/bytecode/Instruction.h index 1fab106..314fda4 100644 --- a/JavaScriptCore/bytecode/Instruction.h +++ b/JavaScriptCore/bytecode/Instruction.h @@ -29,6 +29,7 @@ #ifndef Instruction_h #define Instruction_h +#include "MacroAssembler.h" #include "Opcode.h" #include "Structure.h" #include <wtf/VectorTraits.h> @@ -37,6 +38,16 @@ namespace JSC { + // *Sigh*, If the JIT is enabled we need to track the stubRountine (of type MacroAssembler::CodeLocationLabel), + // If the JIT is not in use we don't actually need the variable (that said, if the JIT is not in use we don't + // curently actually use PolymorphicAccessStructureLists, which we should). Anyway, this seems like the best + // solution for now - will need to something smarter if/when we actually want mixed-mode operation. 
+#if ENABLE(JIT) + typedef MacroAssembler::CodeLocationLabel PolymorphicAccessStructureListStubRoutineType; +#else + typedef void* PolymorphicAccessStructureListStubRoutineType; +#endif + class JSCell; class Structure; class StructureChain; @@ -45,14 +56,14 @@ namespace JSC { struct PolymorphicAccessStructureList { struct PolymorphicStubInfo { bool isChain; - void* stubRoutine; + PolymorphicAccessStructureListStubRoutineType stubRoutine; Structure* base; union { Structure* proto; StructureChain* chain; } u; - void set(void* _stubRoutine, Structure* _base) + void set(PolymorphicAccessStructureListStubRoutineType _stubRoutine, Structure* _base) { stubRoutine = _stubRoutine; base = _base; @@ -60,7 +71,7 @@ namespace JSC { isChain = false; } - void set(void* _stubRoutine, Structure* _base, Structure* _proto) + void set(PolymorphicAccessStructureListStubRoutineType _stubRoutine, Structure* _base, Structure* _proto) { stubRoutine = _stubRoutine; base = _base; @@ -68,7 +79,7 @@ namespace JSC { isChain = false; } - void set(void* _stubRoutine, Structure* _base, StructureChain* _chain) + void set(PolymorphicAccessStructureListStubRoutineType _stubRoutine, Structure* _base, StructureChain* _chain) { stubRoutine = _stubRoutine; base = _base; @@ -77,17 +88,17 @@ namespace JSC { } } list[POLYMORPHIC_LIST_CACHE_SIZE]; - PolymorphicAccessStructureList(void* stubRoutine, Structure* firstBase) + PolymorphicAccessStructureList(PolymorphicAccessStructureListStubRoutineType stubRoutine, Structure* firstBase) { list[0].set(stubRoutine, firstBase); } - PolymorphicAccessStructureList(void* stubRoutine, Structure* firstBase, Structure* firstProto) + PolymorphicAccessStructureList(PolymorphicAccessStructureListStubRoutineType stubRoutine, Structure* firstBase, Structure* firstProto) { list[0].set(stubRoutine, firstBase, firstProto); } - PolymorphicAccessStructureList(void* stubRoutine, Structure* firstBase, StructureChain* firstChain) + 
PolymorphicAccessStructureList(PolymorphicAccessStructureListStubRoutineType stubRoutine, Structure* firstBase, StructureChain* firstChain) { list[0].set(stubRoutine, firstBase, firstChain); } diff --git a/JavaScriptCore/bytecode/JumpTable.h b/JavaScriptCore/bytecode/JumpTable.h index 44e224d..eee773c 100644 --- a/JavaScriptCore/bytecode/JumpTable.h +++ b/JavaScriptCore/bytecode/JumpTable.h @@ -30,6 +30,7 @@ #ifndef JumpTable_h #define JumpTable_h +#include "MacroAssembler.h" #include "UString.h" #include <wtf/HashMap.h> #include <wtf/Vector.h> @@ -39,7 +40,7 @@ namespace JSC { struct OffsetLocation { int32_t branchOffset; #if ENABLE(JIT) - void* ctiOffset; + MacroAssembler::CodeLocationLabel ctiOffset; #endif }; @@ -47,7 +48,7 @@ namespace JSC { typedef HashMap<RefPtr<UString::Rep>, OffsetLocation> StringOffsetTable; StringOffsetTable offsetTable; #if ENABLE(JIT) - void* ctiDefault; // FIXME: it should not be necessary to store this. + MacroAssembler::CodeLocationLabel ctiDefault; // FIXME: it should not be necessary to store this. 
#endif inline int32_t offsetForValue(UString::Rep* value, int32_t defaultOffset) @@ -60,7 +61,7 @@ namespace JSC { } #if ENABLE(JIT) - inline void* ctiForValue(UString::Rep* value) + inline MacroAssembler::CodeLocationLabel ctiForValue(UString::Rep* value) { StringOffsetTable::const_iterator end = offsetTable.end(); StringOffsetTable::const_iterator loc = offsetTable.find(value); @@ -76,8 +77,8 @@ namespace JSC { Vector<int32_t> branchOffsets; int32_t min; #if ENABLE(JIT) - Vector<void*> ctiOffsets; - void* ctiDefault; + Vector<MacroAssembler::CodeLocationLabel> ctiOffsets; + MacroAssembler::CodeLocationLabel ctiDefault; #endif int32_t offsetForValue(int32_t value, int32_t defaultOffset); @@ -88,7 +89,7 @@ namespace JSC { } #if ENABLE(JIT) - inline void* ctiForValue(int32_t value) + inline MacroAssembler::CodeLocationLabel ctiForValue(int32_t value) { if (value >= min && static_cast<uint32_t>(value - min) < ctiOffsets.size()) return ctiOffsets[value - min]; diff --git a/JavaScriptCore/bytecode/StructureStubInfo.h b/JavaScriptCore/bytecode/StructureStubInfo.h index a9e0678..24fcb7d 100644 --- a/JavaScriptCore/bytecode/StructureStubInfo.h +++ b/JavaScriptCore/bytecode/StructureStubInfo.h @@ -26,19 +26,18 @@ #ifndef StructureStubInfo_h #define StructureStubInfo_h +#if ENABLE(JIT) + #include "Instruction.h" +#include "MacroAssembler.h" #include "Opcode.h" #include "Structure.h" namespace JSC { -#if ENABLE(JIT) struct StructureStubInfo { StructureStubInfo(OpcodeID opcodeID) : opcodeID(opcodeID) - , stubRoutine(0) - , callReturnLocation(0) - , hotPathBegin(0) { } @@ -145,12 +144,13 @@ namespace JSC { } putByIdReplace; } u; - void* stubRoutine; - void* callReturnLocation; - void* hotPathBegin; + MacroAssembler::CodeLocationLabel stubRoutine; + MacroAssembler::CodeLocationCall callReturnLocation; + MacroAssembler::CodeLocationLabel hotPathBegin; }; -#endif } // namespace JSC +#endif + #endif // StructureStubInfo_h diff --git 
a/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp b/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp index cd89c1e..c83cdc7 100644 --- a/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp +++ b/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp @@ -1611,7 +1611,7 @@ RegisterID* BytecodeGenerator::emitNextPropertyName(RegisterID* dst, RegisterID* RegisterID* BytecodeGenerator::emitCatch(RegisterID* targetRegister, Label* start, Label* end) { #if ENABLE(JIT) - HandlerInfo info = { start->offsetFrom(0), end->offsetFrom(0), instructions().size(), m_dynamicScopeDepth + m_baseScopeDepth, 0 }; + HandlerInfo info = { start->offsetFrom(0), end->offsetFrom(0), instructions().size(), m_dynamicScopeDepth + m_baseScopeDepth, MacroAssembler::CodeLocationLabel() }; #else HandlerInfo info = { start->offsetFrom(0), end->offsetFrom(0), instructions().size(), m_dynamicScopeDepth + m_baseScopeDepth }; #endif @@ -1744,9 +1744,6 @@ static void prepareJumpTableForStringSwitch(StringJumpTable& jumpTable, int32_t UString::Rep* clause = static_cast<StringNode*>(nodes[i])->value().ustring().rep(); OffsetLocation location; location.branchOffset = labels[i]->offsetFrom(switchAddress); -#if ENABLE(JIT) - location.ctiOffset = 0; -#endif jumpTable.offsetTable.add(clause, location); } } diff --git a/JavaScriptCore/bytecompiler/BytecodeGenerator.h b/JavaScriptCore/bytecompiler/BytecodeGenerator.h index ceb5881..a6f245d 100644 --- a/JavaScriptCore/bytecompiler/BytecodeGenerator.h +++ b/JavaScriptCore/bytecompiler/BytecodeGenerator.h @@ -317,6 +317,7 @@ namespace JSC { void emitDebugHook(DebugHookID, int firstLine, int lastLine); int scopeDepth() { return m_dynamicScopeDepth + m_finallyDepth; } + bool hasFinaliser() { return m_finallyDepth != 0; } void pushFinallyContext(Label* target, RegisterID* returnAddrDst); void popFinallyContext(); @@ -432,16 +433,18 @@ namespace JSC { ScopeNode* m_scopeNode; CodeBlock* m_codeBlock; + // Some of these objects keep pointers to one another. 
They are arranged + // to ensure a sane destruction order that avoids references to freed memory. HashSet<RefPtr<UString::Rep>, IdentifierRepHash> m_functions; RegisterID m_ignoredResultRegister; RegisterID m_thisRegister; RegisterID m_argumentsRegister; int m_activationRegisterIndex; - SegmentedVector<RegisterID, 512> m_calleeRegisters; - SegmentedVector<RegisterID, 512> m_parameters; - SegmentedVector<RegisterID, 512> m_globals; - SegmentedVector<LabelScope, 256> m_labelScopes; - SegmentedVector<Label, 256> m_labels; + SegmentedVector<RegisterID, 32> m_calleeRegisters; + SegmentedVector<RegisterID, 32> m_parameters; + SegmentedVector<RegisterID, 32> m_globals; + SegmentedVector<Label, 32> m_labels; + SegmentedVector<LabelScope, 8> m_labelScopes; RefPtr<RegisterID> m_lastConstant; int m_finallyDepth; int m_dynamicScopeDepth; @@ -472,7 +475,7 @@ namespace JSC { bool m_regeneratingForExceptionInfo; CodeBlock* m_codeBlockBeingRegeneratedFrom; - static const unsigned s_maxEmitNodeDepth = 10000; + static const unsigned s_maxEmitNodeDepth = 5000; }; } diff --git a/JavaScriptCore/interpreter/CallFrame.h b/JavaScriptCore/interpreter/CallFrame.h index d6b9b79..10d0b99 100644 --- a/JavaScriptCore/interpreter/CallFrame.h +++ b/JavaScriptCore/interpreter/CallFrame.h @@ -97,6 +97,7 @@ namespace JSC { friend class JSActivation; friend class JSGlobalObject; friend class Interpreter; + friend class JITStubs; static CallFrame* create(Register* callFrameBase) { return static_cast<CallFrame*>(callFrameBase); } Register* registers() { return this; } diff --git a/JavaScriptCore/interpreter/Interpreter.cpp b/JavaScriptCore/interpreter/Interpreter.cpp index 135c42f..8178d15 100644 --- a/JavaScriptCore/interpreter/Interpreter.cpp +++ b/JavaScriptCore/interpreter/Interpreter.cpp @@ -66,29 +66,10 @@ #include "AssemblerBuffer.h" #endif -#if PLATFORM(DARWIN) -#include <mach/mach.h> -#endif - -#if HAVE(SYS_TIME_H) -#include <sys/time.h> -#endif - -#if PLATFORM(WIN_OS) -#include <windows.h> 
-#endif - -#if PLATFORM(QT) -#include <QDateTime> -#endif - using namespace std; namespace JSC { -// Preferred number of milliseconds between each timeout check -static const int preferredScriptCheckTimeInterval = 1000; - static ALWAYS_INLINE unsigned bytecodeOffsetForPC(CallFrame* callFrame, CodeBlock* codeBlock, void* pc) { #if ENABLE(JIT) @@ -107,162 +88,6 @@ static int depth(CodeBlock* codeBlock, ScopeChain& sc) return sc.localDepth(); } -static inline bool jsLess(CallFrame* callFrame, JSValuePtr v1, JSValuePtr v2) -{ - if (JSValuePtr::areBothInt32Fast(v1, v2)) - return v1.getInt32Fast() < v2.getInt32Fast(); - - double n1; - double n2; - if (v1.getNumber(n1) && v2.getNumber(n2)) - return n1 < n2; - - Interpreter* interpreter = callFrame->interpreter(); - if (interpreter->isJSString(v1) && interpreter->isJSString(v2)) - return asString(v1)->value() < asString(v2)->value(); - - JSValuePtr p1; - JSValuePtr p2; - bool wasNotString1 = v1.getPrimitiveNumber(callFrame, n1, p1); - bool wasNotString2 = v2.getPrimitiveNumber(callFrame, n2, p2); - - if (wasNotString1 | wasNotString2) - return n1 < n2; - - return asString(p1)->value() < asString(p2)->value(); -} - -static inline bool jsLessEq(CallFrame* callFrame, JSValuePtr v1, JSValuePtr v2) -{ - if (JSValuePtr::areBothInt32Fast(v1, v2)) - return v1.getInt32Fast() <= v2.getInt32Fast(); - - double n1; - double n2; - if (v1.getNumber(n1) && v2.getNumber(n2)) - return n1 <= n2; - - Interpreter* interpreter = callFrame->interpreter(); - if (interpreter->isJSString(v1) && interpreter->isJSString(v2)) - return !(asString(v2)->value() < asString(v1)->value()); - - JSValuePtr p1; - JSValuePtr p2; - bool wasNotString1 = v1.getPrimitiveNumber(callFrame, n1, p1); - bool wasNotString2 = v2.getPrimitiveNumber(callFrame, n2, p2); - - if (wasNotString1 | wasNotString2) - return n1 <= n2; - - return !(asString(p2)->value() < asString(p1)->value()); -} - -static NEVER_INLINE JSValuePtr jsAddSlowCase(CallFrame* callFrame, JSValuePtr v1, 
JSValuePtr v2) -{ - // exception for the Date exception in defaultValue() - JSValuePtr p1 = v1.toPrimitive(callFrame); - JSValuePtr p2 = v2.toPrimitive(callFrame); - - if (p1.isString() || p2.isString()) { - RefPtr<UString::Rep> value = concatenate(p1.toString(callFrame).rep(), p2.toString(callFrame).rep()); - if (!value) - return throwOutOfMemoryError(callFrame); - return jsString(callFrame, value.release()); - } - - return jsNumber(callFrame, p1.toNumber(callFrame) + p2.toNumber(callFrame)); -} - -// Fast-path choices here are based on frequency data from SunSpider: -// <times> Add case: <t1> <t2> -// --------------------------- -// 5626160 Add case: 3 3 (of these, 3637690 are for immediate values) -// 247412 Add case: 5 5 -// 20900 Add case: 5 6 -// 13962 Add case: 5 3 -// 4000 Add case: 3 5 - -static ALWAYS_INLINE JSValuePtr jsAdd(CallFrame* callFrame, JSValuePtr v1, JSValuePtr v2) -{ - double left; - double right = 0.0; - - bool rightIsNumber = v2.getNumber(right); - if (rightIsNumber && v1.getNumber(left)) - return jsNumber(callFrame, left + right); - - bool leftIsString = v1.isString(); - if (leftIsString && v2.isString()) { - RefPtr<UString::Rep> value = concatenate(asString(v1)->value().rep(), asString(v2)->value().rep()); - if (!value) - return throwOutOfMemoryError(callFrame); - return jsString(callFrame, value.release()); - } - - if (rightIsNumber & leftIsString) { - RefPtr<UString::Rep> value = v2.isInt32Fast() ? 
- concatenate(asString(v1)->value().rep(), v2.getInt32Fast()) : - concatenate(asString(v1)->value().rep(), right); - - if (!value) - return throwOutOfMemoryError(callFrame); - return jsString(callFrame, value.release()); - } - - // All other cases are pretty uncommon - return jsAddSlowCase(callFrame, v1, v2); -} - -static JSValuePtr jsTypeStringForValue(CallFrame* callFrame, JSValuePtr v) -{ - if (v.isUndefined()) - return jsNontrivialString(callFrame, "undefined"); - if (v.isBoolean()) - return jsNontrivialString(callFrame, "boolean"); - if (v.isNumber()) - return jsNontrivialString(callFrame, "number"); - if (v.isString()) - return jsNontrivialString(callFrame, "string"); - if (v.isObject()) { - // Return "undefined" for objects that should be treated - // as null when doing comparisons. - if (asObject(v)->structure()->typeInfo().masqueradesAsUndefined()) - return jsNontrivialString(callFrame, "undefined"); - CallData callData; - if (asObject(v)->getCallData(callData) != CallTypeNone) - return jsNontrivialString(callFrame, "function"); - } - return jsNontrivialString(callFrame, "object"); -} - -static bool jsIsObjectType(JSValuePtr v) -{ - if (!v.isCell()) - return v.isNull(); - - JSType type = asCell(v)->structure()->typeInfo().type(); - if (type == NumberType || type == StringType) - return false; - if (type == ObjectType) { - if (asObject(v)->structure()->typeInfo().masqueradesAsUndefined()) - return false; - CallData callData; - if (asObject(v)->getCallData(callData) != CallTypeNone) - return false; - } - return true; -} - -static bool jsIsFunctionType(JSValuePtr v) -{ - if (v.isObject()) { - CallData callData; - if (asObject(v)->getCallData(callData) != CallTypeNone) - return true; - } - return false; -} - NEVER_INLINE bool Interpreter::resolve(CallFrame* callFrame, Instruction* vPC, JSValuePtr& exceptionValue) { int dst = (vPC + 1)->u.operand; @@ -364,34 +189,11 @@ NEVER_INLINE bool Interpreter::resolveGlobal(CallFrame* callFrame, Instruction* return false; 
} -static ALWAYS_INLINE JSValuePtr inlineResolveBase(CallFrame* callFrame, Identifier& property, ScopeChainNode* scopeChain) -{ - ScopeChainIterator iter = scopeChain->begin(); - ScopeChainIterator next = iter; - ++next; - ScopeChainIterator end = scopeChain->end(); - ASSERT(iter != end); - - PropertySlot slot; - JSObject* base; - while (true) { - base = *iter; - if (next == end || base->getPropertySlot(callFrame, property, slot)) - return base; - - iter = next; - ++next; - } - - ASSERT_NOT_REACHED(); - return noValue(); -} - NEVER_INLINE void Interpreter::resolveBase(CallFrame* callFrame, Instruction* vPC) { int dst = (vPC + 1)->u.operand; int property = (vPC + 2)->u.operand; - callFrame[dst] = JSValuePtr(inlineResolveBase(callFrame, callFrame->codeBlock()->identifier(property), callFrame->scopeChain())); + callFrame[dst] = JSValuePtr(JSC::resolveBase(callFrame, callFrame->codeBlock()->identifier(property), callFrame->scopeChain())); } NEVER_INLINE bool Interpreter::resolveBaseAndProperty(CallFrame* callFrame, Instruction* vPC, JSValuePtr& exceptionValue) @@ -545,56 +347,9 @@ NEVER_INLINE JSValuePtr Interpreter::callEval(CallFrame* callFrame, RegisterFile Interpreter::Interpreter() : m_sampler(0) -#if ENABLE(JIT) - , m_ctiArrayLengthTrampoline(0) - , m_ctiStringLengthTrampoline(0) - , m_ctiVirtualCallPreLink(0) - , m_ctiVirtualCallLink(0) - , m_ctiVirtualCall(0) -#endif , m_reentryDepth(0) - , m_timeoutTime(0) - , m_timeAtLastCheckTimeout(0) - , m_timeExecuting(0) - , m_timeoutCheckCount(0) - , m_ticksUntilNextTimeoutCheck(initialTickCountThreshold) { - initTimeout(); privateExecute(InitializeAndReturn, 0, 0, 0); - - // Bizarrely, calling fastMalloc here is faster than allocating space on the stack. 
- void* storage = fastMalloc(sizeof(CollectorBlock)); - - JSCell* jsArray = new (storage) JSArray(JSArray::createStructure(jsNull())); - m_jsArrayVptr = jsArray->vptr(); - jsArray->~JSCell(); - - JSCell* jsByteArray = new (storage) JSByteArray(JSByteArray::VPtrStealingHack); - m_jsByteArrayVptr = jsByteArray->vptr(); - jsByteArray->~JSCell(); - - JSCell* jsString = new (storage) JSString(JSString::VPtrStealingHack); - m_jsStringVptr = jsString->vptr(); - jsString->~JSCell(); - - JSCell* jsFunction = new (storage) JSFunction(JSFunction::createStructure(jsNull())); - m_jsFunctionVptr = jsFunction->vptr(); - jsFunction->~JSCell(); - - fastFree(storage); -} - -void Interpreter::initialize(JSGlobalData* globalData) -{ -#if ENABLE(JIT) - JIT::compileCTIMachineTrampolines(globalData); -#else - UNUSED_PARAM(globalData); -#endif -} - -Interpreter::~Interpreter() -{ } #ifndef NDEBUG @@ -865,7 +620,7 @@ JSValuePtr Interpreter::execute(ProgramNode* programNode, CallFrame* callFrame, #if ENABLE(JIT) if (!codeBlock->jitCode()) JIT::compile(scopeChain->globalData, codeBlock); - result = JIT::execute(codeBlock->jitCode(), &m_registerFile, newCallFrame, scopeChain->globalData, exception); + result = codeBlock->jitCode().execute(&m_registerFile, newCallFrame, scopeChain->globalData, exception); #else result = privateExecute(Normal, &m_registerFile, newCallFrame, exception); #endif @@ -931,7 +686,7 @@ JSValuePtr Interpreter::execute(FunctionBodyNode* functionBodyNode, CallFrame* c #if ENABLE(JIT) if (!codeBlock->jitCode()) JIT::compile(scopeChain->globalData, codeBlock); - result = JIT::execute(codeBlock->jitCode(), &m_registerFile, newCallFrame, scopeChain->globalData, exception); + result = codeBlock->jitCode().execute(&m_registerFile, newCallFrame, scopeChain->globalData, exception); #else result = privateExecute(Normal, &m_registerFile, newCallFrame, exception); #endif @@ -1023,7 +778,7 @@ JSValuePtr Interpreter::execute(EvalNode* evalNode, CallFrame* callFrame, JSObje #if 
ENABLE(JIT) if (!codeBlock->jitCode()) JIT::compile(scopeChain->globalData, codeBlock); - result = JIT::execute(codeBlock->jitCode(), &m_registerFile, newCallFrame, scopeChain->globalData, exception); + result = codeBlock->jitCode().execute(&m_registerFile, newCallFrame, scopeChain->globalData, exception); #else result = privateExecute(Normal, &m_registerFile, newCallFrame, exception); #endif @@ -1065,93 +820,6 @@ NEVER_INLINE void Interpreter::debug(CallFrame* callFrame, DebugHookID debugHook } } -void Interpreter::resetTimeoutCheck() -{ - m_ticksUntilNextTimeoutCheck = initialTickCountThreshold; - m_timeAtLastCheckTimeout = 0; - m_timeExecuting = 0; -} - -// Returns the time the current thread has spent executing, in milliseconds. -static inline unsigned getCPUTime() -{ -#if PLATFORM(DARWIN) - mach_msg_type_number_t infoCount = THREAD_BASIC_INFO_COUNT; - thread_basic_info_data_t info; - - // Get thread information - mach_port_t threadPort = mach_thread_self(); - thread_info(threadPort, THREAD_BASIC_INFO, reinterpret_cast<thread_info_t>(&info), &infoCount); - mach_port_deallocate(mach_task_self(), threadPort); - - unsigned time = info.user_time.seconds * 1000 + info.user_time.microseconds / 1000; - time += info.system_time.seconds * 1000 + info.system_time.microseconds / 1000; - - return time; -#elif HAVE(SYS_TIME_H) - // FIXME: This should probably use getrusage with the RUSAGE_THREAD flag. - struct timeval tv; - gettimeofday(&tv, 0); - return tv.tv_sec * 1000 + tv.tv_usec / 1000; -#elif PLATFORM(QT) - QDateTime t = QDateTime::currentDateTime(); - return t.toTime_t() * 1000 + t.time().msec(); -#elif PLATFORM(WIN_OS) - union { - FILETIME fileTime; - unsigned long long fileTimeAsLong; - } userTime, kernelTime; - - // GetThreadTimes won't accept NULL arguments so we pass these even though - // they're not used. 
- FILETIME creationTime, exitTime; - - GetThreadTimes(GetCurrentThread(), &creationTime, &exitTime, &kernelTime.fileTime, &userTime.fileTime); - - return userTime.fileTimeAsLong / 10000 + kernelTime.fileTimeAsLong / 10000; -#else -#error Platform does not have getCurrentTime function -#endif -} - -// We have to return a JSValue here, gcc seems to produce worse code if -// we attempt to return a bool -ALWAYS_INLINE bool Interpreter::checkTimeout(JSGlobalObject* globalObject) -{ - unsigned currentTime = getCPUTime(); - - if (!m_timeAtLastCheckTimeout) { - // Suspicious amount of looping in a script -- start timing it - m_timeAtLastCheckTimeout = currentTime; - return false; - } - - unsigned timeDiff = currentTime - m_timeAtLastCheckTimeout; - - if (timeDiff == 0) - timeDiff = 1; - - m_timeExecuting += timeDiff; - m_timeAtLastCheckTimeout = currentTime; - - // Adjust the tick threshold so we get the next checkTimeout call in the interval specified in - // preferredScriptCheckTimeInterval - m_ticksUntilNextTimeoutCheck = static_cast<unsigned>((static_cast<float>(preferredScriptCheckTimeInterval) / timeDiff) * m_ticksUntilNextTimeoutCheck); - // If the new threshold is 0 reset it to the default threshold. This can happen if the timeDiff is higher than the - // preferred script check time interval. 
- if (m_ticksUntilNextTimeoutCheck == 0) - m_ticksUntilNextTimeoutCheck = initialTickCountThreshold; - - if (m_timeoutTime && m_timeExecuting > m_timeoutTime) { - if (globalObject->shouldInterruptScript()) - return true; - - resetTimeoutCheck(); - } - - return false; -} - NEVER_INLINE ScopeChainNode* Interpreter::createExceptionScope(CallFrame* callFrame, const Instruction* vPC) { int dst = (++vPC)->u.operand; @@ -1251,37 +919,6 @@ NEVER_INLINE void Interpreter::uncachePutByID(CodeBlock* codeBlock, Instruction* vPC[4] = 0; } -static size_t countPrototypeChainEntriesAndCheckForProxies(CallFrame* callFrame, JSValuePtr baseValue, const PropertySlot& slot) -{ - JSCell* cell = asCell(baseValue); - size_t count = 0; - - while (slot.slotBase() != cell) { - JSValuePtr v = cell->structure()->prototypeForLookup(callFrame); - - // If we didn't find slotBase in baseValue's prototype chain, then baseValue - // must be a proxy for another object. - - if (v.isNull()) - return 0; - - cell = asCell(v); - - // Since we're accessing a prototype in a loop, it's a good bet that it - // should not be treated as a dictionary. - if (cell->structure()->isDictionary()) { - RefPtr<Structure> transition = Structure::fromDictionaryTransition(cell->structure()); - asObject(cell)->setStructure(transition.release()); - cell->structure()->setCachedPrototypeChain(0); - } - - ++count; - } - - ASSERT(count); - return count; -} - NEVER_INLINE void Interpreter::tryCacheGetByID(CallFrame* callFrame, CodeBlock* codeBlock, Instruction* vPC, JSValuePtr baseValue, const Identifier& propertyName, const PropertySlot& slot) { // Recursive invocation may already have specialized this instruction. 
@@ -1294,12 +931,13 @@ NEVER_INLINE void Interpreter::tryCacheGetByID(CallFrame* callFrame, CodeBlock* return; } - if (isJSArray(baseValue) && propertyName == callFrame->propertyNames().length) { + JSGlobalData* globalData = &callFrame->globalData(); + if (isJSArray(globalData, baseValue) && propertyName == callFrame->propertyNames().length) { vPC[0] = getOpcode(op_get_array_length); return; } - if (isJSString(baseValue) && propertyName == callFrame->propertyNames().length) { + if (isJSString(globalData, baseValue) && propertyName == callFrame->propertyNames().length) { vPC[0] = getOpcode(op_get_string_length); return; } @@ -1417,7 +1055,7 @@ JSValuePtr Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registe Instruction* vPC = callFrame->codeBlock()->instructions().begin(); Profiler** enabledProfilerReference = Profiler::enabledProfilerReference(); - unsigned tickCount = m_ticksUntilNextTimeoutCheck + 1; + unsigned tickCount = globalData->timeoutChecker.ticksUntilNextCheck(); #define CHECK_FOR_EXCEPTION() \ do { \ @@ -1433,19 +1071,17 @@ JSValuePtr Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registe #define CHECK_FOR_TIMEOUT() \ if (!--tickCount) { \ - if (checkTimeout(callFrame->dynamicGlobalObject())) { \ + if (globalData->timeoutChecker.didTimeOut(callFrame)) { \ exceptionValue = jsNull(); \ goto vm_throw; \ } \ - tickCount = m_ticksUntilNextTimeoutCheck; \ + tickCount = globalData->timeoutChecker.ticksUntilNextCheck(); \ } #if ENABLE(OPCODE_SAMPLING) #define SAMPLE(codeBlock, vPC) m_sampler->sample(codeBlock, vPC) - #define CTI_SAMPLER ARG_globalData->interpreter->sampler() #else #define SAMPLE(codeBlock, vPC) - #define CTI_SAMPLER 0 #endif #if HAVE(COMPUTED_GOTO) @@ -2123,7 +1759,12 @@ JSValuePtr Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registe goto vm_throw; JSObject* baseObj = asObject(baseVal); - callFrame[dst] = jsBoolean(baseObj->structure()->typeInfo().implementsHasInstance() ? 
baseObj->hasInstance(callFrame, callFrame[value].jsValue(callFrame), callFrame[baseProto].jsValue(callFrame)) : false); + if (baseObj->structure()->typeInfo().implementsHasInstance()) { + bool result = baseObj->hasInstance(callFrame, callFrame[value].jsValue(callFrame), callFrame[baseProto].jsValue(callFrame)); + CHECK_FOR_EXCEPTION(); + callFrame[dst] = jsBoolean(result); + } else + callFrame[dst] = jsBoolean(false); vPC += 5; NEXT_INSTRUCTION(); @@ -2607,7 +2248,7 @@ JSValuePtr Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registe int base = vPC[2].u.operand; JSValuePtr baseValue = callFrame[base].jsValue(callFrame); - if (LIKELY(isJSArray(baseValue))) { + if (LIKELY(isJSArray(globalData, baseValue))) { int dst = vPC[1].u.operand; callFrame[dst] = JSValuePtr(jsNumber(callFrame, asArray(baseValue)->length())); vPC += 8; @@ -2627,7 +2268,7 @@ JSValuePtr Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registe int base = vPC[2].u.operand; JSValuePtr baseValue = callFrame[base].jsValue(callFrame); - if (LIKELY(isJSString(baseValue))) { + if (LIKELY(isJSString(globalData, baseValue))) { int dst = vPC[1].u.operand; callFrame[dst] = JSValuePtr(jsNumber(callFrame, asString(baseValue)->value().size())); vPC += 8; @@ -2809,15 +2450,15 @@ JSValuePtr Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registe if (LIKELY(subscript.isUInt32Fast())) { uint32_t i = subscript.getUInt32Fast(); - if (isJSArray(baseValue)) { + if (isJSArray(globalData, baseValue)) { JSArray* jsArray = asArray(baseValue); if (jsArray->canGetIndex(i)) result = jsArray->getIndex(i); else result = jsArray->JSArray::get(callFrame, i); - } else if (isJSString(baseValue) && asString(baseValue)->canGetIndex(i)) + } else if (isJSString(globalData, baseValue) && asString(baseValue)->canGetIndex(i)) result = asString(baseValue)->getIndex(&callFrame->globalData(), i); - else if (isJSByteArray(baseValue) && asByteArray(baseValue)->canAccessIndex(i)) + else if 
(isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) result = asByteArray(baseValue)->getIndex(callFrame, i); else result = baseValue.get(callFrame, i); @@ -2851,13 +2492,13 @@ JSValuePtr Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registe if (LIKELY(subscript.isUInt32Fast())) { uint32_t i = subscript.getUInt32Fast(); - if (isJSArray(baseValue)) { + if (isJSArray(globalData, baseValue)) { JSArray* jsArray = asArray(baseValue); if (jsArray->canSetIndex(i)) jsArray->setIndex(i, callFrame[value].jsValue(callFrame)); else jsArray->JSArray::put(callFrame, i, callFrame[value].jsValue(callFrame)); - } else if (isJSByteArray(baseValue) && asByteArray(baseValue)->canAccessIndex(i)) { + } else if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) { JSByteArray* jsByteArray = asByteArray(baseValue); double dValue = 0; JSValuePtr jsValue = callFrame[value].jsValue(callFrame); @@ -3144,9 +2785,10 @@ JSValuePtr Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registe if (scrutinee.isInt32Fast()) vPC += callFrame->codeBlock()->immediateSwitchJumpTable(tableIndex).offsetForValue(scrutinee.getInt32Fast(), defaultOffset); else { - int32_t value; - if (scrutinee.numberToInt32(value)) - vPC += callFrame->codeBlock()->immediateSwitchJumpTable(tableIndex).offsetForValue(value, defaultOffset); + double value; + int32_t intValue; + if (scrutinee.getNumber(value) && ((intValue = static_cast<int32_t>(value)) == value)) + vPC += callFrame->codeBlock()->immediateSwitchJumpTable(tableIndex).offsetForValue(intValue, defaultOffset); else vPC += defaultOffset; } @@ -3591,7 +3233,7 @@ JSValuePtr Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registe the object in register override to register dst. 
*/ - int dst = vPC[1].u.operand;; + int dst = vPC[1].u.operand; if (LIKELY(callFrame[dst].jsValue(callFrame).isObject())) { vPC += 3; NEXT_INSTRUCTION(); @@ -3711,7 +3353,7 @@ JSValuePtr Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registe DEFINE_OPCODE(op_catch) { /* catch ex(r) - Retrieves the VMs current exception and puts it in register + Retrieves the VM's current exception and puts it in register ex. This is only valid after an exception has been raised, and usually forms the beginning of an exception handler. */ @@ -4004,6 +3646,7 @@ CallFrame* Interpreter::findFunctionCallFrame(CallFrame* callFrame, InternalFunc return 0; } +#ifdef MANUAL_MERGE_REQUIRED #if ENABLE(JIT) #if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) @@ -6137,4 +5780,6 @@ JSValueEncodedAsPointer* Interpreter::cti_vm_throw(STUB_ARGS) #endif // ENABLE(JIT) +#else // MANUAL_MERGE_REQUIRED +#endif // MANUAL_MERGE_REQUIRED } // namespace JSC diff --git a/JavaScriptCore/interpreter/Interpreter.h b/JavaScriptCore/interpreter/Interpreter.h index 18c2185..7d97962 100644 --- a/JavaScriptCore/interpreter/Interpreter.h +++ b/JavaScriptCore/interpreter/Interpreter.h @@ -32,6 +32,7 @@ #include "ArgList.h" #include "JSCell.h" #include "JSValue.h" +#include "JSObject.h" #include "Opcode.h" #include "RegisterFile.h" #include <wtf/HashMap.h> @@ -56,52 +57,6 @@ namespace JSC { class SamplingTool; struct HandlerInfo; -#if ENABLE(JIT) - -#if USE(JIT_STUB_ARGUMENT_VA_LIST) - #define STUB_ARGS void* args, ... 
- #define ARGS (reinterpret_cast<void**>(vl_args) - 1) -#else // JIT_STUB_ARGUMENT_REGISTER or JIT_STUB_ARGUMENT_STACK - #define STUB_ARGS void** args - #define ARGS (args) -#endif - -#if USE(JIT_STUB_ARGUMENT_REGISTER) - #if PLATFORM(X86_64) - #define JIT_STUB - #elif COMPILER(MSVC) - #define JIT_STUB __fastcall - #elif COMPILER(GCC) - #define JIT_STUB __attribute__ ((fastcall)) - #else - #error Need to support register calling convention in this compiler - #endif -#else // JIT_STUB_ARGUMENT_VA_LIST or JIT_STUB_ARGUMENT_STACK - #if COMPILER(MSVC) - #define JIT_STUB __cdecl - #else - #define JIT_STUB - #endif -#endif - -// The Mac compilers are fine with this, -#if PLATFORM(MAC) - struct VoidPtrPair { - void* first; - void* second; - }; -#define RETURN_PAIR(a,b) VoidPtrPair pair = { a, b }; return pair -#else - typedef uint64_t VoidPtrPair; - union VoidPtrPairValue { - struct { void* first; void* second; } s; - VoidPtrPair i; - }; -#define RETURN_PAIR(a,b) VoidPtrPairValue pair = {{ a, b }}; return pair.i -#endif - -#endif // ENABLE(JIT) - enum DebugHookID { WillExecuteProgram, DidExecuteProgram, @@ -111,16 +66,15 @@ namespace JSC { WillExecuteStatement }; - enum { MaxReentryDepth = 128 }; + enum { MaxReentryDepth = 64 }; class Interpreter { friend class JIT; + friend class JITStubs; + public: Interpreter(); - ~Interpreter(); - void initialize(JSGlobalData*); - RegisterFile& registerFile() { return m_registerFile; } Opcode getOpcode(OpcodeID id) @@ -153,161 +107,9 @@ namespace JSC { void retrieveLastCaller(CallFrame*, int& lineNumber, intptr_t& sourceID, UString& sourceURL, JSValuePtr& function) const; void getArgumentsData(CallFrame*, JSFunction*&, ptrdiff_t& firstParameterIndex, Register*& argv, int& argc); - void setTimeoutTime(unsigned timeoutTime) { m_timeoutTime = timeoutTime; } - - void startTimeoutCheck() - { - if (!m_timeoutCheckCount) - resetTimeoutCheck(); -#ifdef ANDROID_INSTRUMENT - if (!m_timeoutCheckCount) - 
android::TimeCounter::start(android::TimeCounter::JavaScriptTimeCounter); -#endif - - ++m_timeoutCheckCount; - } - - void stopTimeoutCheck() - { - ASSERT(m_timeoutCheckCount); - --m_timeoutCheckCount; -#ifdef ANDROID_INSTRUMENT - if (!m_timeoutCheckCount) - android::TimeCounter::record(android::TimeCounter::JavaScriptTimeCounter, __FUNCTION__); -#endif - } - - inline void initTimeout() - { - ASSERT(!m_timeoutCheckCount); - resetTimeoutCheck(); - m_timeoutTime = 0; - m_timeoutCheckCount = 0; - } - void setSampler(SamplingTool* sampler) { m_sampler = sampler; } SamplingTool* sampler() { return m_sampler; } -#if ENABLE(JIT) - - static int JIT_STUB cti_timeout_check(STUB_ARGS); - static void JIT_STUB cti_register_file_check(STUB_ARGS); - - static JSObject* JIT_STUB cti_op_convert_this(STUB_ARGS); - static void JIT_STUB cti_op_end(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_add(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_pre_inc(STUB_ARGS); - static int JIT_STUB cti_op_loop_if_less(STUB_ARGS); - static int JIT_STUB cti_op_loop_if_lesseq(STUB_ARGS); - static JSObject* JIT_STUB cti_op_new_object(STUB_ARGS); - static void JIT_STUB cti_op_put_by_id(STUB_ARGS); - static void JIT_STUB cti_op_put_by_id_second(STUB_ARGS); - static void JIT_STUB cti_op_put_by_id_generic(STUB_ARGS); - static void JIT_STUB cti_op_put_by_id_fail(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_second(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_generic(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_self_fail(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_proto_list(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_proto_list_full(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_proto_fail(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB 
cti_op_get_by_id_array_fail(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_string_fail(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_del_by_id(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_instanceof(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_mul(STUB_ARGS); - static JSObject* JIT_STUB cti_op_new_func(STUB_ARGS); - static void* JIT_STUB cti_op_call_JSFunction(STUB_ARGS); - static VoidPtrPair JIT_STUB cti_op_call_arityCheck(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_call_NotJSFunction(STUB_ARGS); - static void JIT_STUB cti_op_create_arguments(STUB_ARGS); - static void JIT_STUB cti_op_create_arguments_no_params(STUB_ARGS); - static void JIT_STUB cti_op_tear_off_activation(STUB_ARGS); - static void JIT_STUB cti_op_tear_off_arguments(STUB_ARGS); - static void JIT_STUB cti_op_profile_will_call(STUB_ARGS); - static void JIT_STUB cti_op_profile_did_call(STUB_ARGS); - static void JIT_STUB cti_op_ret_scopeChain(STUB_ARGS); - static JSObject* JIT_STUB cti_op_new_array(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_resolve(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_resolve_global(STUB_ARGS); - static JSObject* JIT_STUB cti_op_construct_JSConstruct(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_construct_NotJSConstruct(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_val(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_val_byte_array(STUB_ARGS); - static VoidPtrPair JIT_STUB cti_op_resolve_func(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_sub(STUB_ARGS); - static void JIT_STUB cti_op_put_by_val(STUB_ARGS); - static void JIT_STUB cti_op_put_by_val_array(STUB_ARGS); - static void JIT_STUB cti_op_put_by_val_byte_array(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_lesseq(STUB_ARGS); - static int JIT_STUB cti_op_loop_if_true(STUB_ARGS); - static 
JSValueEncodedAsPointer* JIT_STUB cti_op_resolve_base(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_negate(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_resolve_skip(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_div(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_pre_dec(STUB_ARGS); - static int JIT_STUB cti_op_jless(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_not(STUB_ARGS); - static int JIT_STUB cti_op_jtrue(STUB_ARGS); - static VoidPtrPair JIT_STUB cti_op_post_inc(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_eq(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_lshift(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_bitand(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_rshift(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_bitnot(STUB_ARGS); - static VoidPtrPair JIT_STUB cti_op_resolve_with_base(STUB_ARGS); - static JSObject* JIT_STUB cti_op_new_func_exp(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_mod(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_less(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_neq(STUB_ARGS); - static VoidPtrPair JIT_STUB cti_op_post_dec(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_urshift(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_bitxor(STUB_ARGS); - static JSObject* JIT_STUB cti_op_new_regexp(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_bitor(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_call_eval(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_throw(STUB_ARGS); - static JSPropertyNameIterator* JIT_STUB cti_op_get_pnames(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_next_pname(STUB_ARGS); - static JSObject* JIT_STUB cti_op_push_scope(STUB_ARGS); - static void JIT_STUB cti_op_pop_scope(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB 
cti_op_typeof(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_is_undefined(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_is_boolean(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_is_number(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_is_string(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_is_object(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_is_function(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_stricteq(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_nstricteq(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_to_jsnumber(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_in(STUB_ARGS); - static JSObject* JIT_STUB cti_op_push_new_scope(STUB_ARGS); - static void JIT_STUB cti_op_jmp_scopes(STUB_ARGS); - static void JIT_STUB cti_op_put_by_index(STUB_ARGS); - static void* JIT_STUB cti_op_switch_imm(STUB_ARGS); - static void* JIT_STUB cti_op_switch_char(STUB_ARGS); - static void* JIT_STUB cti_op_switch_string(STUB_ARGS); - static JSValueEncodedAsPointer* JIT_STUB cti_op_del_by_val(STUB_ARGS); - static void JIT_STUB cti_op_put_getter(STUB_ARGS); - static void JIT_STUB cti_op_put_setter(STUB_ARGS); - static JSObject* JIT_STUB cti_op_new_error(STUB_ARGS); - static void JIT_STUB cti_op_debug(STUB_ARGS); - - static JSValueEncodedAsPointer* JIT_STUB cti_vm_throw(STUB_ARGS); - static void* JIT_STUB cti_vm_dontLazyLinkCall(STUB_ARGS); - static void* JIT_STUB cti_vm_lazyLinkCall(STUB_ARGS); - static JSObject* JIT_STUB cti_op_push_activation(STUB_ARGS); - -#endif // ENABLE(JIT) - - // Default number of ticks before a timeout check should be done. 
- static const int initialTickCountThreshold = 1024; - - bool isJSArray(JSValuePtr v) { return v.isCell() && v.asCell()->vptr() == m_jsArrayVptr; } - bool isJSString(JSValuePtr v) { return v.isCell() && v.asCell()->vptr() == m_jsStringVptr; } - bool isJSByteArray(JSValuePtr v) { return v.isCell() && v.asCell()->vptr() == m_jsByteArrayVptr; } - private: enum ExecutionFlag { Normal, InitializeAndReturn }; @@ -336,9 +138,6 @@ namespace JSC { void dumpCallFrame(CallFrame*); void dumpRegisters(CallFrame*); - bool checkTimeout(JSGlobalObject*); - void resetTimeoutCheck(); - void tryCacheGetByID(CallFrame*, CodeBlock*, Instruction*, JSValuePtr baseValue, const Identifier& propertyName, const PropertySlot&); void uncacheGetByID(CodeBlock*, Instruction* vPC); void tryCachePutByID(CallFrame*, CodeBlock*, Instruction*, JSValuePtr baseValue, const PutPropertySlot&); @@ -346,38 +145,12 @@ namespace JSC { bool isCallBytecode(Opcode opcode) { return opcode == getOpcode(op_call) || opcode == getOpcode(op_construct) || opcode == getOpcode(op_call_eval); } -#if ENABLE(JIT) - static void throwStackOverflowPreviousFrame(CallFrame**, JSGlobalData*, void*& returnAddress); - - void tryCTICacheGetByID(CallFrame*, CodeBlock*, void* returnAddress, JSValuePtr baseValue, const Identifier& propertyName, const PropertySlot&); - void tryCTICachePutByID(CallFrame*, CodeBlock*, void* returnAddress, JSValuePtr baseValue, const PutPropertySlot&); -#endif - SamplingTool* m_sampler; -#if ENABLE(JIT) - RefPtr<ExecutablePool> m_executablePool; - void* m_ctiArrayLengthTrampoline; - void* m_ctiStringLengthTrampoline; - void* m_ctiVirtualCallPreLink; - void* m_ctiVirtualCallLink; - void* m_ctiVirtualCall; -#endif - int m_reentryDepth; - unsigned m_timeoutTime; - unsigned m_timeAtLastCheckTimeout; - unsigned m_timeExecuting; - unsigned m_timeoutCheckCount; - unsigned m_ticksUntilNextTimeoutCheck; RegisterFile m_registerFile; - void* m_jsArrayVptr; - void* m_jsByteArrayVptr; - void* m_jsStringVptr; - void* 
m_jsFunctionVptr; - #if HAVE(COMPUTED_GOTO) Opcode m_opcodeTable[numOpcodeIDs]; // Maps OpcodeID => Opcode for compiling HashMap<Opcode, OpcodeID> m_opcodeIDTable; // Maps Opcode => OpcodeID for decompiling diff --git a/JavaScriptCore/interpreter/Register.h b/JavaScriptCore/interpreter/Register.h index ff36bbc..5277a0f 100644 --- a/JavaScriptCore/interpreter/Register.h +++ b/JavaScriptCore/interpreter/Register.h @@ -60,8 +60,9 @@ namespace JSC { private: friend class ExecState; friend class Interpreter; + friend class JITStubs; - // Only CallFrame and Interpreter should use these functions. + // Only CallFrame, Interpreter, and JITStubs should use these functions. Register(intptr_t); diff --git a/JavaScriptCore/interpreter/RegisterFile.h b/JavaScriptCore/interpreter/RegisterFile.h index ec190d6..c320f04 100644 --- a/JavaScriptCore/interpreter/RegisterFile.h +++ b/JavaScriptCore/interpreter/RegisterFile.h @@ -29,6 +29,7 @@ #ifndef RegisterFile_h #define RegisterFile_h +#include "ExecutableAllocator.h" #include "Register.h" #include "Collector.h" #include <wtf/Noncopyable.h> @@ -111,48 +112,9 @@ namespace JSC { static const size_t defaultCapacity = 524288; static const size_t defaultMaxGlobals = 8192; - static const size_t allocationSize = 1 << 14; - static const size_t allocationSizeMask = allocationSize - 1; - - RegisterFile(size_t capacity = defaultCapacity, size_t maxGlobals = defaultMaxGlobals) - : m_numGlobals(0) - , m_maxGlobals(maxGlobals) - , m_start(0) - , m_end(0) - , m_max(0) - , m_buffer(0) - , m_globalObject(0) - { - size_t bufferLength = (capacity + maxGlobals) * sizeof(Register); -#if HAVE(MMAP) - m_buffer = static_cast<Register*>(mmap(0, bufferLength, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0)); - if (m_buffer == MAP_FAILED) { - fprintf(stderr, "Could not allocate register file: %d\n", errno); - CRASH(); - } -#elif HAVE(VIRTUALALLOC) - // Ensure bufferLength is a multiple of allocation size - bufferLength = (bufferLength + allocationSizeMask) 
& ~allocationSizeMask; - m_buffer = static_cast<Register*>(VirtualAlloc(0, bufferLength, MEM_RESERVE, PAGE_READWRITE)); - if (!m_buffer) { - fprintf(stderr, "Could not allocate register file: %d\n", errno); - CRASH(); - } - int initialAllocation = (maxGlobals * sizeof(Register) + allocationSizeMask) & ~allocationSizeMask; - void* commitCheck = VirtualAlloc(m_buffer, initialAllocation, MEM_COMMIT, PAGE_READWRITE); - if (commitCheck != m_buffer) { - fprintf(stderr, "Could not allocate register file: %d\n", errno); - CRASH(); - } - m_maxCommitted = reinterpret_cast<Register*>(reinterpret_cast<char*>(m_buffer) + initialAllocation); -#else - #error "Don't know how to reserve virtual memory on this platform." -#endif - m_start = m_buffer + maxGlobals; - m_end = m_start; - m_max = m_start + capacity; - } + static const size_t commitSize = 1 << 14; + RegisterFile(size_t capacity = defaultCapacity, size_t maxGlobals = defaultMaxGlobals); ~RegisterFile(); Register* start() const { return m_start; } @@ -162,31 +124,8 @@ namespace JSC { void setGlobalObject(JSGlobalObject* globalObject) { m_globalObject = globalObject; } JSGlobalObject* globalObject() { return m_globalObject; } - void shrink(Register* newEnd) - { - if (newEnd < m_end) - m_end = newEnd; - } - - bool grow(Register* newEnd) - { - if (newEnd > m_end) { - if (newEnd > m_max) - return false; -#if !HAVE(MMAP) && HAVE(VIRTUALALLOC) - if (newEnd > m_maxCommitted) { - ptrdiff_t additionalAllocation = ((reinterpret_cast<char*>(newEnd) - reinterpret_cast<char*>(m_maxCommitted)) + allocationSizeMask) & ~allocationSizeMask; - if (!VirtualAlloc(m_maxCommitted, additionalAllocation, MEM_COMMIT, PAGE_READWRITE)) { - fprintf(stderr, "Could not allocate register file: %d\n", errno); - CRASH(); - } - m_maxCommitted = reinterpret_cast<Register*>(reinterpret_cast<char*>(m_maxCommitted) + additionalAllocation); - } -#endif - m_end = newEnd; - } - return true; - } + bool grow(Register* newEnd); + void shrink(Register* newEnd); void 
setNumGlobals(size_t numGlobals) { m_numGlobals = numGlobals; } int numGlobals() const { return m_numGlobals; } @@ -205,12 +144,78 @@ namespace JSC { Register* m_max; Register* m_buffer; #if HAVE(VIRTUALALLOC) - Register* m_maxCommitted; + Register* m_commitEnd; #endif JSGlobalObject* m_globalObject; // The global object whose vars are currently stored in the register file. }; + inline RegisterFile::RegisterFile(size_t capacity, size_t maxGlobals) + : m_numGlobals(0) + , m_maxGlobals(maxGlobals) + , m_start(0) + , m_end(0) + , m_max(0) + , m_buffer(0) + , m_globalObject(0) + { + size_t bufferLength = (capacity + maxGlobals) * sizeof(Register); + #if HAVE(MMAP) + m_buffer = static_cast<Register*>(mmap(0, bufferLength, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0)); + if (m_buffer == MAP_FAILED) { + fprintf(stderr, "Could not allocate register file: %d\n", errno); + CRASH(); + } + #elif HAVE(VIRTUALALLOC) + m_buffer = static_cast<Register*>(VirtualAlloc(0, roundUpAllocationSize(bufferLength, commitSize), MEM_RESERVE, PAGE_READWRITE)); + if (!m_buffer) { + fprintf(stderr, "Could not allocate register file: %d\n", errno); + CRASH(); + } + size_t committedSize = roundUpAllocationSize(maxGlobals * sizeof(Register), commitSize); + void* commitCheck = VirtualAlloc(m_buffer, committedSize, MEM_COMMIT, PAGE_READWRITE); + if (commitCheck != m_buffer) { + fprintf(stderr, "Could not allocate register file: %d\n", errno); + CRASH(); + } + m_commitEnd = reinterpret_cast<Register*>(reinterpret_cast<char*>(m_buffer) + committedSize); + #else + #error "Don't know how to reserve virtual memory on this platform." 
+ #endif + m_start = m_buffer + maxGlobals; + m_end = m_start; + m_max = m_start + capacity; + } + + inline void RegisterFile::shrink(Register* newEnd) + { + if (newEnd < m_end) + m_end = newEnd; + } + + inline bool RegisterFile::grow(Register* newEnd) + { + if (newEnd < m_end) + return true; + + if (newEnd > m_max) + return false; + +#if !HAVE(MMAP) && HAVE(VIRTUALALLOC) + if (newEnd > m_commitEnd) { + size_t size = roundUpAllocationSize(reinterpret_cast<char*>(newEnd) - reinterpret_cast<char*>(m_commitEnd), commitSize); + if (!VirtualAlloc(m_commitEnd, size, MEM_COMMIT, PAGE_READWRITE)) { + fprintf(stderr, "Could not allocate register file: %d\n", errno); + CRASH(); + } + m_commitEnd = reinterpret_cast<Register*>(reinterpret_cast<char*>(m_commitEnd) + size); + } +#endif + + m_end = newEnd; + return true; + } + } // namespace JSC #endif // RegisterFile_h diff --git a/JavaScriptCore/jit/ExecutableAllocator.h b/JavaScriptCore/jit/ExecutableAllocator.h index 1541256..0cb78ad 100644 --- a/JavaScriptCore/jit/ExecutableAllocator.h +++ b/JavaScriptCore/jit/ExecutableAllocator.h @@ -40,6 +40,18 @@ namespace JSC { +inline size_t roundUpAllocationSize(size_t request, size_t granularity) +{ + if ((std::numeric_limits<size_t>::max() - granularity) <= request) + CRASH(); // Allocation is too large + + // Round up to next page boundary + size_t size = request + (granularity - 1); + size = size & ~(granularity - 1); + ASSERT(size >= request); + return size; +} + class ExecutablePool : public RefCounted<ExecutablePool> { private: struct Allocation { @@ -86,18 +98,6 @@ private: static Allocation systemAlloc(size_t n); static void systemRelease(const Allocation& alloc); - inline size_t roundUpAllocationSize(size_t request, size_t granularity) - { - if ((std::numeric_limits<size_t>::max() - granularity) <= request) - CRASH(); // Allocation is too large - - // Round up to next page boundary - size_t size = request + (granularity - 1); - size = size & ~(granularity - 1); - ASSERT(size 
>= request); - return size; - } - ExecutablePool(size_t n); void* poolAllocate(size_t n); diff --git a/JavaScriptCore/jit/JIT.cpp b/JavaScriptCore/jit/JIT.cpp index 5640c8a..e6113fc 100644 --- a/JavaScriptCore/jit/JIT.cpp +++ b/JavaScriptCore/jit/JIT.cpp @@ -79,14 +79,14 @@ asm( ".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n" SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n" #if USE(JIT_STUB_ARGUMENT_VA_LIST) - "call " SYMBOL_STRING(_ZN3JSC11Interpreter12cti_vm_throwEPvz) "\n" + "call " SYMBOL_STRING(_ZN3JSC8JITStubs12cti_vm_throwEPvz) "\n" #else #if USE(JIT_STUB_ARGUMENT_REGISTER) "movl %esp, %ecx" "\n" #else // JIT_STUB_ARGUMENT_STACK "movl %esp, 0(%esp)" "\n" #endif - "call " SYMBOL_STRING(_ZN3JSC11Interpreter12cti_vm_throwEPPv) "\n" + "call " SYMBOL_STRING(_ZN3JSC8JITStubs12cti_vm_throwEPPv) "\n" #endif "addl $0x1c, %esp" "\n" "popl %ebx" "\n" @@ -138,7 +138,7 @@ asm( SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n" #if USE(JIT_STUB_ARGUMENT_REGISTER) "movq %rsp, %rdi" "\n" - "call " SYMBOL_STRING(_ZN3JSC11Interpreter12cti_vm_throwEPPv) "\n" + "call " SYMBOL_STRING(_ZN3JSC8JITStubs12cti_vm_throwEPPv) "\n" #else // JIT_STUB_ARGUMENT_VA_LIST or JIT_STUB_ARGUMENT_STACK #error "JIT_STUB_ARGUMENT configuration not supported." #endif @@ -186,7 +186,7 @@ extern "C" { #else // JIT_STUB_ARGUMENT_VA_LIST or JIT_STUB_ARGUMENT_STACK #error "JIT_STUB_ARGUMENT configuration not supported." 
#endif - call JSC::Interpreter::cti_vm_throw; + call JSC::JITStubs::cti_vm_throw; add esp, 0x1c; pop ebx; pop edi; @@ -200,14 +200,19 @@ extern "C" { #endif -void ctiSetReturnAddress(void** where, void* what) +void ctiSetReturnAddress(void** addressOfReturnAddress, void* newDestinationToReturnTo) { - *where = what; + *addressOfReturnAddress = newDestinationToReturnTo; } -void ctiPatchCallByReturnAddress(void* where, void* what) +void ctiPatchCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, void* newCalleeFunction) { - MacroAssembler::Jump::patch(where, what); + returnAddress.relinkCallerToFunction(newCalleeFunction); +} + +void ctiPatchNearCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, void* newCalleeFunction) +{ + returnAddress.relinkNearCallerToFunction(newCalleeFunction); } JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock) @@ -228,62 +233,28 @@ void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqTy unsigned src1 = currentInstruction[2].u.operand; unsigned src2 = currentInstruction[3].u.operand; - emitGetVirtualRegisters(src1, X86::eax, src2, X86::edx); + emitGetVirtualRegisters(src1, regT0, src2, regT1); -#if USE(ALTERNATE_JSIMMEDIATE) // Jump to a slow case if either operand is a number, or if both are JSCell*s. 
- move(X86::eax, X86::ecx); - orPtr(X86::edx, X86::ecx); - addSlowCase(emitJumpIfJSCell(X86::ecx)); - addSlowCase(emitJumpIfImmediateNumber(X86::ecx)); + move(regT0, regT2); + orPtr(regT1, regT2); + addSlowCase(emitJumpIfJSCell(regT2)); + addSlowCase(emitJumpIfImmediateNumber(regT2)); if (type == OpStrictEq) - sete32(X86::edx, X86::eax); + set32(Equal, regT1, regT0, regT0); else - setne32(X86::edx, X86::eax); - emitTagAsBoolImmediate(X86::eax); -#else - bool negated = (type == OpNStrictEq); - - // Check that both are immediates, if so check if they're equal - Jump firstNotImmediate = emitJumpIfJSCell(X86::eax); - Jump secondNotImmediate = emitJumpIfJSCell(X86::edx); - Jump bothWereImmediatesButNotEqual = jnePtr(X86::edx, X86::eax); - - // They are equal - set the result to true. (Or false, if negated). - move(ImmPtr(JSValuePtr::encode(jsBoolean(!negated))), X86::eax); - Jump bothWereImmediatesAndEqual = jump(); - - // eax was not an immediate, we haven't yet checked edx. - // If edx is also a JSCell, or is 0, then jump to a slow case, - // otherwise these values are not equal. - firstNotImmediate.link(this); - emitJumpSlowCaseIfJSCell(X86::edx); - addSlowCase(jePtr(X86::edx, ImmPtr(JSValuePtr::encode(js0())))); - Jump firstWasNotImmediate = jump(); - - // eax was an immediate, but edx wasn't. - // If eax is 0 jump to a slow case, otherwise these values are not equal. - secondNotImmediate.link(this); - addSlowCase(jePtr(X86::eax, ImmPtr(JSValuePtr::encode(js0())))); - - // We get here if the two values are different immediates, or one is 0 and the other is a JSCell. - // Vaelues are not equal, set the result to false. 
- bothWereImmediatesButNotEqual.link(this); - firstWasNotImmediate.link(this); - move(ImmPtr(JSValuePtr::encode(jsBoolean(negated))), X86::eax); - - bothWereImmediatesAndEqual.link(this); -#endif + set32(NotEqual, regT1, regT0, regT0); + emitTagAsBoolImmediate(regT0); emitPutVirtualRegister(dst); } -void JIT::emitSlowScriptCheck() +void JIT::emitTimeoutCheck() { - Jump skipTimeout = jnzSub32(Imm32(1), timeoutCheckRegister); - emitCTICall(Interpreter::cti_timeout_check); - move(X86::eax, timeoutCheckRegister); + Jump skipTimeout = branchSub32(NonZero, Imm32(1), timeoutCheckRegister); + emitCTICall(JITStubs::cti_timeout_check); + move(regT0, timeoutCheckRegister); skipTimeout.link(this); killLastResultRegister(); @@ -296,21 +267,24 @@ void JIT::emitSlowScriptCheck() #define CTI_COMPILE_BINARY_OP(name) \ case name: { \ - emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx); \ - emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 2, X86::ecx); \ - emitCTICall(Interpreter::cti_##name); \ + emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, regT2); \ + emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 2, regT2); \ + emitCTICall(JITStubs::cti_##name); \ emitPutVirtualRegister(currentInstruction[1].u.operand); \ NEXT_OPCODE(name); \ } #define CTI_COMPILE_UNARY_OP(name) \ case name: { \ - emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx); \ - emitCTICall(Interpreter::cti_##name); \ + emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, regT2); \ + emitCTICall(JITStubs::cti_##name); \ emitPutVirtualRegister(currentInstruction[1].u.operand); \ NEXT_OPCODE(name); \ } +#define RECORD_JUMP_TARGET(targetOffset) \ + do { m_labels[m_bytecodeIndex + (targetOffset)].used(); } while (false) + void JIT::privateCompileMainPass() { Instruction* instructionsBegin = m_codeBlock->instructions().begin(); @@ -328,13 +302,31 @@ void 
JIT::privateCompileMainPass() sampleInstruction(currentInstruction); #endif + if (m_labels[m_bytecodeIndex].isUsed()) + killLastResultRegister(); + m_labels[m_bytecodeIndex] = label(); OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode); switch (opcodeID) { case op_mov: { - emitGetVirtualRegister(currentInstruction[2].u.operand, X86::eax); - emitPutVirtualRegister(currentInstruction[1].u.operand); + int src = currentInstruction[2].u.operand; + int dst = currentInstruction[1].u.operand; + + if (m_codeBlock->isConstantRegisterIndex(src)) { + storePtr(ImmPtr(JSValuePtr::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register))); + if (dst == m_lastResultBytecodeRegister) + killLastResultRegister(); + } else if ((src == m_lastResultBytecodeRegister) || (dst == m_lastResultBytecodeRegister)) { + // If either the src or dst is the cached register go though + // get/put registers to make sure we track this correctly. + emitGetVirtualRegister(src, regT0); + emitPutVirtualRegister(dst); + } else { + // Perform the copy via regT1; do not disturb any mapping in regT0. 
+ loadPtr(Address(callFrameRegister, src * sizeof(Register)), regT1); + storePtr(regT1, Address(callFrameRegister, dst * sizeof(Register))); + } NEXT_OPCODE(op_mov); } case op_add: { @@ -343,8 +335,9 @@ void JIT::privateCompileMainPass() } case op_end: { if (m_codeBlock->needsFullScopeChain()) - emitCTICall(Interpreter::cti_op_end); - emitGetVirtualRegister(currentInstruction[1].u.operand, X86::eax); + emitCTICall(JITStubs::cti_op_end); + ASSERT(returnValueRegister != callFrameRegister); + emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister); push(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register)))); ret(); NEXT_OPCODE(op_end); @@ -352,6 +345,7 @@ void JIT::privateCompileMainPass() case op_jmp: { unsigned target = currentInstruction[1].u.operand; addJump(jump(), target + 1); + RECORD_JUMP_TARGET(target + 1); NEXT_OPCODE(op_jmp); } case op_pre_inc: { @@ -359,60 +353,60 @@ void JIT::privateCompileMainPass() NEXT_OPCODE(op_pre_inc); } case op_loop: { - emitSlowScriptCheck(); + emitTimeoutCheck(); unsigned target = currentInstruction[1].u.operand; addJump(jump(), target + 1); NEXT_OPCODE(op_end); } case op_loop_if_less: { - emitSlowScriptCheck(); + emitTimeoutCheck(); unsigned op1 = currentInstruction[1].u.operand; unsigned op2 = currentInstruction[2].u.operand; unsigned target = currentInstruction[3].u.operand; if (isOperandConstantImmediateInt(op2)) { - emitGetVirtualRegister(op1, X86::eax); - emitJumpSlowCaseIfNotImmediateInteger(X86::eax); + emitGetVirtualRegister(op1, regT0); + emitJumpSlowCaseIfNotImmediateInteger(regT0); #if USE(ALTERNATE_JSIMMEDIATE) int32_t op2imm = getConstantOperandImmediateInt(op2); #else int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2))); #endif - addJump(jl32(X86::eax, Imm32(op2imm)), target + 3); + addJump(branch32(LessThan, regT0, Imm32(op2imm)), target + 3); } else { - emitGetVirtualRegisters(op1, X86::eax, op2, X86::edx); - 
emitJumpSlowCaseIfNotImmediateInteger(X86::eax); - emitJumpSlowCaseIfNotImmediateInteger(X86::edx); - addJump(jl32(X86::eax, X86::edx), target + 3); + emitGetVirtualRegisters(op1, regT0, op2, regT1); + emitJumpSlowCaseIfNotImmediateInteger(regT0); + emitJumpSlowCaseIfNotImmediateInteger(regT1); + addJump(branch32(LessThan, regT0, regT1), target + 3); } NEXT_OPCODE(op_loop_if_less); } case op_loop_if_lesseq: { - emitSlowScriptCheck(); + emitTimeoutCheck(); unsigned op1 = currentInstruction[1].u.operand; unsigned op2 = currentInstruction[2].u.operand; unsigned target = currentInstruction[3].u.operand; if (isOperandConstantImmediateInt(op2)) { - emitGetVirtualRegister(op1, X86::eax); - emitJumpSlowCaseIfNotImmediateInteger(X86::eax); + emitGetVirtualRegister(op1, regT0); + emitJumpSlowCaseIfNotImmediateInteger(regT0); #if USE(ALTERNATE_JSIMMEDIATE) int32_t op2imm = getConstantOperandImmediateInt(op2); #else int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2))); #endif - addJump(jle32(X86::eax, Imm32(op2imm)), target + 3); + addJump(branch32(LessThanOrEqual, regT0, Imm32(op2imm)), target + 3); } else { - emitGetVirtualRegisters(op1, X86::eax, op2, X86::edx); - emitJumpSlowCaseIfNotImmediateInteger(X86::eax); - emitJumpSlowCaseIfNotImmediateInteger(X86::edx); - addJump(jle32(X86::eax, X86::edx), target + 3); + emitGetVirtualRegisters(op1, regT0, op2, regT1); + emitJumpSlowCaseIfNotImmediateInteger(regT0); + emitJumpSlowCaseIfNotImmediateInteger(regT1); + addJump(branch32(LessThanOrEqual, regT0, regT1), target + 3); } NEXT_OPCODE(op_loop_if_less); } case op_new_object: { - emitCTICall(Interpreter::cti_op_new_object); + emitCTICall(JITStubs::cti_op_new_object); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_new_object); } @@ -425,49 +419,49 @@ void JIT::privateCompileMainPass() NEXT_OPCODE(op_get_by_id); } case op_instanceof: { - emitGetVirtualRegister(currentInstruction[2].u.operand, X86::eax); // value - 
emitGetVirtualRegister(currentInstruction[3].u.operand, X86::ecx); // baseVal - emitGetVirtualRegister(currentInstruction[4].u.operand, X86::edx); // proto + emitGetVirtualRegister(currentInstruction[2].u.operand, regT0); // value + emitGetVirtualRegister(currentInstruction[3].u.operand, regT2); // baseVal + emitGetVirtualRegister(currentInstruction[4].u.operand, regT1); // proto // check if any are immediates - move(X86::eax, X86::ebx); - orPtr(X86::ecx, X86::ebx); - orPtr(X86::edx, X86::ebx); - emitJumpSlowCaseIfNotJSCell(X86::ebx); + move(regT0, regT3); + orPtr(regT2, regT3); + orPtr(regT1, regT3); + emitJumpSlowCaseIfNotJSCell(regT3); // check that all are object type - this is a bit of a bithack to avoid excess branching; // we check that the sum of the three type codes from Structures is exactly 3 * ObjectType, // this works because NumberType and StringType are smaller - move(Imm32(3 * ObjectType), X86::ebx); - loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::eax); - loadPtr(Address(X86::ecx, FIELD_OFFSET(JSCell, m_structure)), X86::ecx); - loadPtr(Address(X86::edx, FIELD_OFFSET(JSCell, m_structure)), X86::edx); - sub32(Address(X86::eax, FIELD_OFFSET(Structure, m_typeInfo.m_type)), X86::ebx); - sub32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo.m_type)), X86::ebx); - addSlowCase(jne32(Address(X86::edx, FIELD_OFFSET(Structure, m_typeInfo.m_type)), X86::ebx)); + move(Imm32(3 * ObjectType), regT3); + loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT0); + loadPtr(Address(regT2, FIELD_OFFSET(JSCell, m_structure)), regT2); + loadPtr(Address(regT1, FIELD_OFFSET(JSCell, m_structure)), regT1); + sub32(Address(regT0, FIELD_OFFSET(Structure, m_typeInfo.m_type)), regT3); + sub32(Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_type)), regT3); + addSlowCase(branch32(NotEqual, Address(regT1, FIELD_OFFSET(Structure, m_typeInfo.m_type)), regT3)); // check that baseVal's flags include ImplementsHasInstance but not 
OverridesHasInstance - load32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), X86::ecx); - and32(Imm32(ImplementsHasInstance | OverridesHasInstance), X86::ecx); - addSlowCase(jne32(X86::ecx, Imm32(ImplementsHasInstance))); + load32(Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), regT2); + and32(Imm32(ImplementsHasInstance | OverridesHasInstance), regT2); + addSlowCase(branch32(NotEqual, regT2, Imm32(ImplementsHasInstance))); - emitGetVirtualRegister(currentInstruction[2].u.operand, X86::ecx); // reload value - emitGetVirtualRegister(currentInstruction[4].u.operand, X86::edx); // reload proto + emitGetVirtualRegister(currentInstruction[2].u.operand, regT2); // reload value + emitGetVirtualRegister(currentInstruction[4].u.operand, regT1); // reload proto // optimistically load true result - move(ImmPtr(JSValuePtr::encode(jsBoolean(true))), X86::eax); + move(ImmPtr(JSValuePtr::encode(jsBoolean(true))), regT0); Label loop(this); // load value's prototype - loadPtr(Address(X86::ecx, FIELD_OFFSET(JSCell, m_structure)), X86::ecx); - loadPtr(Address(X86::ecx, FIELD_OFFSET(Structure, m_prototype)), X86::ecx); + loadPtr(Address(regT2, FIELD_OFFSET(JSCell, m_structure)), regT2); + loadPtr(Address(regT2, FIELD_OFFSET(Structure, m_prototype)), regT2); - Jump exit = jePtr(X86::ecx, X86::edx); + Jump exit = branchPtr(Equal, regT2, regT1); - jnePtr(X86::ecx, ImmPtr(JSValuePtr::encode(jsNull())), loop); + branchPtr(NotEqual, regT2, ImmPtr(JSValuePtr::encode(jsNull())), loop); - move(ImmPtr(JSValuePtr::encode(jsBoolean(false))), X86::eax); + move(ImmPtr(JSValuePtr::encode(jsBoolean(false))), regT0); exit.link(this); @@ -476,10 +470,10 @@ void JIT::privateCompileMainPass() NEXT_OPCODE(op_instanceof); } case op_del_by_id: { - emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx); + emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, regT2); Identifier* ident = 
&(m_codeBlock->identifier(currentInstruction[3].u.operand)); emitPutJITStubArgConstant(ident, 2); - emitCTICall(Interpreter::cti_op_del_by_id); + emitCTICall(JITStubs::cti_op_del_by_id); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_del_by_id); } @@ -490,7 +484,7 @@ void JIT::privateCompileMainPass() case op_new_func: { FuncDeclNode* func = m_codeBlock->function(currentInstruction[2].u.operand); emitPutJITStubArgConstant(func, 1); - emitCTICall(Interpreter::cti_op_new_func); + emitCTICall(JITStubs::cti_op_new_func); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_new_func); } @@ -508,67 +502,71 @@ void JIT::privateCompileMainPass() } case op_get_global_var: { JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[2].u.jsCell); - move(ImmPtr(globalObject), X86::eax); - emitGetVariableObjectRegister(X86::eax, currentInstruction[3].u.operand, X86::eax); + move(ImmPtr(globalObject), regT0); + emitGetVariableObjectRegister(regT0, currentInstruction[3].u.operand, regT0); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_get_global_var); } case op_put_global_var: { - emitGetVirtualRegister(currentInstruction[3].u.operand, X86::edx); + emitGetVirtualRegister(currentInstruction[3].u.operand, regT1); JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[1].u.jsCell); - move(ImmPtr(globalObject), X86::eax); - emitPutVariableObjectRegister(X86::edx, X86::eax, currentInstruction[2].u.operand); + move(ImmPtr(globalObject), regT0); + emitPutVariableObjectRegister(regT1, regT0, currentInstruction[2].u.operand); NEXT_OPCODE(op_put_global_var); } case op_get_scoped_var: { int skip = currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain(); - emitGetFromCallFrameHeader(RegisterFile::ScopeChain, X86::eax); + emitGetFromCallFrameHeader(RegisterFile::ScopeChain, regT0); while (skip--) - loadPtr(Address(X86::eax, FIELD_OFFSET(ScopeChainNode, next)), 
X86::eax); + loadPtr(Address(regT0, FIELD_OFFSET(ScopeChainNode, next)), regT0); - loadPtr(Address(X86::eax, FIELD_OFFSET(ScopeChainNode, object)), X86::eax); - emitGetVariableObjectRegister(X86::eax, currentInstruction[2].u.operand, X86::eax); + loadPtr(Address(regT0, FIELD_OFFSET(ScopeChainNode, object)), regT0); + emitGetVariableObjectRegister(regT0, currentInstruction[2].u.operand, regT0); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_get_scoped_var); } case op_put_scoped_var: { int skip = currentInstruction[2].u.operand + m_codeBlock->needsFullScopeChain(); - emitGetFromCallFrameHeader(RegisterFile::ScopeChain, X86::edx); - emitGetVirtualRegister(currentInstruction[3].u.operand, X86::eax); + emitGetFromCallFrameHeader(RegisterFile::ScopeChain, regT1); + emitGetVirtualRegister(currentInstruction[3].u.operand, regT0); while (skip--) - loadPtr(Address(X86::edx, FIELD_OFFSET(ScopeChainNode, next)), X86::edx); + loadPtr(Address(regT1, FIELD_OFFSET(ScopeChainNode, next)), regT1); - loadPtr(Address(X86::edx, FIELD_OFFSET(ScopeChainNode, object)), X86::edx); - emitPutVariableObjectRegister(X86::eax, X86::edx, currentInstruction[1].u.operand); + loadPtr(Address(regT1, FIELD_OFFSET(ScopeChainNode, object)), regT1); + emitPutVariableObjectRegister(regT0, regT1, currentInstruction[1].u.operand); NEXT_OPCODE(op_put_scoped_var); } case op_tear_off_activation: { - emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, X86::ecx); - emitCTICall(Interpreter::cti_op_tear_off_activation); + emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT2); + emitCTICall(JITStubs::cti_op_tear_off_activation); NEXT_OPCODE(op_tear_off_activation); } case op_tear_off_arguments: { - emitCTICall(Interpreter::cti_op_tear_off_arguments); + emitCTICall(JITStubs::cti_op_tear_off_arguments); NEXT_OPCODE(op_tear_off_arguments); } case op_ret: { // We could JIT generate the deref, only calling out to C when the refcount hits zero. 
if (m_codeBlock->needsFullScopeChain()) - emitCTICall(Interpreter::cti_op_ret_scopeChain); + emitCTICall(JITStubs::cti_op_ret_scopeChain); + + ASSERT(callFrameRegister != regT1); + ASSERT(regT1 != returnValueRegister); + ASSERT(returnValueRegister != callFrameRegister); // Return the result in %eax. - emitGetVirtualRegister(currentInstruction[1].u.operand, X86::eax); + emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister); // Grab the return address. - emitGetFromCallFrameHeader(RegisterFile::ReturnPC, X86::edx); + emitGetFromCallFrameHeader(RegisterFile::ReturnPC, regT1); // Restore our caller's "r". emitGetFromCallFrameHeader(RegisterFile::CallerFrame, callFrameRegister); // Return. - push(X86::edx); + push(regT1); ret(); NEXT_OPCODE(op_ret); @@ -576,29 +574,29 @@ void JIT::privateCompileMainPass() case op_new_array: { emitPutJITStubArgConstant(currentInstruction[2].u.operand, 1); emitPutJITStubArgConstant(currentInstruction[3].u.operand, 2); - emitCTICall(Interpreter::cti_op_new_array); + emitCTICall(JITStubs::cti_op_new_array); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_new_array); } case op_resolve: { Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand)); emitPutJITStubArgConstant(ident, 1); - emitCTICall(Interpreter::cti_op_resolve); + emitCTICall(JITStubs::cti_op_resolve); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_resolve); } case op_construct_verify: { - emitGetVirtualRegister(currentInstruction[1].u.operand, X86::eax); + emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); - emitJumpSlowCaseIfNotJSCell(X86::eax); - loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::ecx); - addSlowCase(jne32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType))); + emitJumpSlowCaseIfNotJSCell(regT0); + loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2); + 
addSlowCase(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType))); NEXT_OPCODE(op_construct_verify); } case op_get_by_val: { - emitGetVirtualRegisters(currentInstruction[2].u.operand, X86::eax, currentInstruction[3].u.operand, X86::edx); - emitJumpSlowCaseIfNotImmediateInteger(X86::edx); + emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1); + emitJumpSlowCaseIfNotImmediateInteger(regT1); #if USE(ALTERNATE_JSIMMEDIATE) // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter. // We check the value as if it was a uint32 against the m_fastAccessCutoff - which will always fail if @@ -606,27 +604,27 @@ void JIT::privateCompileMainPass() // size is always less than 4Gb). As such zero extending wil have been correct (and extending the value // to 64-bits is necessary since it's used in the address calculation. We zero extend rather than sign // extending since it makes it easier to re-tag the value in the slow case. 
- zeroExtend32ToPtr(X86::edx, X86::edx); + zeroExtend32ToPtr(regT1, regT1); #else - emitFastArithImmToInt(X86::edx); + emitFastArithImmToInt(regT1); #endif - emitJumpSlowCaseIfNotJSCell(X86::eax); - addSlowCase(jnePtr(Address(X86::eax), ImmPtr(m_interpreter->m_jsArrayVptr))); + emitJumpSlowCaseIfNotJSCell(regT0); + addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr))); // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff - loadPtr(Address(X86::eax, FIELD_OFFSET(JSArray, m_storage)), X86::ecx); - addSlowCase(jae32(X86::edx, Address(X86::eax, FIELD_OFFSET(JSArray, m_fastAccessCutoff)))); + loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2); + addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, FIELD_OFFSET(JSArray, m_fastAccessCutoff)))); // Get the value from the vector - loadPtr(BaseIndex(X86::ecx, X86::edx, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), X86::eax); + loadPtr(BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), regT0); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_get_by_val); } case op_resolve_func: { Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand)); emitPutJITStubArgConstant(ident, 1); - emitCTICall(Interpreter::cti_op_resolve_func); - emitPutVirtualRegister(currentInstruction[2].u.operand, X86::edx); + emitCTICall(JITStubs::cti_op_resolve_func); + emitPutVirtualRegister(currentInstruction[2].u.operand, regT1); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_resolve_func); } @@ -635,45 +633,45 @@ void JIT::privateCompileMainPass() NEXT_OPCODE(op_sub); } case op_put_by_val: { - emitGetVirtualRegisters(currentInstruction[1].u.operand, X86::eax, currentInstruction[2].u.operand, X86::edx); - emitJumpSlowCaseIfNotImmediateInteger(X86::edx); + emitGetVirtualRegisters(currentInstruction[1].u.operand, regT0, currentInstruction[2].u.operand, regT1); 
+ emitJumpSlowCaseIfNotImmediateInteger(regT1); #if USE(ALTERNATE_JSIMMEDIATE) // See comment in op_get_by_val. - zeroExtend32ToPtr(X86::edx, X86::edx); + zeroExtend32ToPtr(regT1, regT1); #else - emitFastArithImmToInt(X86::edx); + emitFastArithImmToInt(regT1); #endif - emitJumpSlowCaseIfNotJSCell(X86::eax); - addSlowCase(jnePtr(Address(X86::eax), ImmPtr(m_interpreter->m_jsArrayVptr))); + emitJumpSlowCaseIfNotJSCell(regT0); + addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr))); // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff - loadPtr(Address(X86::eax, FIELD_OFFSET(JSArray, m_storage)), X86::ecx); - Jump inFastVector = jb32(X86::edx, Address(X86::eax, FIELD_OFFSET(JSArray, m_fastAccessCutoff))); + loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2); + Jump inFastVector = branch32(Below, regT1, Address(regT0, FIELD_OFFSET(JSArray, m_fastAccessCutoff))); // No; oh well, check if the access if within the vector - if so, we may still be okay. - addSlowCase(jae32(X86::edx, Address(X86::ecx, FIELD_OFFSET(ArrayStorage, m_vectorLength)))); + addSlowCase(branch32(AboveOrEqual, regT1, Address(regT2, FIELD_OFFSET(ArrayStorage, m_vectorLength)))); // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location. // FIXME: should be able to handle initial write to array; increment the the number of items in the array, and potentially update fast access cutoff. - addSlowCase(jzPtr(BaseIndex(X86::ecx, X86::edx, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])))); + addSlowCase(branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])))); // All good - put the value into the array. 
inFastVector.link(this); - emitGetVirtualRegister(currentInstruction[3].u.operand, X86::eax); - storePtr(X86::eax, BaseIndex(X86::ecx, X86::edx, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0]))); + emitGetVirtualRegister(currentInstruction[3].u.operand, regT0); + storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0]))); NEXT_OPCODE(op_put_by_val); } CTI_COMPILE_BINARY_OP(op_lesseq) case op_loop_if_true: { - emitSlowScriptCheck(); + emitTimeoutCheck(); unsigned target = currentInstruction[2].u.operand; - emitGetVirtualRegister(currentInstruction[1].u.operand, X86::eax); + emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); - Jump isZero = jePtr(X86::eax, ImmPtr(JSValuePtr::encode(js0()))); - addJump(emitJumpIfImmediateInteger(X86::eax), target + 2); + Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValuePtr::encode(js0()))); + addJump(emitJumpIfImmediateInteger(regT0), target + 2); - addJump(jePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsBoolean(true)))), target + 2); - addSlowCase(jnePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsBoolean(false))))); + addJump(branchPtr(Equal, regT0, ImmPtr(JSValuePtr::encode(jsBoolean(true)))), target + 2); + addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValuePtr::encode(jsBoolean(false))))); isZero.link(this); NEXT_OPCODE(op_loop_if_true); @@ -681,13 +679,13 @@ void JIT::privateCompileMainPass() case op_resolve_base: { Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand)); emitPutJITStubArgConstant(ident, 1); - emitCTICall(Interpreter::cti_op_resolve_base); + emitCTICall(JITStubs::cti_op_resolve_base); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_resolve_base); } case op_negate: { - emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx); - emitCTICall(Interpreter::cti_op_negate); + emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, regT2); + emitCTICall(JITStubs::cti_op_negate); 
emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_negate); } @@ -695,7 +693,7 @@ void JIT::privateCompileMainPass() Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand)); emitPutJITStubArgConstant(ident, 1); emitPutJITStubArgConstant(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain(), 2); - emitCTICall(Interpreter::cti_op_resolve_skip); + emitCTICall(JITStubs::cti_op_resolve_skip); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_resolve_skip); } @@ -709,14 +707,14 @@ void JIT::privateCompileMainPass() void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset); // Check Structure of global object - move(ImmPtr(globalObject), X86::eax); - loadPtr(structureAddress, X86::edx); - Jump noMatch = jnePtr(X86::edx, Address(X86::eax, FIELD_OFFSET(JSCell, m_structure))); // Structures don't match + move(ImmPtr(globalObject), regT0); + loadPtr(structureAddress, regT1); + Jump noMatch = branchPtr(NotEqual, regT1, Address(regT0, FIELD_OFFSET(JSCell, m_structure))); // Structures don't match // Load cached property - loadPtr(Address(X86::eax, FIELD_OFFSET(JSGlobalObject, m_propertyStorage)), X86::eax); - load32(offsetAddr, X86::edx); - loadPtr(BaseIndex(X86::eax, X86::edx, ScalePtr), X86::eax); + loadPtr(Address(regT0, FIELD_OFFSET(JSGlobalObject, m_propertyStorage)), regT0); + load32(offsetAddr, regT1); + loadPtr(BaseIndex(regT0, regT1, ScalePtr), regT0); emitPutVirtualRegister(currentInstruction[1].u.operand); Jump end = jump(); @@ -725,7 +723,7 @@ void JIT::privateCompileMainPass() emitPutJITStubArgConstant(globalObject, 1); emitPutJITStubArgConstant(ident, 2); emitPutJITStubArgConstant(currentIndex, 3); - emitCTICall(Interpreter::cti_op_resolve_global); + emitCTICall(JITStubs::cti_op_resolve_global); emitPutVirtualRegister(currentInstruction[1].u.operand); end.link(this); NEXT_OPCODE(op_resolve_global); @@ -740,81 +738,85 @@ void JIT::privateCompileMainPass() unsigned op2 
= currentInstruction[2].u.operand; unsigned target = currentInstruction[3].u.operand; if (isOperandConstantImmediateInt(op2)) { - emitGetVirtualRegister(op1, X86::eax); - emitJumpSlowCaseIfNotImmediateInteger(X86::eax); + emitGetVirtualRegister(op1, regT0); + emitJumpSlowCaseIfNotImmediateInteger(regT0); #if USE(ALTERNATE_JSIMMEDIATE) int32_t op2imm = getConstantOperandImmediateInt(op2); #else int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2))); #endif - addJump(jge32(X86::eax, Imm32(op2imm)), target + 3); + addJump(branch32(GreaterThanOrEqual, regT0, Imm32(op2imm)), target + 3); } else { - emitGetVirtualRegisters(op1, X86::eax, op2, X86::edx); - emitJumpSlowCaseIfNotImmediateInteger(X86::eax); - emitJumpSlowCaseIfNotImmediateInteger(X86::edx); - addJump(jge32(X86::eax, X86::edx), target + 3); + emitGetVirtualRegisters(op1, regT0, op2, regT1); + emitJumpSlowCaseIfNotImmediateInteger(regT0); + emitJumpSlowCaseIfNotImmediateInteger(regT1); + addJump(branch32(GreaterThanOrEqual, regT0, regT1), target + 3); } + RECORD_JUMP_TARGET(target + 3); NEXT_OPCODE(op_jnless); } case op_not: { - emitGetVirtualRegister(currentInstruction[2].u.operand, X86::eax); - xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), X86::eax); - addSlowCase(jnzPtr(X86::eax, Imm32(static_cast<int32_t>(~JSImmediate::ExtendedPayloadBitBoolValue)))); - xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue)), X86::eax); + emitGetVirtualRegister(currentInstruction[2].u.operand, regT0); + xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0); + addSlowCase(branchTestPtr(NonZero, regT0, Imm32(static_cast<int32_t>(~JSImmediate::ExtendedPayloadBitBoolValue)))); + xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue)), regT0); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_not); } case op_jfalse: { unsigned target = 
currentInstruction[2].u.operand; - emitGetVirtualRegister(currentInstruction[1].u.operand, X86::eax); + emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); - addJump(jePtr(X86::eax, ImmPtr(JSValuePtr::encode(js0()))), target + 2); - Jump isNonZero = emitJumpIfImmediateInteger(X86::eax); + addJump(branchPtr(Equal, regT0, ImmPtr(JSValuePtr::encode(js0()))), target + 2); + Jump isNonZero = emitJumpIfImmediateInteger(regT0); - addJump(jePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsBoolean(false)))), target + 2); - addSlowCase(jnePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsBoolean(true))))); + addJump(branchPtr(Equal, regT0, ImmPtr(JSValuePtr::encode(jsBoolean(false)))), target + 2); + addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValuePtr::encode(jsBoolean(true))))); isNonZero.link(this); + RECORD_JUMP_TARGET(target + 2); NEXT_OPCODE(op_jfalse); }; case op_jeq_null: { unsigned src = currentInstruction[1].u.operand; unsigned target = currentInstruction[2].u.operand; - emitGetVirtualRegister(src, X86::eax); - Jump isImmediate = emitJumpIfNotJSCell(X86::eax); + emitGetVirtualRegister(src, regT0); + Jump isImmediate = emitJumpIfNotJSCell(regT0); // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure. 
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::ecx); - addJump(jnz32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2); + loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2); + addJump(branchTest32(NonZero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2); Jump wasNotImmediate = jump(); // Now handle the immediate cases - undefined & null isImmediate.link(this); - andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), X86::eax); - addJump(jePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsNull()))), target + 2); + andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0); + addJump(branchPtr(Equal, regT0, ImmPtr(JSValuePtr::encode(jsNull()))), target + 2); wasNotImmediate.link(this); + RECORD_JUMP_TARGET(target + 2); NEXT_OPCODE(op_jeq_null); }; case op_jneq_null: { unsigned src = currentInstruction[1].u.operand; unsigned target = currentInstruction[2].u.operand; - emitGetVirtualRegister(src, X86::eax); - Jump isImmediate = emitJumpIfNotJSCell(X86::eax); + emitGetVirtualRegister(src, regT0); + Jump isImmediate = emitJumpIfNotJSCell(regT0); // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure. 
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::ecx); - addJump(jz32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2); + loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2); + addJump(branchTest32(Zero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2); Jump wasNotImmediate = jump(); // Now handle the immediate cases - undefined & null isImmediate.link(this); - andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), X86::eax); - addJump(jnePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsNull()))), target + 2); + andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0); + addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValuePtr::encode(jsNull()))), target + 2); wasNotImmediate.link(this); + RECORD_JUMP_TARGET(target + 2); NEXT_OPCODE(op_jneq_null); } case op_post_inc: { @@ -823,7 +825,7 @@ void JIT::privateCompileMainPass() } case op_unexpected_load: { JSValuePtr v = m_codeBlock->unexpectedConstant(currentInstruction[2].u.operand); - move(ImmPtr(JSValuePtr::encode(v)), X86::eax); + move(ImmPtr(JSValuePtr::encode(v)), regT0); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_unexpected_load); } @@ -833,17 +835,20 @@ void JIT::privateCompileMainPass() DataLabelPtr storeLocation = storePtrWithPatch(Address(callFrameRegister, sizeof(Register) * retAddrDst)); addJump(jump(), target + 2); m_jsrSites.append(JSRInfo(storeLocation, label())); + killLastResultRegister(); + RECORD_JUMP_TARGET(target + 2); NEXT_OPCODE(op_jsr); } case op_sret: { jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand)); + killLastResultRegister(); NEXT_OPCODE(op_sret); } case op_eq: { - emitGetVirtualRegisters(currentInstruction[2].u.operand, X86::eax, currentInstruction[3].u.operand, X86::edx); - emitJumpSlowCaseIfNotImmediateIntegers(X86::eax, X86::edx, X86::ecx); - sete32(X86::edx, X86::eax); - 
emitTagAsBoolImmediate(X86::eax); + emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1); + emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2); + set32(Equal, regT1, regT0, regT0); + emitTagAsBoolImmediate(regT0); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_eq); } @@ -860,13 +865,13 @@ void JIT::privateCompileMainPass() NEXT_OPCODE(op_rshift); } case op_bitnot: { - emitGetVirtualRegister(currentInstruction[2].u.operand, X86::eax); - emitJumpSlowCaseIfNotImmediateInteger(X86::eax); + emitGetVirtualRegister(currentInstruction[2].u.operand, regT0); + emitJumpSlowCaseIfNotImmediateInteger(regT0); #if USE(ALTERNATE_JSIMMEDIATE) - not32(X86::eax); - emitFastArithIntToImmNoCheck(X86::eax, X86::eax); + not32(regT0); + emitFastArithIntToImmNoCheck(regT0, regT0); #else - xorPtr(Imm32(~JSImmediate::TagTypeNumber), X86::eax); + xorPtr(Imm32(~JSImmediate::TagTypeNumber), regT0); #endif emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_bitnot); @@ -874,15 +879,15 @@ void JIT::privateCompileMainPass() case op_resolve_with_base: { Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand)); emitPutJITStubArgConstant(ident, 1); - emitCTICall(Interpreter::cti_op_resolve_with_base); - emitPutVirtualRegister(currentInstruction[2].u.operand, X86::edx); + emitCTICall(JITStubs::cti_op_resolve_with_base); + emitPutVirtualRegister(currentInstruction[2].u.operand, regT1); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_resolve_with_base); } case op_new_func_exp: { FuncExprNode* func = m_codeBlock->functionExpression(currentInstruction[2].u.operand); emitPutJITStubArgConstant(func, 1); - emitCTICall(Interpreter::cti_op_new_func_exp); + emitCTICall(JITStubs::cti_op_new_func_exp); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_new_func_exp); } @@ -892,23 +897,24 @@ void JIT::privateCompileMainPass() } case op_jtrue: { 
unsigned target = currentInstruction[2].u.operand; - emitGetVirtualRegister(currentInstruction[1].u.operand, X86::eax); + emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); - Jump isZero = jePtr(X86::eax, ImmPtr(JSValuePtr::encode(js0()))); - addJump(emitJumpIfImmediateInteger(X86::eax), target + 2); + Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValuePtr::encode(js0()))); + addJump(emitJumpIfImmediateInteger(regT0), target + 2); - addJump(jePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsBoolean(true)))), target + 2); - addSlowCase(jnePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsBoolean(false))))); + addJump(branchPtr(Equal, regT0, ImmPtr(JSValuePtr::encode(jsBoolean(true)))), target + 2); + addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValuePtr::encode(jsBoolean(false))))); isZero.link(this); + RECORD_JUMP_TARGET(target + 2); NEXT_OPCODE(op_jtrue); } CTI_COMPILE_BINARY_OP(op_less) case op_neq: { - emitGetVirtualRegisters(currentInstruction[2].u.operand, X86::eax, currentInstruction[3].u.operand, X86::edx); - emitJumpSlowCaseIfNotImmediateIntegers(X86::eax, X86::edx, X86::ecx); - setne32(X86::edx, X86::eax); - emitTagAsBoolImmediate(X86::eax); + emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1); + emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2); + set32(NotEqual, regT1, regT0, regT0); + emitTagAsBoolImmediate(regT0); emitPutVirtualRegister(currentInstruction[1].u.operand); @@ -920,30 +926,31 @@ void JIT::privateCompileMainPass() } CTI_COMPILE_BINARY_OP(op_urshift) case op_bitxor: { - emitGetVirtualRegisters(currentInstruction[2].u.operand, X86::eax, currentInstruction[3].u.operand, X86::edx); - emitJumpSlowCaseIfNotImmediateIntegers(X86::eax, X86::edx, X86::ecx); - xorPtr(X86::edx, X86::eax); - emitFastArithReTagImmediate(X86::eax, X86::eax); + emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1); + 
emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2); + xorPtr(regT1, regT0); + emitFastArithReTagImmediate(regT0, regT0); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_bitxor); } case op_new_regexp: { RegExp* regExp = m_codeBlock->regexp(currentInstruction[2].u.operand); emitPutJITStubArgConstant(regExp, 1); - emitCTICall(Interpreter::cti_op_new_regexp); + emitCTICall(JITStubs::cti_op_new_regexp); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_new_regexp); } case op_bitor: { - emitGetVirtualRegisters(currentInstruction[2].u.operand, X86::eax, currentInstruction[3].u.operand, X86::edx); - emitJumpSlowCaseIfNotImmediateIntegers(X86::eax, X86::edx, X86::ecx); - orPtr(X86::edx, X86::eax); + emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1); + emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2); + orPtr(regT1, regT0); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_bitor); } case op_throw: { - emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, X86::ecx); - emitCTICall(Interpreter::cti_op_throw); + emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT2); + emitCTICall(JITStubs::cti_op_throw); + ASSERT(regT0 == returnValueRegister); #if PLATFORM(X86_64) addPtr(Imm32(0x48), X86::esp); pop(X86::ebx); @@ -964,29 +971,29 @@ void JIT::privateCompileMainPass() NEXT_OPCODE(op_throw); } case op_get_pnames: { - emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx); - emitCTICall(Interpreter::cti_op_get_pnames); + emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, regT2); + emitCTICall(JITStubs::cti_op_get_pnames); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_get_pnames); } case op_next_pname: { - emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx); + 
emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, regT2); unsigned target = currentInstruction[3].u.operand; - emitCTICall(Interpreter::cti_op_next_pname); - Jump endOfIter = jzPtr(X86::eax); + emitCTICall(JITStubs::cti_op_next_pname); + Jump endOfIter = branchTestPtr(Zero, regT0); emitPutVirtualRegister(currentInstruction[1].u.operand); addJump(jump(), target + 3); endOfIter.link(this); NEXT_OPCODE(op_next_pname); } case op_push_scope: { - emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, X86::ecx); - emitCTICall(Interpreter::cti_op_push_scope); + emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT2); + emitCTICall(JITStubs::cti_op_push_scope); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_push_scope); } case op_pop_scope: { - emitCTICall(Interpreter::cti_op_pop_scope); + emitCTICall(JITStubs::cti_op_pop_scope); NEXT_OPCODE(op_pop_scope); } CTI_COMPILE_UNARY_OP(op_typeof) @@ -1006,13 +1013,13 @@ void JIT::privateCompileMainPass() } case op_to_jsnumber: { int srcVReg = currentInstruction[2].u.operand; - emitGetVirtualRegister(srcVReg, X86::eax); + emitGetVirtualRegister(srcVReg, regT0); - Jump wasImmediate = emitJumpIfImmediateInteger(X86::eax); + Jump wasImmediate = emitJumpIfImmediateInteger(regT0); - emitJumpSlowCaseIfNotJSCell(X86::eax, srcVReg); - loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::ecx); - addSlowCase(jne32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo.m_type)), Imm32(NumberType))); + emitJumpSlowCaseIfNotJSCell(regT0, srcVReg); + loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2); + addSlowCase(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_type)), Imm32(NumberType))); wasImmediate.link(this); @@ -1023,8 +1030,8 @@ void JIT::privateCompileMainPass() case op_push_new_scope: { Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand)); emitPutJITStubArgConstant(ident, 
1); - emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 2, X86::ecx); - emitCTICall(Interpreter::cti_op_push_new_scope); + emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 2, regT2); + emitCTICall(JITStubs::cti_op_push_new_scope); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_push_new_scope); } @@ -1036,16 +1043,17 @@ void JIT::privateCompileMainPass() case op_jmp_scopes: { unsigned count = currentInstruction[1].u.operand; emitPutJITStubArgConstant(count, 1); - emitCTICall(Interpreter::cti_op_jmp_scopes); + emitCTICall(JITStubs::cti_op_jmp_scopes); unsigned target = currentInstruction[2].u.operand; addJump(jump(), target + 2); + RECORD_JUMP_TARGET(target + 2); NEXT_OPCODE(op_jmp_scopes); } case op_put_by_index: { - emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, X86::ecx); + emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT2); emitPutJITStubArgConstant(currentInstruction[2].u.operand, 2); - emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 3, X86::ecx); - emitCTICall(Interpreter::cti_op_put_by_index); + emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 3, regT2); + emitCTICall(JITStubs::cti_op_put_by_index); NEXT_OPCODE(op_put_by_index); } case op_switch_imm: { @@ -1058,10 +1066,10 @@ void JIT::privateCompileMainPass() m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate)); jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size()); - emitPutJITStubArgFromVirtualRegister(scrutinee, 1, X86::ecx); + emitPutJITStubArgFromVirtualRegister(scrutinee, 1, regT2); emitPutJITStubArgConstant(tableIndex, 2); - emitCTICall(Interpreter::cti_op_switch_imm); - jump(X86::eax); + emitCTICall(JITStubs::cti_op_switch_imm); + jump(regT0); NEXT_OPCODE(op_switch_imm); } case op_switch_char: { @@ -1074,10 +1082,10 @@ void JIT::privateCompileMainPass() 
m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character)); jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size()); - emitPutJITStubArgFromVirtualRegister(scrutinee, 1, X86::ecx); + emitPutJITStubArgFromVirtualRegister(scrutinee, 1, regT2); emitPutJITStubArgConstant(tableIndex, 2); - emitCTICall(Interpreter::cti_op_switch_char); - jump(X86::eax); + emitCTICall(JITStubs::cti_op_switch_char); + jump(regT0); NEXT_OPCODE(op_switch_char); } case op_switch_string: { @@ -1089,33 +1097,33 @@ void JIT::privateCompileMainPass() StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex); m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset)); - emitPutJITStubArgFromVirtualRegister(scrutinee, 1, X86::ecx); + emitPutJITStubArgFromVirtualRegister(scrutinee, 1, regT2); emitPutJITStubArgConstant(tableIndex, 2); - emitCTICall(Interpreter::cti_op_switch_string); - jump(X86::eax); + emitCTICall(JITStubs::cti_op_switch_string); + jump(regT0); NEXT_OPCODE(op_switch_string); } case op_del_by_val: { - emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx); - emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 2, X86::ecx); - emitCTICall(Interpreter::cti_op_del_by_val); + emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, regT2); + emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 2, regT2); + emitCTICall(JITStubs::cti_op_del_by_val); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_del_by_val); } case op_put_getter: { - emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, X86::ecx); + emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT2); Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand)); emitPutJITStubArgConstant(ident, 2); - emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 3, X86::ecx); - 
emitCTICall(Interpreter::cti_op_put_getter); + emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 3, regT2); + emitCTICall(JITStubs::cti_op_put_getter); NEXT_OPCODE(op_put_getter); } case op_put_setter: { - emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, X86::ecx); + emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT2); Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand)); emitPutJITStubArgConstant(ident, 2); - emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 3, X86::ecx); - emitCTICall(Interpreter::cti_op_put_setter); + emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 3, regT2); + emitCTICall(JITStubs::cti_op_put_setter); NEXT_OPCODE(op_put_setter); } case op_new_error: { @@ -1123,7 +1131,7 @@ void JIT::privateCompileMainPass() emitPutJITStubArgConstant(currentInstruction[2].u.operand, 1); emitPutJITStubArgConstant(JSValuePtr::encode(message), 2); emitPutJITStubArgConstant(m_bytecodeIndex, 3); - emitCTICall(Interpreter::cti_op_new_error); + emitCTICall(JITStubs::cti_op_new_error); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_new_error); } @@ -1131,29 +1139,29 @@ void JIT::privateCompileMainPass() emitPutJITStubArgConstant(currentInstruction[1].u.operand, 1); emitPutJITStubArgConstant(currentInstruction[2].u.operand, 2); emitPutJITStubArgConstant(currentInstruction[3].u.operand, 3); - emitCTICall(Interpreter::cti_op_debug); + emitCTICall(JITStubs::cti_op_debug); NEXT_OPCODE(op_debug); } case op_eq_null: { unsigned dst = currentInstruction[1].u.operand; unsigned src1 = currentInstruction[2].u.operand; - emitGetVirtualRegister(src1, X86::eax); - Jump isImmediate = emitJumpIfNotJSCell(X86::eax); + emitGetVirtualRegister(src1, regT0); + Jump isImmediate = emitJumpIfNotJSCell(regT0); - loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::ecx); - setnz32(Address(X86::ecx, 
FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), X86::eax); + loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2); + setTest32(NonZero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0); Jump wasNotImmediate = jump(); isImmediate.link(this); - andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), X86::eax); - sete32(Imm32(JSImmediate::FullTagTypeNull), X86::eax); + andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0); + setPtr(Equal, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0); wasNotImmediate.link(this); - emitTagAsBoolImmediate(X86::eax); + emitTagAsBoolImmediate(regT0); emitPutVirtualRegister(dst); NEXT_OPCODE(op_eq_null); @@ -1162,22 +1170,22 @@ void JIT::privateCompileMainPass() unsigned dst = currentInstruction[1].u.operand; unsigned src1 = currentInstruction[2].u.operand; - emitGetVirtualRegister(src1, X86::eax); - Jump isImmediate = emitJumpIfNotJSCell(X86::eax); + emitGetVirtualRegister(src1, regT0); + Jump isImmediate = emitJumpIfNotJSCell(regT0); - loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::ecx); - setz32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), X86::eax); + loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2); + setTest32(Zero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0); Jump wasNotImmediate = jump(); isImmediate.link(this); - andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), X86::eax); - setne32(Imm32(JSImmediate::FullTagTypeNull), X86::eax); + andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0); + setPtr(NotEqual, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0); wasNotImmediate.link(this); - emitTagAsBoolImmediate(X86::eax); + emitTagAsBoolImmediate(regT0); emitPutVirtualRegister(dst); NEXT_OPCODE(op_neq_null); @@ -1200,41 +1208,41 @@ void JIT::privateCompileMainPass() for (size_t j = 0; j < 
count; ++j) emitInitRegister(j); - emitCTICall(Interpreter::cti_op_push_activation); + emitCTICall(JITStubs::cti_op_push_activation); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_enter_with_activation); } case op_create_arguments: { if (m_codeBlock->m_numParameters == 1) - emitCTICall(Interpreter::cti_op_create_arguments_no_params); + emitCTICall(JITStubs::cti_op_create_arguments_no_params); else - emitCTICall(Interpreter::cti_op_create_arguments); + emitCTICall(JITStubs::cti_op_create_arguments); NEXT_OPCODE(op_create_arguments); } case op_convert_this: { - emitGetVirtualRegister(currentInstruction[1].u.operand, X86::eax); + emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); - emitJumpSlowCaseIfNotJSCell(X86::eax); - loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::edx); - addSlowCase(jnz32(Address(X86::edx, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion))); + emitJumpSlowCaseIfNotJSCell(regT0); + loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT1); + addSlowCase(branchTest32(NonZero, Address(regT1, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion))); NEXT_OPCODE(op_convert_this); } case op_profile_will_call: { - emitGetCTIParam(STUB_ARGS_profilerReference, X86::eax); - Jump noProfiler = jzPtr(Address(X86::eax)); - emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, X86::eax); - emitCTICall(Interpreter::cti_op_profile_will_call); + emitGetCTIParam(STUB_ARGS_profilerReference, regT0); + Jump noProfiler = branchTestPtr(Zero, Address(regT0)); + emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT0); + emitCTICall(JITStubs::cti_op_profile_will_call); noProfiler.link(this); NEXT_OPCODE(op_profile_will_call); } case op_profile_did_call: { - emitGetCTIParam(STUB_ARGS_profilerReference, X86::eax); - Jump noProfiler = jzPtr(Address(X86::eax)); - 
emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, X86::eax); - emitCTICall(Interpreter::cti_op_profile_did_call); + emitGetCTIParam(STUB_ARGS_profilerReference, regT0); + Jump noProfiler = branchTestPtr(Zero, Address(regT0)); + emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT0); + emitCTICall(JITStubs::cti_op_profile_did_call); noProfiler.link(this); NEXT_OPCODE(op_profile_did_call); @@ -1292,8 +1300,8 @@ void JIT::privateCompileSlowCases() case op_convert_this: { linkSlowCase(iter); linkSlowCase(iter); - emitPutJITStubArg(X86::eax, 1); - emitCTICall(Interpreter::cti_op_convert_this); + emitPutJITStubArg(regT0, 1); + emitCTICall(JITStubs::cti_op_convert_this); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_convert_this); } @@ -1304,7 +1312,7 @@ void JIT::privateCompileSlowCases() case op_construct_verify: { linkSlowCase(iter); linkSlowCase(iter); - emitGetVirtualRegister(currentInstruction[2].u.operand, X86::eax); + emitGetVirtualRegister(currentInstruction[2].u.operand, regT0); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_construct_verify); @@ -1316,25 +1324,25 @@ void JIT::privateCompileSlowCases() Jump notImm = getSlowCase(iter); linkSlowCase(iter); linkSlowCase(iter); - emitFastArithIntToImmNoCheck(X86::edx, X86::edx); + emitFastArithIntToImmNoCheck(regT1, regT1); notImm.link(this); - emitPutJITStubArg(X86::eax, 1); - emitPutJITStubArg(X86::edx, 2); - emitCTICall(Interpreter::cti_op_get_by_val); + emitPutJITStubArg(regT0, 1); + emitPutJITStubArg(regT1, 2); + emitCTICall(JITStubs::cti_op_get_by_val); emitPutVirtualRegister(currentInstruction[1].u.operand); emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val)); // This is slow case that handles accesses to arrays above the fast cut-off. 
// First, check if this is an access to the vector linkSlowCase(iter); - jae32(X86::edx, Address(X86::ecx, FIELD_OFFSET(ArrayStorage, m_vectorLength)), beginGetByValSlow); + branch32(AboveOrEqual, regT1, Address(regT2, FIELD_OFFSET(ArrayStorage, m_vectorLength)), beginGetByValSlow); // okay, missed the fast region, but it is still in the vector. Get the value. - loadPtr(BaseIndex(X86::ecx, X86::edx, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), X86::ecx); + loadPtr(BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), regT2); // Check whether the value loaded is zero; if so we need to return undefined. - jzPtr(X86::ecx, beginGetByValSlow); - move(X86::ecx, X86::eax); - emitPutVirtualRegister(currentInstruction[1].u.operand, X86::eax); + branchTestPtr(Zero, regT2, beginGetByValSlow); + move(regT2, regT0); + emitPutVirtualRegister(currentInstruction[1].u.operand, regT0); NEXT_OPCODE(op_get_by_val); } @@ -1355,17 +1363,17 @@ void JIT::privateCompileSlowCases() unsigned target = currentInstruction[3].u.operand; if (isOperandConstantImmediateInt(op2)) { linkSlowCase(iter); - emitPutJITStubArg(X86::eax, 1); - emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx); - emitCTICall(Interpreter::cti_op_loop_if_less); - emitJumpSlowToHot(jnz32(X86::eax), target + 3); + emitPutJITStubArg(regT0, 1); + emitPutJITStubArgFromVirtualRegister(op2, 2, regT2); + emitCTICall(JITStubs::cti_op_loop_if_less); + emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3); } else { linkSlowCase(iter); linkSlowCase(iter); - emitPutJITStubArg(X86::eax, 1); - emitPutJITStubArg(X86::edx, 2); - emitCTICall(Interpreter::cti_op_loop_if_less); - emitJumpSlowToHot(jnz32(X86::eax), target + 3); + emitPutJITStubArg(regT0, 1); + emitPutJITStubArg(regT1, 2); + emitCTICall(JITStubs::cti_op_loop_if_less); + emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3); } NEXT_OPCODE(op_loop_if_less); } @@ -1382,17 +1390,17 @@ void JIT::privateCompileSlowCases() unsigned target = 
currentInstruction[3].u.operand; if (isOperandConstantImmediateInt(op2)) { linkSlowCase(iter); - emitPutJITStubArg(X86::eax, 1); - emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 2, X86::ecx); - emitCTICall(Interpreter::cti_op_loop_if_lesseq); - emitJumpSlowToHot(jnz32(X86::eax), target + 3); + emitPutJITStubArg(regT0, 1); + emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 2, regT2); + emitCTICall(JITStubs::cti_op_loop_if_lesseq); + emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3); } else { linkSlowCase(iter); linkSlowCase(iter); - emitPutJITStubArg(X86::eax, 1); - emitPutJITStubArg(X86::edx, 2); - emitCTICall(Interpreter::cti_op_loop_if_lesseq); - emitJumpSlowToHot(jnz32(X86::eax), target + 3); + emitPutJITStubArg(regT0, 1); + emitPutJITStubArg(regT1, 2); + emitCTICall(JITStubs::cti_op_loop_if_lesseq); + emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3); } NEXT_OPCODE(op_loop_if_lesseq); } @@ -1405,32 +1413,32 @@ void JIT::privateCompileSlowCases() Jump notImm = getSlowCase(iter); linkSlowCase(iter); linkSlowCase(iter); - emitFastArithIntToImmNoCheck(X86::edx, X86::edx); + emitFastArithIntToImmNoCheck(regT1, regT1); notImm.link(this); - emitGetVirtualRegister(currentInstruction[3].u.operand, X86::ecx); - emitPutJITStubArg(X86::eax, 1); - emitPutJITStubArg(X86::edx, 2); - emitPutJITStubArg(X86::ecx, 3); - emitCTICall(Interpreter::cti_op_put_by_val); + emitGetVirtualRegister(currentInstruction[3].u.operand, regT2); + emitPutJITStubArg(regT0, 1); + emitPutJITStubArg(regT1, 2); + emitPutJITStubArg(regT2, 3); + emitCTICall(JITStubs::cti_op_put_by_val); emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_put_by_val)); // slow cases for immediate int accesses to arrays linkSlowCase(iter); linkSlowCase(iter); - emitGetVirtualRegister(currentInstruction[3].u.operand, X86::ecx); - emitPutJITStubArg(X86::eax, 1); - emitPutJITStubArg(X86::edx, 2); - emitPutJITStubArg(X86::ecx, 3); - 
emitCTICall(Interpreter::cti_op_put_by_val_array); + emitGetVirtualRegister(currentInstruction[3].u.operand, regT2); + emitPutJITStubArg(regT0, 1); + emitPutJITStubArg(regT1, 2); + emitPutJITStubArg(regT2, 3); + emitCTICall(JITStubs::cti_op_put_by_val_array); NEXT_OPCODE(op_put_by_val); } case op_loop_if_true: { linkSlowCase(iter); - emitPutJITStubArg(X86::eax, 1); - emitCTICall(Interpreter::cti_op_jtrue); + emitPutJITStubArg(regT0, 1); + emitCTICall(JITStubs::cti_op_jtrue); unsigned target = currentInstruction[2].u.operand; - emitJumpSlowToHot(jnz32(X86::eax), target + 2); + emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 2); NEXT_OPCODE(op_loop_if_true); } case op_pre_dec: { @@ -1442,34 +1450,34 @@ void JIT::privateCompileSlowCases() unsigned target = currentInstruction[3].u.operand; if (isOperandConstantImmediateInt(op2)) { linkSlowCase(iter); - emitPutJITStubArg(X86::eax, 1); - emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 2, X86::ecx); - emitCTICall(Interpreter::cti_op_jless); - emitJumpSlowToHot(jz32(X86::eax), target + 3); + emitPutJITStubArg(regT0, 1); + emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 2, regT2); + emitCTICall(JITStubs::cti_op_jless); + emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3); } else { linkSlowCase(iter); linkSlowCase(iter); - emitPutJITStubArg(X86::eax, 1); - emitPutJITStubArg(X86::edx, 2); - emitCTICall(Interpreter::cti_op_jless); - emitJumpSlowToHot(jz32(X86::eax), target + 3); + emitPutJITStubArg(regT0, 1); + emitPutJITStubArg(regT1, 2); + emitCTICall(JITStubs::cti_op_jless); + emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3); } NEXT_OPCODE(op_jnless); } case op_not: { linkSlowCase(iter); - xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), X86::eax); - emitPutJITStubArg(X86::eax, 1); - emitCTICall(Interpreter::cti_op_not); + xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0); + emitPutJITStubArg(regT0, 1); + 
emitCTICall(JITStubs::cti_op_not); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_not); } case op_jfalse: { linkSlowCase(iter); - emitPutJITStubArg(X86::eax, 1); - emitCTICall(Interpreter::cti_op_jtrue); + emitPutJITStubArg(regT0, 1); + emitCTICall(JITStubs::cti_op_jtrue); unsigned target = currentInstruction[2].u.operand; - emitJumpSlowToHot(jz32(X86::eax), target + 2); // inverted! + emitJumpSlowToHot(branchTest32(Zero, regT0), target + 2); // inverted! NEXT_OPCODE(op_jfalse); } case op_post_inc: { @@ -1478,8 +1486,8 @@ void JIT::privateCompileSlowCases() } case op_bitnot: { linkSlowCase(iter); - emitPutJITStubArg(X86::eax, 1); - emitCTICall(Interpreter::cti_op_bitnot); + emitPutJITStubArg(regT0, 1); + emitCTICall(JITStubs::cti_op_bitnot); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_bitnot); } @@ -1489,10 +1497,10 @@ void JIT::privateCompileSlowCases() } case op_jtrue: { linkSlowCase(iter); - emitPutJITStubArg(X86::eax, 1); - emitCTICall(Interpreter::cti_op_jtrue); + emitPutJITStubArg(regT0, 1); + emitCTICall(JITStubs::cti_op_jtrue); unsigned target = currentInstruction[2].u.operand; - emitJumpSlowToHot(jnz32(X86::eax), target + 2); + emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 2); NEXT_OPCODE(op_jtrue); } case op_post_dec: { @@ -1501,57 +1509,51 @@ void JIT::privateCompileSlowCases() } case op_bitxor: { linkSlowCase(iter); - emitPutJITStubArg(X86::eax, 1); - emitPutJITStubArg(X86::edx, 2); - emitCTICall(Interpreter::cti_op_bitxor); + emitPutJITStubArg(regT0, 1); + emitPutJITStubArg(regT1, 2); + emitCTICall(JITStubs::cti_op_bitxor); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_bitxor); } case op_bitor: { linkSlowCase(iter); - emitPutJITStubArg(X86::eax, 1); - emitPutJITStubArg(X86::edx, 2); - emitCTICall(Interpreter::cti_op_bitor); + emitPutJITStubArg(regT0, 1); + emitPutJITStubArg(regT1, 2); + emitCTICall(JITStubs::cti_op_bitor); 
emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_bitor); } case op_eq: { linkSlowCase(iter); - emitPutJITStubArg(X86::eax, 1); - emitPutJITStubArg(X86::edx, 2); - emitCTICall(Interpreter::cti_op_eq); + emitPutJITStubArg(regT0, 1); + emitPutJITStubArg(regT1, 2); + emitCTICall(JITStubs::cti_op_eq); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_eq); } case op_neq: { linkSlowCase(iter); - emitPutJITStubArg(X86::eax, 1); - emitPutJITStubArg(X86::edx, 2); - emitCTICall(Interpreter::cti_op_neq); + emitPutJITStubArg(regT0, 1); + emitPutJITStubArg(regT1, 2); + emitCTICall(JITStubs::cti_op_neq); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_neq); } case op_stricteq: { linkSlowCase(iter); linkSlowCase(iter); -#if !USE(ALTERNATE_JSIMMEDIATE) - linkSlowCase(iter); -#endif - emitPutJITStubArg(X86::eax, 1); - emitPutJITStubArg(X86::edx, 2); - emitCTICall(Interpreter::cti_op_stricteq); + emitPutJITStubArg(regT0, 1); + emitPutJITStubArg(regT1, 2); + emitCTICall(JITStubs::cti_op_stricteq); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_stricteq); } case op_nstricteq: { linkSlowCase(iter); linkSlowCase(iter); -#if !USE(ALTERNATE_JSIMMEDIATE) - linkSlowCase(iter); -#endif - emitPutJITStubArg(X86::eax, 1); - emitPutJITStubArg(X86::edx, 2); - emitCTICall(Interpreter::cti_op_nstricteq); + emitPutJITStubArg(regT0, 1); + emitPutJITStubArg(regT1, 2); + emitCTICall(JITStubs::cti_op_nstricteq); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_nstricteq); } @@ -1559,10 +1561,10 @@ void JIT::privateCompileSlowCases() linkSlowCase(iter); linkSlowCase(iter); linkSlowCase(iter); - emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, X86::ecx); - emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 2, X86::ecx); - emitPutJITStubArgFromVirtualRegister(currentInstruction[4].u.operand, 3, X86::ecx); - 
emitCTICall(Interpreter::cti_op_instanceof); + emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, regT2); + emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 2, regT2); + emitPutJITStubArgFromVirtualRegister(currentInstruction[4].u.operand, 3, regT2); + emitCTICall(JITStubs::cti_op_instanceof); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_instanceof); } @@ -1591,8 +1593,8 @@ void JIT::privateCompileSlowCases() linkSlowCaseIfNotJSCell(iter, currentInstruction[2].u.operand); linkSlowCase(iter); - emitPutJITStubArg(X86::eax, 1); - emitCTICall(Interpreter::cti_op_to_jsnumber); + emitPutJITStubArg(regT0, 1); + emitCTICall(JITStubs::cti_op_to_jsnumber); emitPutVirtualRegister(currentInstruction[1].u.operand); NEXT_OPCODE(op_to_jsnumber); @@ -1627,8 +1629,8 @@ void JIT::privateCompile() #endif // Could use a pop_m, but would need to offset the following instruction if so. - pop(X86::ecx); - emitPutToCallFrameHeader(X86::ecx, RegisterFile::ReturnPC); + pop(regT2); + emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC); Jump slowRegisterFileCheck; Label afterRegisterFileCheck; @@ -1636,10 +1638,10 @@ void JIT::privateCompile() // In the case of a fast linked call, we do not set this up in the caller. 
emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock); - emitGetCTIParam(STUB_ARGS_registerFile, X86::eax); - addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, X86::edx); + emitGetCTIParam(STUB_ARGS_registerFile, regT0); + addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1); - slowRegisterFileCheck = jg32(X86::edx, Address(X86::eax, FIELD_OFFSET(RegisterFile, m_end))); + slowRegisterFileCheck = branch32(GreaterThan, regT1, Address(regT0, FIELD_OFFSET(RegisterFile, m_end))); afterRegisterFileCheck = label(); } @@ -1650,7 +1652,7 @@ void JIT::privateCompile() if (m_codeBlock->codeType() == FunctionCode) { slowRegisterFileCheck.link(this); m_bytecodeIndex = 0; // emitCTICall will add to the map, but doesn't actually need this... - emitCTICall(Interpreter::cti_register_file_check); + emitCTICall(JITStubs::cti_register_file_check); #ifndef NDEBUG // reset this, in order to guard it's use with asserts m_bytecodeIndex = (unsigned)-1; @@ -1660,7 +1662,7 @@ void JIT::privateCompile() ASSERT(m_jmpTable.isEmpty()); - RefPtr<ExecutablePool> allocator = m_globalData->poolForSize(m_assembler.size()); + RefPtr<ExecutablePool> allocator = m_globalData->executableAllocator.poolForSize(m_assembler.size()); void* code = m_assembler.executableCopy(allocator.get()); JITCodeRef codeRef(code, allocator); #ifndef NDEBUG @@ -1678,28 +1680,28 @@ void JIT::privateCompile() ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character); ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size()); - record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.addressOf(m_labels[bytecodeIndex + 3 + record.defaultOffset]); + record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeIndex + 3 + record.defaultOffset]); for (unsigned j = 0; j < 
record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) { unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j]; - record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.addressOf(m_labels[bytecodeIndex + 3 + offset]) : record.jumpTable.simpleJumpTable->ctiDefault; + record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeIndex + 3 + offset]) : record.jumpTable.simpleJumpTable->ctiDefault; } } else { ASSERT(record.type == SwitchRecord::String); - record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.addressOf(m_labels[bytecodeIndex + 3 + record.defaultOffset]); + record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeIndex + 3 + record.defaultOffset]); StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end(); for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) { unsigned offset = it->second.branchOffset; - it->second.ctiOffset = offset ? patchBuffer.addressOf(m_labels[bytecodeIndex + 3 + offset]) : record.jumpTable.stringJumpTable->ctiDefault; + it->second.ctiOffset = offset ? 
patchBuffer.locationOf(m_labels[bytecodeIndex + 3 + offset]) : record.jumpTable.stringJumpTable->ctiDefault; } } } for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) { HandlerInfo& handler = m_codeBlock->exceptionHandler(i); - handler.nativeCode = patchBuffer.addressOf(m_labels[handler.target]); + handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]); } for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) { @@ -1708,61 +1710,53 @@ void JIT::privateCompile() } if (m_codeBlock->hasExceptionInfo()) { - m_codeBlock->pcVector().reserveCapacity(m_calls.size()); + m_codeBlock->callReturnIndexVector().reserveCapacity(m_calls.size()); for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) - m_codeBlock->pcVector().append(PC(reinterpret_cast<void**>(patchBuffer.addressOf(iter->from)) - reinterpret_cast<void**>(code), iter->bytecodeIndex)); + m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeIndex(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeIndex)); } // Link absolute addresses for jsr for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter) - patchBuffer.setPtr(iter->storeLocation, patchBuffer.addressOf(iter->target)); + patchBuffer.patch(iter->storeLocation, patchBuffer.locationOf(iter->target).addressForJSR()); +#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) for (unsigned i = 0; i < m_codeBlock->numberOfStructureStubInfos(); ++i) { StructureStubInfo& info = m_codeBlock->structureStubInfo(i); -#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) - info.callReturnLocation = patchBuffer.addressOf(m_propertyAccessCompilationInfo[i].callReturnLocation); - info.hotPathBegin = patchBuffer.addressOf(m_propertyAccessCompilationInfo[i].hotPathBegin); -#else - info.callReturnLocation = 0; - info.hotPathBegin = 0; -#endif + info.callReturnLocation = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].callReturnLocation); + 
info.hotPathBegin = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].hotPathBegin); } +#endif +#if ENABLE(JIT_OPTIMIZE_CALL) for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) { CallLinkInfo& info = m_codeBlock->callLinkInfo(i); -#if ENABLE(JIT_OPTIMIZE_CALL) - info.callReturnLocation = patchBuffer.addressOf(m_callStructureStubCompilationInfo[i].callReturnLocation); - info.hotPathBegin = patchBuffer.addressOf(m_callStructureStubCompilationInfo[i].hotPathBegin); - info.hotPathOther = patchBuffer.addressOf(m_callStructureStubCompilationInfo[i].hotPathOther); - info.coldPathOther = patchBuffer.addressOf(m_callStructureStubCompilationInfo[i].coldPathOther); -#else - info.callReturnLocation = 0; - info.hotPathBegin = 0; - info.hotPathOther = 0; - info.coldPathOther = 0; -#endif + info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation); + info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin); + info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther); + info.coldPathOther = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].coldPathOther); } +#endif m_codeBlock->setJITCode(codeRef); } -void JIT::privateCompileCTIMachineTrampolines() +void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, void** ctiArrayLengthTrampoline, void** ctiStringLengthTrampoline, void** ctiVirtualCallPreLink, void** ctiVirtualCallLink, void** ctiVirtualCall) { #if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) // (1) The first function provides fast property access for array length Label arrayLengthBegin = align(); // Check eax is an array - Jump array_failureCases1 = emitJumpIfNotJSCell(X86::eax); - Jump array_failureCases2 = jnePtr(Address(X86::eax), ImmPtr(m_interpreter->m_jsArrayVptr)); + Jump array_failureCases1 = emitJumpIfNotJSCell(regT0); + Jump array_failureCases2 = branchPtr(NotEqual, 
Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)); // Checks out okay! - get the length from the storage - loadPtr(Address(X86::eax, FIELD_OFFSET(JSArray, m_storage)), X86::eax); - load32(Address(X86::eax, FIELD_OFFSET(ArrayStorage, m_length)), X86::eax); + loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT0); + load32(Address(regT0, FIELD_OFFSET(ArrayStorage, m_length)), regT0); - Jump array_failureCases3 = ja32(X86::eax, Imm32(JSImmediate::maxImmediateInt)); + Jump array_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt)); - // X86::eax contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here. - emitFastArithIntToImmNoCheck(X86::eax, X86::eax); + // regT0 contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here. + emitFastArithIntToImmNoCheck(regT0, regT0); ret(); @@ -1770,159 +1764,175 @@ void JIT::privateCompileCTIMachineTrampolines() Label stringLengthBegin = align(); // Check eax is a string - Jump string_failureCases1 = emitJumpIfNotJSCell(X86::eax); - Jump string_failureCases2 = jnePtr(Address(X86::eax), ImmPtr(m_interpreter->m_jsStringVptr)); + Jump string_failureCases1 = emitJumpIfNotJSCell(regT0); + Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)); // Checks out okay! - get the length from the Ustring. 
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSString, m_value) + FIELD_OFFSET(UString, m_rep)), X86::eax); - load32(Address(X86::eax, FIELD_OFFSET(UString::Rep, len)), X86::eax); + loadPtr(Address(regT0, FIELD_OFFSET(JSString, m_value) + FIELD_OFFSET(UString, m_rep)), regT0); + load32(Address(regT0, FIELD_OFFSET(UString::Rep, len)), regT0); - Jump string_failureCases3 = ja32(X86::eax, Imm32(JSImmediate::maxImmediateInt)); + Jump string_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt)); - // X86::eax contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here. - emitFastArithIntToImmNoCheck(X86::eax, X86::eax); + // regT0 contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here. + emitFastArithIntToImmNoCheck(regT0, regT0); ret(); #endif +#if !(PLATFORM(X86) || PLATFORM(X86_64)) +#error "This code is less portable than it looks this code assumes that regT3 is callee preserved, which happens to be true on x86/x86-64." +#endif + // (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct. Label virtualCallPreLinkBegin = align(); // Load the callee CodeBlock* into eax - loadPtr(Address(X86::ecx, FIELD_OFFSET(JSFunction, m_body)), X86::eax); - loadPtr(Address(X86::eax, FIELD_OFFSET(FunctionBodyNode, m_code)), X86::eax); - Jump hasCodeBlock1 = jnzPtr(X86::eax); - pop(X86::ebx); + loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_body)), regT0); + loadPtr(Address(regT0, FIELD_OFFSET(FunctionBodyNode, m_code)), regT0); + Jump hasCodeBlock1 = branchTestPtr(NonZero, regT0); + pop(regT3); restoreArgumentReference(); - Jump callJSFunction1 = call(); - emitGetJITStubArg(1, X86::ecx); - emitGetJITStubArg(3, X86::edx); - push(X86::ebx); + Call callJSFunction1 = call(); + emitGetJITStubArg(1, regT2); + emitGetJITStubArg(3, regT1); + push(regT3); hasCodeBlock1.link(this); // Check argCount matches callee arity. 
- Jump arityCheckOkay1 = je32(Address(X86::eax, FIELD_OFFSET(CodeBlock, m_numParameters)), X86::edx); - pop(X86::ebx); - emitPutJITStubArg(X86::ebx, 2); - emitPutJITStubArg(X86::eax, 4); + Jump arityCheckOkay1 = branch32(Equal, Address(regT0, FIELD_OFFSET(CodeBlock, m_numParameters)), regT1); + pop(regT3); + emitPutJITStubArg(regT3, 2); + emitPutJITStubArg(regT0, 4); restoreArgumentReference(); - Jump callArityCheck1 = call(); - move(X86::edx, callFrameRegister); - emitGetJITStubArg(1, X86::ecx); - emitGetJITStubArg(3, X86::edx); - push(X86::ebx); + Call callArityCheck1 = call(); + move(regT1, callFrameRegister); + emitGetJITStubArg(1, regT2); + emitGetJITStubArg(3, regT1); + push(regT3); arityCheckOkay1.link(this); compileOpCallInitializeCallFrame(); - pop(X86::ebx); - emitPutJITStubArg(X86::ebx, 2); + pop(regT3); + emitPutJITStubArg(regT3, 2); restoreArgumentReference(); - Jump callDontLazyLinkCall = call(); - push(X86::ebx); + Call callDontLazyLinkCall = call(); + push(regT3); - jump(X86::eax); + jump(regT0); Label virtualCallLinkBegin = align(); // Load the callee CodeBlock* into eax - loadPtr(Address(X86::ecx, FIELD_OFFSET(JSFunction, m_body)), X86::eax); - loadPtr(Address(X86::eax, FIELD_OFFSET(FunctionBodyNode, m_code)), X86::eax); - Jump hasCodeBlock2 = jnzPtr(X86::eax); - pop(X86::ebx); + loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_body)), regT0); + loadPtr(Address(regT0, FIELD_OFFSET(FunctionBodyNode, m_code)), regT0); + Jump hasCodeBlock2 = branchTestPtr(NonZero, regT0); + pop(regT3); restoreArgumentReference(); - Jump callJSFunction2 = call(); - emitGetJITStubArg(1, X86::ecx); - emitGetJITStubArg(3, X86::edx); - push(X86::ebx); + Call callJSFunction2 = call(); + emitGetJITStubArg(1, regT2); + emitGetJITStubArg(3, regT1); + push(regT3); hasCodeBlock2.link(this); // Check argCount matches callee arity. 
- Jump arityCheckOkay2 = je32(Address(X86::eax, FIELD_OFFSET(CodeBlock, m_numParameters)), X86::edx); - pop(X86::ebx); - emitPutJITStubArg(X86::ebx, 2); - emitPutJITStubArg(X86::eax, 4); + Jump arityCheckOkay2 = branch32(Equal, Address(regT0, FIELD_OFFSET(CodeBlock, m_numParameters)), regT1); + pop(regT3); + emitPutJITStubArg(regT3, 2); + emitPutJITStubArg(regT0, 4); restoreArgumentReference(); - Jump callArityCheck2 = call(); - move(X86::edx, callFrameRegister); - emitGetJITStubArg(1, X86::ecx); - emitGetJITStubArg(3, X86::edx); - push(X86::ebx); + Call callArityCheck2 = call(); + move(regT1, callFrameRegister); + emitGetJITStubArg(1, regT2); + emitGetJITStubArg(3, regT1); + push(regT3); arityCheckOkay2.link(this); compileOpCallInitializeCallFrame(); - pop(X86::ebx); - emitPutJITStubArg(X86::ebx, 2); + pop(regT3); + emitPutJITStubArg(regT3, 2); restoreArgumentReference(); - Jump callLazyLinkCall = call(); - push(X86::ebx); + Call callLazyLinkCall = call(); + push(regT3); - jump(X86::eax); + jump(regT0); Label virtualCallBegin = align(); // Load the callee CodeBlock* into eax - loadPtr(Address(X86::ecx, FIELD_OFFSET(JSFunction, m_body)), X86::eax); - loadPtr(Address(X86::eax, FIELD_OFFSET(FunctionBodyNode, m_code)), X86::eax); - Jump hasCodeBlock3 = jnzPtr(X86::eax); - pop(X86::ebx); + loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_body)), regT0); + loadPtr(Address(regT0, FIELD_OFFSET(FunctionBodyNode, m_code)), regT0); + Jump hasCodeBlock3 = branchTestPtr(NonZero, regT0); + pop(regT3); restoreArgumentReference(); - Jump callJSFunction3 = call(); - emitGetJITStubArg(1, X86::ecx); - emitGetJITStubArg(3, X86::edx); - push(X86::ebx); + Call callJSFunction3 = call(); + emitGetJITStubArg(1, regT2); + emitGetJITStubArg(3, regT1); + push(regT3); hasCodeBlock3.link(this); // Check argCount matches callee arity. 
- Jump arityCheckOkay3 = je32(Address(X86::eax, FIELD_OFFSET(CodeBlock, m_numParameters)), X86::edx); - pop(X86::ebx); - emitPutJITStubArg(X86::ebx, 2); - emitPutJITStubArg(X86::eax, 4); + Jump arityCheckOkay3 = branch32(Equal, Address(regT0, FIELD_OFFSET(CodeBlock, m_numParameters)), regT1); + pop(regT3); + emitPutJITStubArg(regT3, 2); + emitPutJITStubArg(regT0, 4); restoreArgumentReference(); - Jump callArityCheck3 = call(); - move(X86::edx, callFrameRegister); - emitGetJITStubArg(1, X86::ecx); - emitGetJITStubArg(3, X86::edx); - push(X86::ebx); + Call callArityCheck3 = call(); + move(regT1, callFrameRegister); + emitGetJITStubArg(1, regT2); + emitGetJITStubArg(3, regT1); + push(regT3); arityCheckOkay3.link(this); compileOpCallInitializeCallFrame(); // load ctiCode from the new codeBlock. - loadPtr(Address(X86::eax, FIELD_OFFSET(CodeBlock, m_jitCode)), X86::eax); + loadPtr(Address(regT0, FIELD_OFFSET(CodeBlock, m_jitCode)), regT0); - jump(X86::eax); + jump(regT0); + +#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) + Call array_failureCases1Call = makeTailRecursiveCall(array_failureCases1); + Call array_failureCases2Call = makeTailRecursiveCall(array_failureCases2); + Call array_failureCases3Call = makeTailRecursiveCall(array_failureCases3); + Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1); + Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2); + Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3); +#endif // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object. 
- m_interpreter->m_executablePool = m_globalData->poolForSize(m_assembler.size()); - void* code = m_assembler.executableCopy(m_interpreter->m_executablePool.get()); - PatchBuffer patchBuffer(code); + *executablePool = m_globalData->executableAllocator.poolForSize(m_assembler.size()); + void* code = m_assembler.executableCopy((*executablePool).get()); + PatchBuffer patchBuffer(code); #if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) - patchBuffer.link(array_failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail)); - patchBuffer.link(array_failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail)); - patchBuffer.link(array_failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail)); - patchBuffer.link(string_failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_string_fail)); - patchBuffer.link(string_failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_string_fail)); - patchBuffer.link(string_failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_string_fail)); - - m_interpreter->m_ctiArrayLengthTrampoline = patchBuffer.addressOf(arrayLengthBegin); - m_interpreter->m_ctiStringLengthTrampoline = patchBuffer.addressOf(stringLengthBegin); + patchBuffer.link(array_failureCases1Call, JITStubs::cti_op_get_by_id_array_fail); + patchBuffer.link(array_failureCases2Call, JITStubs::cti_op_get_by_id_array_fail); + patchBuffer.link(array_failureCases3Call, JITStubs::cti_op_get_by_id_array_fail); + patchBuffer.link(string_failureCases1Call, JITStubs::cti_op_get_by_id_string_fail); + patchBuffer.link(string_failureCases2Call, JITStubs::cti_op_get_by_id_string_fail); + patchBuffer.link(string_failureCases3Call, JITStubs::cti_op_get_by_id_string_fail); + + *ctiArrayLengthTrampoline = patchBuffer.trampolineAt(arrayLengthBegin); + *ctiStringLengthTrampoline = patchBuffer.trampolineAt(stringLengthBegin); +#else + UNUSED_PARAM(ctiArrayLengthTrampoline); + 
UNUSED_PARAM(ctiStringLengthTrampoline); #endif - patchBuffer.link(callArityCheck1, reinterpret_cast<void*>(Interpreter::cti_op_call_arityCheck)); - patchBuffer.link(callArityCheck2, reinterpret_cast<void*>(Interpreter::cti_op_call_arityCheck)); - patchBuffer.link(callArityCheck3, reinterpret_cast<void*>(Interpreter::cti_op_call_arityCheck)); - patchBuffer.link(callJSFunction1, reinterpret_cast<void*>(Interpreter::cti_op_call_JSFunction)); - patchBuffer.link(callJSFunction2, reinterpret_cast<void*>(Interpreter::cti_op_call_JSFunction)); - patchBuffer.link(callJSFunction3, reinterpret_cast<void*>(Interpreter::cti_op_call_JSFunction)); - patchBuffer.link(callDontLazyLinkCall, reinterpret_cast<void*>(Interpreter::cti_vm_dontLazyLinkCall)); - patchBuffer.link(callLazyLinkCall, reinterpret_cast<void*>(Interpreter::cti_vm_lazyLinkCall)); - - m_interpreter->m_ctiVirtualCallPreLink = patchBuffer.addressOf(virtualCallPreLinkBegin); - m_interpreter->m_ctiVirtualCallLink = patchBuffer.addressOf(virtualCallLinkBegin); - m_interpreter->m_ctiVirtualCall = patchBuffer.addressOf(virtualCallBegin); + patchBuffer.link(callArityCheck1, JITStubs::cti_op_call_arityCheck); + patchBuffer.link(callArityCheck2, JITStubs::cti_op_call_arityCheck); + patchBuffer.link(callArityCheck3, JITStubs::cti_op_call_arityCheck); + patchBuffer.link(callJSFunction1, JITStubs::cti_op_call_JSFunction); + patchBuffer.link(callJSFunction2, JITStubs::cti_op_call_JSFunction); + patchBuffer.link(callJSFunction3, JITStubs::cti_op_call_JSFunction); + patchBuffer.link(callDontLazyLinkCall, JITStubs::cti_vm_dontLazyLinkCall); + patchBuffer.link(callLazyLinkCall, JITStubs::cti_vm_lazyLinkCall); + + *ctiVirtualCallPreLink = patchBuffer.trampolineAt(virtualCallPreLinkBegin); + *ctiVirtualCallLink = patchBuffer.trampolineAt(virtualCallLinkBegin); + *ctiVirtualCall = patchBuffer.trampolineAt(virtualCallBegin); } void JIT::emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst) diff --git 
a/JavaScriptCore/jit/JIT.h b/JavaScriptCore/jit/JIT.h index d13fbb5..25c7825 100644 --- a/JavaScriptCore/jit/JIT.h +++ b/JavaScriptCore/jit/JIT.h @@ -27,17 +27,19 @@ #define JIT_h #include <wtf/Platform.h> -#include <bytecode/SamplingTool.h> #if ENABLE(JIT) #define WTF_USE_CTI_REPATCH_PIC 1 #include "Interpreter.h" +#include "JITCode.h" +#include "JITStubs.h" #include "Opcode.h" #include "RegisterFile.h" #include "MacroAssembler.h" #include "Profiler.h" +#include <bytecode/SamplingTool.h> #include <wtf/AlwaysInline.h> #include <wtf/Vector.h> @@ -113,7 +115,7 @@ namespace JSC { typedef VoidPtrPair (JIT_STUB *CTIHelper_2)(STUB_ARGS); struct CallRecord { - MacroAssembler::Jump from; + MacroAssembler::Call from; unsigned bytecodeIndex; void* to; @@ -121,7 +123,7 @@ namespace JSC { { } - CallRecord(MacroAssembler::Jump from, unsigned bytecodeIndex, void* to = 0) + CallRecord(MacroAssembler::Call from, unsigned bytecodeIndex, void* to = 0) : from(from) , bytecodeIndex(bytecodeIndex) , to(to) @@ -188,44 +190,73 @@ namespace JSC { }; struct PropertyStubCompilationInfo { - MacroAssembler::Jump callReturnLocation; + MacroAssembler::Call callReturnLocation; MacroAssembler::Label hotPathBegin; }; struct StructureStubCompilationInfo { MacroAssembler::DataLabelPtr hotPathBegin; - MacroAssembler::Jump hotPathOther; - MacroAssembler::Jump callReturnLocation; + MacroAssembler::Call hotPathOther; + MacroAssembler::Call callReturnLocation; MacroAssembler::Label coldPathOther; }; extern "C" { - JSValueEncodedAsPointer* ctiTrampoline( -#if PLATFORM(X86_64) - // FIXME: (bug #22910) this will force all arguments onto the stack (regparm(0) does not appear to have any effect). - // We can allow register passing here, and move the writes of these values into the trampoline. 
- void*, void*, void*, void*, void*, void*, -#endif - void* code, RegisterFile*, CallFrame*, JSValuePtr* exception, Profiler**, JSGlobalData*); void ctiVMThrowTrampoline(); }; - void ctiSetReturnAddress(void** where, void* what); - void ctiPatchCallByReturnAddress(void* where, void* what); + void ctiSetReturnAddress(void** addressOfReturnAddress, void* newDestinationToReturnTo); + void ctiPatchCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, void* newCalleeFunction); + void ctiPatchNearCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, void* newCalleeFunction); class JIT : private MacroAssembler { using MacroAssembler::Jump; using MacroAssembler::JumpList; using MacroAssembler::Label; + // NOTES: + // + // regT0 has two special meanings. The return value from a stub + // call will always be in regT0, and by default (unless + // a register is specified) emitPutVirtualRegister() will store + // the value from regT0. + // + // tempRegister2 is has no such dependencies. It is important that + // on x86/x86-64 it is ecx for performance reasons, since the + // MacroAssembler will need to plant register swaps if it is not - + // however the code will still function correctly. #if PLATFORM(X86_64) + static const RegisterID returnValueRegister = X86::eax; + static const RegisterID cachedResultRegister = X86::eax; + static const RegisterID firstArgumentRegister = X86::edi; + static const RegisterID timeoutCheckRegister = X86::r12; static const RegisterID callFrameRegister = X86::r13; static const RegisterID tagTypeNumberRegister = X86::r14; static const RegisterID tagMaskRegister = X86::r15; -#else + + static const RegisterID regT0 = X86::eax; + static const RegisterID regT1 = X86::edx; + static const RegisterID regT2 = X86::ecx; + // NOTE: privateCompileCTIMachineTrampolines() relies on this being callee preserved; this should be considered non-interface. 
+ static const RegisterID regT3 = X86::ebx; +#elif PLATFORM(X86) + static const RegisterID returnValueRegister = X86::eax; + static const RegisterID cachedResultRegister = X86::eax; + // On x86 we always use fastcall conventions = but on + // OS X if might make more sense to just use regparm. + static const RegisterID firstArgumentRegister = X86::ecx; + static const RegisterID timeoutCheckRegister = X86::esi; static const RegisterID callFrameRegister = X86::edi; + + static const RegisterID regT0 = X86::eax; + static const RegisterID regT1 = X86::edx; + static const RegisterID regT2 = X86::ecx; + // NOTE: privateCompileCTIMachineTrampolines() relies on this being callee preserved; this should be considered non-interface. + static const RegisterID regT3 = X86::ebx; +#else + #error "JIT not supported on this platform." #endif static const int patchGetByIdDefaultStructure = -1; @@ -255,9 +286,9 @@ namespace JSC { static const int patchOffsetGetByIdPropertyMapOffset = 31; static const int patchOffsetGetByIdPutResult = 31; #if ENABLE(OPCODE_SAMPLING) - static const int patchOffsetGetByIdSlowCaseCall = 53 + ctiArgumentInitSize; + static const int patchOffsetGetByIdSlowCaseCall = 61 + ctiArgumentInitSize; #else - static const int patchOffsetGetByIdSlowCaseCall = 30 + ctiArgumentInitSize; + static const int patchOffsetGetByIdSlowCaseCall = 38 + ctiArgumentInitSize; #endif static const int patchOffsetOpCallCompareToJump = 9; #else @@ -284,13 +315,13 @@ namespace JSC { jit.privateCompile(); } - static void compileGetByIdSelf(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress) + static void compileGetByIdSelf(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress) { JIT jit(globalData, codeBlock); jit.privateCompileGetByIdSelf(stubInfo, structure, cachedOffset, returnAddress); } - static 
void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, void* returnAddress) + static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ProcessorReturnAddress returnAddress) { JIT jit(globalData, codeBlock); jit.privateCompileGetByIdProto(stubInfo, structure, prototypeStructure, cachedOffset, returnAddress, callFrame); @@ -314,51 +345,43 @@ namespace JSC { } #endif - static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, void* returnAddress) + static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ProcessorReturnAddress returnAddress) { JIT jit(globalData, codeBlock); jit.privateCompileGetByIdChain(stubInfo, structure, chain, count, cachedOffset, returnAddress, callFrame); } - static void compilePutByIdReplace(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress) + static void compilePutByIdReplace(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress) { JIT jit(globalData, codeBlock); jit.privateCompilePutByIdReplace(stubInfo, structure, cachedOffset, returnAddress); } - static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, void* 
returnAddress) + static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ProcessorReturnAddress returnAddress) { JIT jit(globalData, codeBlock); jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress); } - static void compileCTIMachineTrampolines(JSGlobalData* globalData) + static void compileCTIMachineTrampolines(JSGlobalData* globalData, RefPtr<ExecutablePool>* executablePool, void** ctiArrayLengthTrampoline, void** ctiStringLengthTrampoline, void** ctiVirtualCallPreLink, void** ctiVirtualCallLink, void** ctiVirtualCall) + { JIT jit(globalData); - jit.privateCompileCTIMachineTrampolines(); + jit.privateCompileCTIMachineTrampolines(executablePool, ctiArrayLengthTrampoline, ctiStringLengthTrampoline, ctiVirtualCallPreLink, ctiVirtualCallLink, ctiVirtualCall); } - static void patchGetByIdSelf(StructureStubInfo*, Structure*, size_t cachedOffset, void* returnAddress); - static void patchPutByIdReplace(StructureStubInfo*, Structure*, size_t cachedOffset, void* returnAddress); + static void patchGetByIdSelf(StructureStubInfo*, Structure*, size_t cachedOffset, ProcessorReturnAddress returnAddress); + static void patchPutByIdReplace(StructureStubInfo*, Structure*, size_t cachedOffset, ProcessorReturnAddress returnAddress); - static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, void* returnAddress) + static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ProcessorReturnAddress returnAddress) { JIT jit(globalData, codeBlock); return jit.privateCompilePatchGetArrayLength(returnAddress); } - static void linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, void* ctiCode, CallLinkInfo* callLinkInfo, int callerArgCount); + static void linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, JITCode ctiCode, 
CallLinkInfo* callLinkInfo, int callerArgCount); static void unlinkCall(CallLinkInfo*); - inline static JSValuePtr execute(void* code, RegisterFile* registerFile, CallFrame* callFrame, JSGlobalData* globalData, JSValuePtr* exception) - { - return JSValuePtr::decode(ctiTrampoline( -#if PLATFORM(X86_64) - 0, 0, 0, 0, 0, 0, -#endif - code, registerFile, callFrame, exception, Profiler::enabledProfilerReference(), globalData)); - } - private: JIT(JSGlobalData*, CodeBlock* = 0); @@ -366,19 +389,19 @@ namespace JSC { void privateCompileLinkPass(); void privateCompileSlowCases(); void privateCompile(); - void privateCompileGetByIdSelf(StructureStubInfo*, Structure*, size_t cachedOffset, void* returnAddress); - void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, size_t cachedOffset, void* returnAddress, CallFrame* callFrame); + void privateCompileGetByIdSelf(StructureStubInfo*, Structure*, size_t cachedOffset, ProcessorReturnAddress returnAddress); + void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame); #if USE(CTI_REPATCH_PIC) void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, size_t cachedOffset); void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame); void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame); #endif - void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, size_t cachedOffset, void* returnAddress, CallFrame* callFrame); - void privateCompilePutByIdReplace(StructureStubInfo*, Structure*, size_t cachedOffset, void* returnAddress); - void 
privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, void* returnAddress); + void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame); + void privateCompilePutByIdReplace(StructureStubInfo*, Structure*, size_t cachedOffset, ProcessorReturnAddress returnAddress); + void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, ProcessorReturnAddress returnAddress); - void privateCompileCTIMachineTrampolines(); - void privateCompilePatchGetArrayLength(void* returnAddress); + void privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, void** ctiArrayLengthTrampoline, void** ctiStringLengthTrampoline, void** ctiVirtualCallPreLink, void** ctiVirtualCallLink, void** ctiVirtualCall); + void privateCompilePatchGetArrayLength(ProcessorReturnAddress returnAddress); void addSlowCase(Jump); void addJump(Jump, int); @@ -396,7 +419,6 @@ namespace JSC { void compileOpConstructSetupArgs(Instruction*); enum CompileOpStrictEqType { OpStrictEq, OpNStrictEq }; void compileOpStrictEq(Instruction* instruction, CompileOpStrictEqType type); - void putDoubleResultToJSNumberCellOrJSImmediate(X86Assembler::XMMRegisterID xmmSource, RegisterID jsNumberCell, unsigned dst, X86Assembler::JmpSrc* wroteJSNumberCell, X86Assembler::XMMRegisterID tempXmm, RegisterID tempReg1, RegisterID tempReg2); void compileFastArith_op_add(Instruction*); void compileFastArith_op_sub(Instruction*); @@ -427,7 +449,7 @@ namespace JSC { void emitGetVirtualRegister(int src, RegisterID dst); void emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2); - void emitPutVirtualRegister(unsigned dst, RegisterID from = X86::eax); + void emitPutVirtualRegister(unsigned dst, RegisterID from = regT0); void emitPutJITStubArg(RegisterID src, unsigned 
argumentNumber); void emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch); @@ -458,6 +480,16 @@ namespace JSC { #if USE(ALTERNATE_JSIMMEDIATE) JIT::Jump emitJumpIfImmediateNumber(RegisterID); JIT::Jump emitJumpIfNotImmediateNumber(RegisterID); +#else + JIT::Jump emitJumpIfImmediateNumber(RegisterID reg) + { + return emitJumpIfImmediateInteger(reg); + } + + JIT::Jump emitJumpIfNotImmediateNumber(RegisterID reg) + { + return emitJumpIfNotImmediateInteger(reg); + } #endif Jump getSlowCase(Vector<SlowCaseEntry>::iterator& iter) @@ -492,21 +524,20 @@ namespace JSC { void restoreArgumentReference(); void restoreArgumentReferenceForTrampoline(); - Jump emitNakedCall(RegisterID); - Jump emitNakedCall(void* function); - Jump emitCTICall_internal(void*); - Jump emitCTICall(CTIHelper_j helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); } - Jump emitCTICall(CTIHelper_o helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); } - Jump emitCTICall(CTIHelper_p helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); } - Jump emitCTICall(CTIHelper_v helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); } - Jump emitCTICall(CTIHelper_s helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); } - Jump emitCTICall(CTIHelper_b helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); } - Jump emitCTICall(CTIHelper_2 helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); } + Call emitNakedCall(void* function); + Call emitCTICall_internal(void*); + Call emitCTICall(CTIHelper_j helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); } + Call emitCTICall(CTIHelper_o helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); } + Call emitCTICall(CTIHelper_p helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); } + Call emitCTICall(CTIHelper_v helper) { return 
emitCTICall_internal(reinterpret_cast<void*>(helper)); } + Call emitCTICall(CTIHelper_s helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); } + Call emitCTICall(CTIHelper_b helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); } + Call emitCTICall(CTIHelper_2 helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); } void emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst); void emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index); - void emitSlowScriptCheck(); + void emitTimeoutCheck(); #ifndef NDEBUG void printBytecodeOperandTypes(unsigned src1, unsigned src2); #endif diff --git a/JavaScriptCore/jit/JITArithmetic.cpp b/JavaScriptCore/jit/JITArithmetic.cpp index 0a3e9ab..8fe245e 100644 --- a/JavaScriptCore/jit/JITArithmetic.cpp +++ b/JavaScriptCore/jit/JITArithmetic.cpp @@ -48,23 +48,23 @@ namespace JSC { void JIT::compileFastArith_op_lshift(unsigned result, unsigned op1, unsigned op2) { - emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx); + emitGetVirtualRegisters(op1, regT0, op2, regT2); // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent. - emitJumpSlowCaseIfNotImmediateInteger(X86::eax); - emitJumpSlowCaseIfNotImmediateInteger(X86::ecx); - emitFastArithImmToInt(X86::eax); - emitFastArithImmToInt(X86::ecx); + emitJumpSlowCaseIfNotImmediateInteger(regT0); + emitJumpSlowCaseIfNotImmediateInteger(regT2); + emitFastArithImmToInt(regT0); + emitFastArithImmToInt(regT2); #if !PLATFORM(X86) // Mask with 0x1f as per ecma-262 11.7.2 step 7. // On 32-bit x86 this is not necessary, since the shift anount is implicitly masked in the instruction. 
- and32(Imm32(0x1f), X86::ecx); + and32(Imm32(0x1f), regT2); #endif - lshift32(X86::ecx, X86::eax); + lshift32(regT2, regT0); #if !USE(ALTERNATE_JSIMMEDIATE) - addSlowCase(joAdd32(X86::eax, X86::eax)); - signExtend32ToPtr(X86::eax, X86::eax); + addSlowCase(branchAdd32(Overflow, regT0, regT0)); + signExtend32ToPtr(regT0, regT0); #endif - emitFastArithReTagImmediate(X86::eax, X86::eax); + emitFastArithReTagImmediate(regT0, regT0); emitPutVirtualRegister(result); } void JIT::compileFastArithSlow_op_lshift(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter) @@ -79,47 +79,47 @@ void JIT::compileFastArithSlow_op_lshift(unsigned result, unsigned op1, unsigned Jump notImm1 = getSlowCase(iter); Jump notImm2 = getSlowCase(iter); linkSlowCase(iter); - emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx); + emitGetVirtualRegisters(op1, regT0, op2, regT2); notImm1.link(this); notImm2.link(this); #endif - emitPutJITStubArg(X86::eax, 1); - emitPutJITStubArg(X86::ecx, 2); - emitCTICall(Interpreter::cti_op_lshift); + emitPutJITStubArg(regT0, 1); + emitPutJITStubArg(regT2, 2); + emitCTICall(JITStubs::cti_op_lshift); emitPutVirtualRegister(result); } void JIT::compileFastArith_op_rshift(unsigned result, unsigned op1, unsigned op2) { if (isOperandConstantImmediateInt(op2)) { - emitGetVirtualRegister(op1, X86::eax); - emitJumpSlowCaseIfNotImmediateInteger(X86::eax); + emitGetVirtualRegister(op1, regT0); + emitJumpSlowCaseIfNotImmediateInteger(regT0); // Mask with 0x1f as per ecma-262 11.7.2 step 7. 
#if USE(ALTERNATE_JSIMMEDIATE) - rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), X86::eax); + rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0); #else - rshiftPtr(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), X86::eax); + rshiftPtr(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0); #endif } else { - emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx); - emitJumpSlowCaseIfNotImmediateInteger(X86::eax); - emitJumpSlowCaseIfNotImmediateInteger(X86::ecx); - emitFastArithImmToInt(X86::ecx); + emitGetVirtualRegisters(op1, regT0, op2, regT2); + emitJumpSlowCaseIfNotImmediateInteger(regT0); + emitJumpSlowCaseIfNotImmediateInteger(regT2); + emitFastArithImmToInt(regT2); #if !PLATFORM(X86) // Mask with 0x1f as per ecma-262 11.7.2 step 7. // On 32-bit x86 this is not necessary, since the shift anount is implicitly masked in the instruction. - and32(Imm32(0x1f), X86::ecx); + and32(Imm32(0x1f), regT2); #endif #if USE(ALTERNATE_JSIMMEDIATE) - rshift32(X86::ecx, X86::eax); + rshift32(regT2, regT0); #else - rshiftPtr(X86::ecx, X86::eax); + rshiftPtr(regT2, regT0); #endif } #if USE(ALTERNATE_JSIMMEDIATE) - emitFastArithIntToImmNoCheck(X86::eax, X86::eax); + emitFastArithIntToImmNoCheck(regT0, regT0); #else - orPtr(Imm32(JSImmediate::TagTypeNumber), X86::eax); + orPtr(Imm32(JSImmediate::TagTypeNumber), regT0); #endif emitPutVirtualRegister(result); } @@ -127,45 +127,45 @@ void JIT::compileFastArithSlow_op_rshift(unsigned result, unsigned, unsigned op2 { linkSlowCase(iter); if (isOperandConstantImmediateInt(op2)) - emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx); + emitPutJITStubArgFromVirtualRegister(op2, 2, regT2); else { linkSlowCase(iter); - emitPutJITStubArg(X86::ecx, 2); + emitPutJITStubArg(regT2, 2); } - emitPutJITStubArg(X86::eax, 1); - emitCTICall(Interpreter::cti_op_rshift); + emitPutJITStubArg(regT0, 1); + emitCTICall(JITStubs::cti_op_rshift); emitPutVirtualRegister(result); } void JIT::compileFastArith_op_bitand(unsigned 
result, unsigned op1, unsigned op2) { if (isOperandConstantImmediateInt(op1)) { - emitGetVirtualRegister(op2, X86::eax); - emitJumpSlowCaseIfNotImmediateInteger(X86::eax); + emitGetVirtualRegister(op2, regT0); + emitJumpSlowCaseIfNotImmediateInteger(regT0); #if USE(ALTERNATE_JSIMMEDIATE) int32_t imm = getConstantOperandImmediateInt(op1); - andPtr(Imm32(imm), X86::eax); + andPtr(Imm32(imm), regT0); if (imm >= 0) - emitFastArithIntToImmNoCheck(X86::eax, X86::eax); + emitFastArithIntToImmNoCheck(regT0, regT0); #else - andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)))), X86::eax); + andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)))), regT0); #endif } else if (isOperandConstantImmediateInt(op2)) { - emitGetVirtualRegister(op1, X86::eax); - emitJumpSlowCaseIfNotImmediateInteger(X86::eax); + emitGetVirtualRegister(op1, regT0); + emitJumpSlowCaseIfNotImmediateInteger(regT0); #if USE(ALTERNATE_JSIMMEDIATE) int32_t imm = getConstantOperandImmediateInt(op2); - andPtr(Imm32(imm), X86::eax); + andPtr(Imm32(imm), regT0); if (imm >= 0) - emitFastArithIntToImmNoCheck(X86::eax, X86::eax); + emitFastArithIntToImmNoCheck(regT0, regT0); #else - andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)))), X86::eax); + andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)))), regT0); #endif } else { - emitGetVirtualRegisters(op1, X86::eax, op2, X86::edx); - andPtr(X86::edx, X86::eax); - emitJumpSlowCaseIfNotImmediateInteger(X86::eax); + emitGetVirtualRegisters(op1, regT0, op2, regT1); + andPtr(regT1, regT0); + emitJumpSlowCaseIfNotImmediateInteger(regT0); } emitPutVirtualRegister(result); } @@ -173,31 +173,34 @@ void JIT::compileFastArithSlow_op_bitand(unsigned result, unsigned op1, unsigned { linkSlowCase(iter); if (isOperandConstantImmediateInt(op1)) { - emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx); - emitPutJITStubArg(X86::eax, 2); + 
emitPutJITStubArgFromVirtualRegister(op1, 1, regT2); + emitPutJITStubArg(regT0, 2); } else if (isOperandConstantImmediateInt(op2)) { - emitPutJITStubArg(X86::eax, 1); - emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx); + emitPutJITStubArg(regT0, 1); + emitPutJITStubArgFromVirtualRegister(op2, 2, regT2); } else { - emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx); - emitPutJITStubArg(X86::edx, 2); + emitPutJITStubArgFromVirtualRegister(op1, 1, regT2); + emitPutJITStubArg(regT1, 2); } - emitCTICall(Interpreter::cti_op_bitand); + emitCTICall(JITStubs::cti_op_bitand); emitPutVirtualRegister(result); } +#if PLATFORM(X86) || PLATFORM(X86_64) void JIT::compileFastArith_op_mod(unsigned result, unsigned op1, unsigned op2) { emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx); emitJumpSlowCaseIfNotImmediateInteger(X86::eax); emitJumpSlowCaseIfNotImmediateInteger(X86::ecx); #if USE(ALTERNATE_JSIMMEDIATE) - addSlowCase(jePtr(X86::ecx, ImmPtr(JSValuePtr::encode(js0())))); - mod32(X86::ecx, X86::eax, X86::edx); + addSlowCase(branchPtr(Equal, X86::ecx, ImmPtr(JSValuePtr::encode(js0())))); + m_assembler.cdq(); + m_assembler.idivl_r(X86::ecx); #else emitFastArithDeTagImmediate(X86::eax); addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86::ecx)); - mod32(X86::ecx, X86::eax, X86::edx); + m_assembler.cdq(); + m_assembler.idivl_r(X86::ecx); signExtend32ToPtr(X86::edx, X86::edx); #endif emitFastArithReTagImmediate(X86::edx, X86::eax); @@ -220,70 +223,83 @@ void JIT::compileFastArithSlow_op_mod(unsigned result, unsigned, unsigned, Vecto #endif emitPutJITStubArg(X86::eax, 1); emitPutJITStubArg(X86::ecx, 2); - emitCTICall(Interpreter::cti_op_mod); + emitCTICall(JITStubs::cti_op_mod); + emitPutVirtualRegister(result); +} +#else +void JIT::compileFastArith_op_mod(unsigned result, unsigned op1, unsigned op2) +{ + emitPutJITStubArgFromVirtualRegister(op1, 1, regT2); + emitPutJITStubArgFromVirtualRegister(op2, 2, regT2); + emitCTICall(JITStubs::cti_op_mod); 
emitPutVirtualRegister(result); } +void JIT::compileFastArithSlow_op_mod(unsigned, unsigned, unsigned, Vector<SlowCaseEntry>::iterator&) +{ + ASSERT_NOT_REACHED(); +} +#endif void JIT::compileFastArith_op_post_inc(unsigned result, unsigned srcDst) { - emitGetVirtualRegister(srcDst, X86::eax); - move(X86::eax, X86::edx); - emitJumpSlowCaseIfNotImmediateInteger(X86::eax); + emitGetVirtualRegister(srcDst, regT0); + move(regT0, regT1); + emitJumpSlowCaseIfNotImmediateInteger(regT0); #if USE(ALTERNATE_JSIMMEDIATE) - addSlowCase(joAdd32(Imm32(1), X86::edx)); - emitFastArithIntToImmNoCheck(X86::edx, X86::edx); + addSlowCase(branchAdd32(Overflow, Imm32(1), regT1)); + emitFastArithIntToImmNoCheck(regT1, regT1); #else - addSlowCase(joAdd32(Imm32(1 << JSImmediate::IntegerPayloadShift), X86::edx)); - signExtend32ToPtr(X86::edx, X86::edx); + addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1)); + signExtend32ToPtr(regT1, regT1); #endif - emitPutVirtualRegister(srcDst, X86::edx); + emitPutVirtualRegister(srcDst, regT1); emitPutVirtualRegister(result); } void JIT::compileFastArithSlow_op_post_inc(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter) { linkSlowCase(iter); linkSlowCase(iter); - emitPutJITStubArg(X86::eax, 1); - emitCTICall(Interpreter::cti_op_post_inc); - emitPutVirtualRegister(srcDst, X86::edx); + emitPutJITStubArg(regT0, 1); + emitCTICall(JITStubs::cti_op_post_inc); + emitPutVirtualRegister(srcDst, regT1); emitPutVirtualRegister(result); } void JIT::compileFastArith_op_post_dec(unsigned result, unsigned srcDst) { - emitGetVirtualRegister(srcDst, X86::eax); - move(X86::eax, X86::edx); - emitJumpSlowCaseIfNotImmediateInteger(X86::eax); + emitGetVirtualRegister(srcDst, regT0); + move(regT0, regT1); + emitJumpSlowCaseIfNotImmediateInteger(regT0); #if USE(ALTERNATE_JSIMMEDIATE) - addSlowCase(joSub32(Imm32(1), X86::edx)); - emitFastArithIntToImmNoCheck(X86::edx, X86::edx); + addSlowCase(branchSub32(Zero, Imm32(1), 
regT1)); + emitFastArithIntToImmNoCheck(regT1, regT1); #else - addSlowCase(joSub32(Imm32(1 << JSImmediate::IntegerPayloadShift), X86::edx)); - signExtend32ToPtr(X86::edx, X86::edx); + addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1)); + signExtend32ToPtr(regT1, regT1); #endif - emitPutVirtualRegister(srcDst, X86::edx); + emitPutVirtualRegister(srcDst, regT1); emitPutVirtualRegister(result); } void JIT::compileFastArithSlow_op_post_dec(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter) { linkSlowCase(iter); linkSlowCase(iter); - emitPutJITStubArg(X86::eax, 1); - emitCTICall(Interpreter::cti_op_post_dec); - emitPutVirtualRegister(srcDst, X86::edx); + emitPutJITStubArg(regT0, 1); + emitCTICall(JITStubs::cti_op_post_dec); + emitPutVirtualRegister(srcDst, regT1); emitPutVirtualRegister(result); } void JIT::compileFastArith_op_pre_inc(unsigned srcDst) { - emitGetVirtualRegister(srcDst, X86::eax); - emitJumpSlowCaseIfNotImmediateInteger(X86::eax); + emitGetVirtualRegister(srcDst, regT0); + emitJumpSlowCaseIfNotImmediateInteger(regT0); #if USE(ALTERNATE_JSIMMEDIATE) - addSlowCase(joAdd32(Imm32(1), X86::eax)); - emitFastArithIntToImmNoCheck(X86::eax, X86::eax); + addSlowCase(branchAdd32(Overflow, Imm32(1), regT0)); + emitFastArithIntToImmNoCheck(regT0, regT0); #else - addSlowCase(joAdd32(Imm32(1 << JSImmediate::IntegerPayloadShift), X86::eax)); - signExtend32ToPtr(X86::eax, X86::eax); + addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0)); + signExtend32ToPtr(regT0, regT0); #endif emitPutVirtualRegister(srcDst); } @@ -291,23 +307,23 @@ void JIT::compileFastArithSlow_op_pre_inc(unsigned srcDst, Vector<SlowCaseEntry> { Jump notImm = getSlowCase(iter); linkSlowCase(iter); - emitGetVirtualRegister(srcDst, X86::eax); + emitGetVirtualRegister(srcDst, regT0); notImm.link(this); - emitPutJITStubArg(X86::eax, 1); - emitCTICall(Interpreter::cti_op_pre_inc); + emitPutJITStubArg(regT0, 1); + 
emitCTICall(JITStubs::cti_op_pre_inc); emitPutVirtualRegister(srcDst); } void JIT::compileFastArith_op_pre_dec(unsigned srcDst) { - emitGetVirtualRegister(srcDst, X86::eax); - emitJumpSlowCaseIfNotImmediateInteger(X86::eax); + emitGetVirtualRegister(srcDst, regT0); + emitJumpSlowCaseIfNotImmediateInteger(regT0); #if USE(ALTERNATE_JSIMMEDIATE) - addSlowCase(joSub32(Imm32(1), X86::eax)); - emitFastArithIntToImmNoCheck(X86::eax, X86::eax); + addSlowCase(branchSub32(Zero, Imm32(1), regT0)); + emitFastArithIntToImmNoCheck(regT0, regT0); #else - addSlowCase(joSub32(Imm32(1 << JSImmediate::IntegerPayloadShift), X86::eax)); - signExtend32ToPtr(X86::eax, X86::eax); + addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0)); + signExtend32ToPtr(regT0, regT0); #endif emitPutVirtualRegister(srcDst); } @@ -315,10 +331,10 @@ void JIT::compileFastArithSlow_op_pre_dec(unsigned srcDst, Vector<SlowCaseEntry> { Jump notImm = getSlowCase(iter); linkSlowCase(iter); - emitGetVirtualRegister(srcDst, X86::eax); + emitGetVirtualRegister(srcDst, regT0); notImm.link(this); - emitPutJITStubArg(X86::eax, 1); - emitCTICall(Interpreter::cti_op_pre_dec); + emitPutJITStubArg(regT0, 1); + emitCTICall(JITStubs::cti_op_pre_dec); emitPutVirtualRegister(srcDst); } @@ -331,9 +347,9 @@ void JIT::compileFastArith_op_add(Instruction* currentInstruction) unsigned op1 = currentInstruction[2].u.operand; unsigned op2 = currentInstruction[3].u.operand; - emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx); - emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx); - emitCTICall(Interpreter::cti_op_add); + emitPutJITStubArgFromVirtualRegister(op1, 1, regT2); + emitPutJITStubArgFromVirtualRegister(op2, 2, regT2); + emitCTICall(JITStubs::cti_op_add); emitPutVirtualRegister(result); } void JIT::compileFastArithSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&) @@ -347,9 +363,9 @@ void JIT::compileFastArith_op_mul(Instruction* currentInstruction) unsigned op1 = 
currentInstruction[2].u.operand; unsigned op2 = currentInstruction[3].u.operand; - emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx); - emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx); - emitCTICall(Interpreter::cti_op_mul); + emitPutJITStubArgFromVirtualRegister(op1, 1, regT2); + emitPutJITStubArgFromVirtualRegister(op2, 2, regT2); + emitCTICall(JITStubs::cti_op_mul); emitPutVirtualRegister(result); } void JIT::compileFastArithSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&) @@ -363,9 +379,9 @@ void JIT::compileFastArith_op_sub(Instruction* currentInstruction) unsigned op1 = currentInstruction[2].u.operand; unsigned op2 = currentInstruction[3].u.operand; - emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx); - emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx); - emitCTICall(Interpreter::cti_op_sub); + emitPutJITStubArgFromVirtualRegister(op1, 1, regT2); + emitPutJITStubArgFromVirtualRegister(op2, 2, regT2); + emitCTICall(JITStubs::cti_op_sub); emitPutVirtualRegister(result); } void JIT::compileFastArithSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&) @@ -381,13 +397,13 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsign emitJumpSlowCaseIfNotImmediateInteger(X86::eax); emitJumpSlowCaseIfNotImmediateInteger(X86::edx); if (opcodeID == op_add) - addSlowCase(joAdd32(X86::edx, X86::eax)); + addSlowCase(branchAdd32(Overflow, X86::edx, X86::eax)); else if (opcodeID == op_sub) - addSlowCase(joSub32(X86::edx, X86::eax)); + addSlowCase(branchSub32(Overflow, X86::edx, X86::eax)); else { ASSERT(opcodeID == op_mul); - addSlowCase(joMul32(X86::edx, X86::eax)); - addSlowCase(jz32(X86::eax)); + addSlowCase(branchMul32(Overflow, X86::edx, X86::eax)); + addSlowCase(branchTest32(Zero, X86::eax)); } emitFastArithIntToImmNoCheck(X86::eax, X86::eax); } @@ -409,12 +425,12 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>: emitPutJITStubArg(X86::eax, 1); emitPutJITStubArg(X86::edx, 
2); if (opcodeID == op_add) - emitCTICall(Interpreter::cti_op_add); + emitCTICall(JITStubs::cti_op_add); else if (opcodeID == op_sub) - emitCTICall(Interpreter::cti_op_sub); + emitCTICall(JITStubs::cti_op_sub); else { ASSERT(opcodeID == op_mul); - emitCTICall(Interpreter::cti_op_mul); + emitCTICall(JITStubs::cti_op_mul); } Jump end = jump(); @@ -464,7 +480,7 @@ void JIT::compileFastArith_op_add(Instruction* currentInstruction) if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) { emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx); emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx); - emitCTICall(Interpreter::cti_op_add); + emitCTICall(JITStubs::cti_op_add); emitPutVirtualRegister(result); return; } @@ -472,12 +488,12 @@ void JIT::compileFastArith_op_add(Instruction* currentInstruction) if (isOperandConstantImmediateInt(op1)) { emitGetVirtualRegister(op2, X86::eax); emitJumpSlowCaseIfNotImmediateInteger(X86::eax); - addSlowCase(joAdd32(Imm32(getConstantOperandImmediateInt(op1)), X86::eax)); + addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), X86::eax)); emitFastArithIntToImmNoCheck(X86::eax, X86::eax); } else if (isOperandConstantImmediateInt(op2)) { emitGetVirtualRegister(op1, X86::eax); emitJumpSlowCaseIfNotImmediateInteger(X86::eax); - addSlowCase(joAdd32(Imm32(getConstantOperandImmediateInt(op2)), X86::eax)); + addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), X86::eax)); emitFastArithIntToImmNoCheck(X86::eax, X86::eax); } else compileBinaryArithOp(op_add, result, op1, op2, types); @@ -496,13 +512,13 @@ void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<Sl linkSlowCase(iter); emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx); emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx); - emitCTICall(Interpreter::cti_op_add); + emitCTICall(JITStubs::cti_op_add); } else if (isOperandConstantImmediateInt(op2)) { linkSlowCase(iter); linkSlowCase(iter); 
emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx); emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx); - emitCTICall(Interpreter::cti_op_add); + emitCTICall(JITStubs::cti_op_add); } else compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types); @@ -521,12 +537,12 @@ void JIT::compileFastArith_op_mul(Instruction* currentInstruction) if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) { emitGetVirtualRegister(op2, X86::eax); emitJumpSlowCaseIfNotImmediateInteger(X86::eax); - addSlowCase(joMul32(Imm32(value), X86::eax, X86::eax)); + addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax)); emitFastArithReTagImmediate(X86::eax, X86::eax); } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) { emitGetVirtualRegister(op1, X86::eax); emitJumpSlowCaseIfNotImmediateInteger(X86::eax); - addSlowCase(joMul32(Imm32(value), X86::eax, X86::eax)); + addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax)); emitFastArithReTagImmediate(X86::eax, X86::eax); } else compileBinaryArithOp(op_mul, result, op1, op2, types); @@ -547,7 +563,7 @@ void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<Sl // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0. 
emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx); emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx); - emitCTICall(Interpreter::cti_op_mul); + emitCTICall(JITStubs::cti_op_mul); } else compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, types); @@ -605,9 +621,19 @@ static bool isSSE2Present() cpuid; mov flags, edx; } +#elif COMPILER(GCC) + asm ( + "movl $0x1, %%eax;" + "pushl %%ebx;" + "cpuid;" + "popl %%ebx;" + "movl %%edx, %0;" + : "=g" (flags) + : + : "%eax", "%ecx", "%edx" + ); #else flags = 0; - // FIXME: Add GCC code to do above asm #endif present = (flags & SSE2FeatureBit) != 0; } @@ -619,53 +645,11 @@ static bool isSSE2Present() #endif -/* - This is required since number representation is canonical - values representable as a JSImmediate should not be stored in a JSNumberCell. - - In the common case, the double value from 'xmmSource' is written to the reusable JSNumberCell pointed to by 'jsNumberCell', then 'jsNumberCell' - is written to the output SF Register 'dst', and then a jump is planted (stored into *wroteJSNumberCell). - - However if the value from xmmSource is representable as a JSImmediate, then the JSImmediate value will be written to the output, and flow - control will fall through from the code planted. -*/ -void JIT::putDoubleResultToJSNumberCellOrJSImmediate(X86::XMMRegisterID xmmSource, X86::RegisterID jsNumberCell, unsigned dst, JmpSrc* wroteJSNumberCell, X86::XMMRegisterID tempXmm, X86::RegisterID tempReg1, X86::RegisterID tempReg2) -{ - // convert (double -> JSImmediate -> double), and check if the value is unchanged - in which case the value is representable as a JSImmediate. - __ cvttsd2si_rr(xmmSource, tempReg1); - __ addl_rr(tempReg1, tempReg1); - __ sarl_i8r(1, tempReg1); - __ cvtsi2sd_rr(tempReg1, tempXmm); - // Compare & branch if immediate. 
- __ ucomisd_rr(tempXmm, xmmSource); - JmpSrc resultIsImm = __ je(); - JmpDst resultLookedLikeImmButActuallyIsnt = __ label(); - - // Store the result to the JSNumberCell and jump. - __ movsd_rm(xmmSource, FIELD_OFFSET(JSNumberCell, m_value), jsNumberCell); - if (jsNumberCell != X86::eax) - __ movl_rr(jsNumberCell, X86::eax); - emitPutVirtualRegister(dst); - *wroteJSNumberCell = __ jmp(); - - __ link(resultIsImm, __ label()); - // value == (double)(JSImmediate)value... or at least, it looks that way... - // ucomi will report that (0 == -0), and will report true if either input in NaN (result is unordered). - __ link(__ jp(), resultLookedLikeImmButActuallyIsnt); // Actually was a NaN - __ pextrw_irr(3, xmmSource, tempReg2); - __ cmpl_ir(0x8000, tempReg2); - __ link(__ je(), resultLookedLikeImmButActuallyIsnt); // Actually was -0 - // Yes it really really really is representable as a JSImmediate. - emitFastArithIntToImmNoCheck(tempReg1, X86::eax); - emitPutVirtualRegister(dst); -} - void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types) { Structure* numberStructure = m_globalData->numberStructure.get(); JmpSrc wasJSNumberCell1; - JmpSrc wasJSNumberCell1b; JmpSrc wasJSNumberCell2; - JmpSrc wasJSNumberCell2b; emitGetVirtualRegisters(src1, X86::eax, src2, X86::edx); @@ -695,11 +679,11 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0); JmpSrc loadedDouble = __ jmp(); // (1b) if we get here, src1 is an immediate - __ link(op1imm, __ label()); + __ linkJump(op1imm, __ label()); emitFastArithImmToInt(X86::eax); __ cvtsi2sd_rr(X86::eax, X86::xmm0); // (1c) - __ link(loadedDouble, __ label()); + __ linkJump(loadedDouble, __ label()); if (opcodeID == op_add) __ addsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0); else if (opcodeID == op_sub) @@ -709,12 +693,15 @@ void JIT::compileBinaryArithOp(OpcodeID 
opcodeID, unsigned dst, unsigned src1, u __ mulsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0); } - putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::edx, dst, &wasJSNumberCell2, X86::xmm1, X86::ecx, X86::eax); - wasJSNumberCell2b = __ jmp(); + // Store the result to the JSNumberCell and jump. + __ movsd_rm(X86::xmm0, FIELD_OFFSET(JSNumberCell, m_value), X86::edx); + __ movl_rr(X86::edx, X86::eax); + emitPutVirtualRegister(dst); + wasJSNumberCell2 = __ jmp(); // (2) This handles cases where src2 is an immediate number. // Two slow cases - either src1 isn't an immediate, or the subtract overflows. - __ link(op2imm, __ label()); + __ linkJump(op2imm, __ label()); emitJumpSlowCaseIfNotImmediateInteger(X86::eax); } else if (types.first().isReusable() && isSSE2Present()) { ASSERT(types.first().mightBeNumber()); @@ -742,11 +729,11 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm1); JmpSrc loadedDouble = __ jmp(); // (1b) if we get here, src2 is an immediate - __ link(op2imm, __ label()); + __ linkJump(op2imm, __ label()); emitFastArithImmToInt(X86::edx); __ cvtsi2sd_rr(X86::edx, X86::xmm1); // (1c) - __ link(loadedDouble, __ label()); + __ linkJump(loadedDouble, __ label()); __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0); if (opcodeID == op_add) __ addsd_rr(X86::xmm1, X86::xmm0); @@ -759,12 +746,14 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u __ movsd_rm(X86::xmm0, FIELD_OFFSET(JSNumberCell, m_value), X86::eax); emitPutVirtualRegister(dst); - putDoubleResultToJSNumberCellOrJSImmediate(X86::xmm0, X86::eax, dst, &wasJSNumberCell1, X86::xmm1, X86::ecx, X86::edx); - wasJSNumberCell1b = __ jmp(); + // Store the result to the JSNumberCell and jump. 
+ __ movsd_rm(X86::xmm0, FIELD_OFFSET(JSNumberCell, m_value), X86::eax); + emitPutVirtualRegister(dst); + wasJSNumberCell1 = __ jmp(); // (2) This handles cases where src1 is an immediate number. // Two slow cases - either src2 isn't an immediate, or the subtract overflows. - __ link(op1imm, __ label()); + __ linkJump(op1imm, __ label()); emitJumpSlowCaseIfNotImmediateInteger(X86::edx); } else emitJumpSlowCaseIfNotImmediateIntegers(X86::eax, X86::edx, X86::ecx); @@ -782,17 +771,17 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u ASSERT(opcodeID == op_mul); // convert eax & edx from JSImmediates to ints, and check if either are zero emitFastArithImmToInt(X86::edx); - JmpSrc op1Zero = emitFastArithDeTagImmediateJumpIfZero(X86::eax); + Jump op1Zero = emitFastArithDeTagImmediateJumpIfZero(X86::eax); __ testl_rr(X86::edx, X86::edx); JmpSrc op2NonZero = __ jne(); - __ link(op1Zero, __ label()); + op1Zero.link(this); // if either input is zero, add the two together, and check if the result is < 0. // If it is, we have a problem (N < 0), (N * 0) == -0, not representatble as a JSImmediate. 
__ movl_rr(X86::eax, X86::ecx); __ addl_rr(X86::edx, X86::ecx); addSlowCase(__ js()); // Skip the above check if neither input is zero - __ link(op2NonZero, __ label()); + __ linkJump(op2NonZero, __ label()); __ imull_rr(X86::edx, X86::eax); addSlowCase(__ jo()); signExtend32ToPtr(X86::eax, X86::eax); @@ -801,12 +790,10 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u emitPutVirtualRegister(dst); if (types.second().isReusable() && isSSE2Present()) { - __ link(wasJSNumberCell2, __ label()); - __ link(wasJSNumberCell2b, __ label()); + __ linkJump(wasJSNumberCell2, __ label()); } else if (types.first().isReusable() && isSSE2Present()) { - __ link(wasJSNumberCell1, __ label()); - __ link(wasJSNumberCell1b, __ label()); + __ linkJump(wasJSNumberCell1, __ label()); } } @@ -841,12 +828,12 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>: emitPutJITStubArgFromVirtualRegister(src1, 1, X86::ecx); emitPutJITStubArgFromVirtualRegister(src2, 2, X86::ecx); if (opcodeID == op_add) - emitCTICall(Interpreter::cti_op_add); + emitCTICall(JITStubs::cti_op_add); else if (opcodeID == op_sub) - emitCTICall(Interpreter::cti_op_sub); + emitCTICall(JITStubs::cti_op_sub); else { ASSERT(opcodeID == op_mul); - emitCTICall(Interpreter::cti_op_mul); + emitCTICall(JITStubs::cti_op_mul); } emitPutVirtualRegister(dst); } @@ -860,13 +847,13 @@ void JIT::compileFastArith_op_add(Instruction* currentInstruction) if (isOperandConstantImmediateInt(op1)) { emitGetVirtualRegister(op2, X86::eax); emitJumpSlowCaseIfNotImmediateInteger(X86::eax); - addSlowCase(joAdd32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), X86::eax)); + addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), X86::eax)); signExtend32ToPtr(X86::eax, X86::eax); emitPutVirtualRegister(result); } else if (isOperandConstantImmediateInt(op2)) { emitGetVirtualRegister(op1, X86::eax); 
emitJumpSlowCaseIfNotImmediateInteger(X86::eax); - addSlowCase(joAdd32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), X86::eax)); + addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), X86::eax)); signExtend32ToPtr(X86::eax, X86::eax); emitPutVirtualRegister(result); } else { @@ -876,7 +863,7 @@ void JIT::compileFastArith_op_add(Instruction* currentInstruction) else { emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx); emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx); - emitCTICall(Interpreter::cti_op_add); + emitCTICall(JITStubs::cti_op_add); emitPutVirtualRegister(result); } } @@ -894,7 +881,7 @@ void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<Sl notImm.link(this); emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx); emitPutJITStubArg(X86::eax, 2); - emitCTICall(Interpreter::cti_op_add); + emitCTICall(JITStubs::cti_op_add); emitPutVirtualRegister(result); } else if (isOperandConstantImmediateInt(op2)) { Jump notImm = getSlowCase(iter); @@ -903,7 +890,7 @@ void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<Sl notImm.link(this); emitPutJITStubArg(X86::eax, 1); emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx); - emitCTICall(Interpreter::cti_op_add); + emitCTICall(JITStubs::cti_op_add); emitPutVirtualRegister(result); } else { OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand); @@ -924,7 +911,7 @@ void JIT::compileFastArith_op_mul(Instruction* currentInstruction) emitGetVirtualRegister(op2, X86::eax); emitJumpSlowCaseIfNotImmediateInteger(X86::eax); emitFastArithDeTagImmediate(X86::eax); - addSlowCase(joMul32(Imm32(value), X86::eax, X86::eax)); + addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax)); signExtend32ToPtr(X86::eax, X86::eax); emitFastArithReTagImmediate(X86::eax, X86::eax); emitPutVirtualRegister(result); @@ -932,7 +919,7 @@ void 
JIT::compileFastArith_op_mul(Instruction* currentInstruction) emitGetVirtualRegister(op1, X86::eax); emitJumpSlowCaseIfNotImmediateInteger(X86::eax); emitFastArithDeTagImmediate(X86::eax); - addSlowCase(joMul32(Imm32(value), X86::eax, X86::eax)); + addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax)); signExtend32ToPtr(X86::eax, X86::eax); emitFastArithReTagImmediate(X86::eax, X86::eax); emitPutVirtualRegister(result); @@ -952,7 +939,7 @@ void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<Sl // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0. emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx); emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx); - emitCTICall(Interpreter::cti_op_mul); + emitCTICall(JITStubs::cti_op_mul); emitPutVirtualRegister(result); } else compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand)); diff --git a/JavaScriptCore/jit/JITCall.cpp b/JavaScriptCore/jit/JITCall.cpp index af26712..62c7149 100644 --- a/JavaScriptCore/jit/JITCall.cpp +++ b/JavaScriptCore/jit/JITCall.cpp @@ -49,10 +49,11 @@ void JIT::unlinkCall(CallLinkInfo* callLinkInfo) // When the JSFunction is deleted the pointer embedded in the instruction stream will no longer be valid // (and, if a new JSFunction happened to be constructed at the same location, we could get a false positive // match). Reset the check so it no longer matches. 
- DataLabelPtr::patch(callLinkInfo->hotPathBegin, JSValuePtr::encode(jsImpossibleValue())); + callLinkInfo->hotPathBegin.repatch(JSValuePtr::encode(jsImpossibleValue())); } -void JIT::linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, void* ctiCode, CallLinkInfo* callLinkInfo, int callerArgCount) +//void JIT::linkCall(JSFunction* , CodeBlock* , JITCode , CallLinkInfo* callLinkInfo, int ) +void JIT::linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, JITCode ctiCode, CallLinkInfo* callLinkInfo, int callerArgCount) { // Currently we only link calls with the exact number of arguments. if (callerArgCount == calleeCodeBlock->m_numParameters) { @@ -60,24 +61,23 @@ void JIT::linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, void* ctiCode calleeCodeBlock->addCaller(callLinkInfo); - DataLabelPtr::patch(callLinkInfo->hotPathBegin, callee); - Jump::patch(callLinkInfo->hotPathOther, ctiCode); + callLinkInfo->hotPathBegin.repatch(callee); + callLinkInfo->hotPathOther.relink(ctiCode.addressForCall()); } // patch the instruction that jumps out to the cold path, so that we only try to link once. 
- void* patchCheck = reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(callLinkInfo->hotPathBegin) + patchOffsetOpCallCompareToJump); - Jump::patch(patchCheck, callLinkInfo->coldPathOther); + callLinkInfo->hotPathBegin.jumpAtOffset(patchOffsetOpCallCompareToJump).relink(callLinkInfo->coldPathOther); } void JIT::compileOpCallInitializeCallFrame() { - store32(X86::edx, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register)))); + store32(regT1, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register)))); - loadPtr(Address(X86::ecx, FIELD_OFFSET(JSFunction, m_scopeChain) + FIELD_OFFSET(ScopeChain, m_node)), X86::edx); // newScopeChain + loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_scopeChain) + FIELD_OFFSET(ScopeChain, m_node)), regT1); // newScopeChain storePtr(ImmPtr(JSValuePtr::encode(noValue())), Address(callFrameRegister, RegisterFile::OptionalCalleeArguments * static_cast<int>(sizeof(Register)))); - storePtr(X86::ecx, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register)))); - storePtr(X86::edx, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register)))); + storePtr(regT2, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register)))); + storePtr(regT1, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register)))); } void JIT::compileOpCallSetupArgs(Instruction* instruction) @@ -86,7 +86,7 @@ void JIT::compileOpCallSetupArgs(Instruction* instruction) int registerOffset = instruction[4].u.operand; // ecx holds func - emitPutJITStubArg(X86::ecx, 1); + emitPutJITStubArg(regT2, 1); emitPutJITStubArgConstant(registerOffset, 2); emitPutJITStubArgConstant(argCount, 3); } @@ -97,7 +97,7 @@ void JIT::compileOpCallEvalSetupArgs(Instruction* instruction) int registerOffset = instruction[4].u.operand; // ecx holds func - emitPutJITStubArg(X86::ecx, 1); + emitPutJITStubArg(regT2, 1); 
emitPutJITStubArgConstant(registerOffset, 2); emitPutJITStubArgConstant(argCount, 3); } @@ -110,10 +110,10 @@ void JIT::compileOpConstructSetupArgs(Instruction* instruction) int thisRegister = instruction[6].u.operand; // ecx holds func - emitPutJITStubArg(X86::ecx, 1); + emitPutJITStubArg(regT2, 1); emitPutJITStubArgConstant(registerOffset, 2); emitPutJITStubArgConstant(argCount, 3); - emitPutJITStubArgFromVirtualRegister(proto, 4, X86::eax); + emitPutJITStubArgFromVirtualRegister(proto, 4, regT0); emitPutJITStubArgConstant(thisRegister, 5); } @@ -129,14 +129,14 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned) // Handle eval Jump wasEval; if (opcodeID == op_call_eval) { - emitGetVirtualRegister(callee, X86::ecx); + emitGetVirtualRegister(callee, regT2); compileOpCallEvalSetupArgs(instruction); - emitCTICall(Interpreter::cti_op_call_eval); - wasEval = jnePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsImpossibleValue()))); + emitCTICall(JITStubs::cti_op_call_eval); + wasEval = branchPtr(NotEqual, regT0, ImmPtr(JSValuePtr::encode(jsImpossibleValue()))); } - emitGetVirtualRegister(callee, X86::ecx); + emitGetVirtualRegister(callee, regT2); // The arguments have been set up on the hot path for op_call_eval if (opcodeID == op_call) compileOpCallSetupArgs(instruction); @@ -144,22 +144,22 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned) compileOpConstructSetupArgs(instruction); // Check for JSFunctions. - emitJumpSlowCaseIfNotJSCell(X86::ecx); - addSlowCase(jnePtr(Address(X86::ecx), ImmPtr(m_interpreter->m_jsFunctionVptr))); + emitJumpSlowCaseIfNotJSCell(regT2); + addSlowCase(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsFunctionVPtr))); // First, in the case of a construct, allocate the new object. 
if (opcodeID == op_construct) { - emitCTICall(Interpreter::cti_op_construct_JSConstruct); + emitCTICall(JITStubs::cti_op_construct_JSConstruct); emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount); - emitGetVirtualRegister(callee, X86::ecx); + emitGetVirtualRegister(callee, regT2); } // Speculatively roll the callframe, assuming argCount will match the arity. storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register)))); addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister); - move(Imm32(argCount), X86::edx); + move(Imm32(argCount), regT1); - emitNakedCall(m_interpreter->m_ctiVirtualCall); + emitNakedCall(m_globalData->jitStubs.ctiVirtualCall()); if (opcodeID == op_call_eval) wasEval.link(this); @@ -178,7 +178,7 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>: linkSlowCase(iter); // This handles host functions - emitCTICall(((opcodeID == op_construct) ? Interpreter::cti_op_construct_NotJSConstruct : Interpreter::cti_op_call_NotJSFunction)); + emitCTICall(((opcodeID == op_construct) ? JITStubs::cti_op_construct_NotJSConstruct : JITStubs::cti_op_call_NotJSFunction)); // Put the return value in dst. In the interpreter, op_ret does this. 
emitPutVirtualRegister(dst); @@ -187,12 +187,6 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>: #else -static NO_RETURN void unreachable() -{ - ASSERT_NOT_REACHED(); - exit(1); -} - void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex) { int dst = instruction[1].u.operand; @@ -203,18 +197,18 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca // Handle eval Jump wasEval; if (opcodeID == op_call_eval) { - emitGetVirtualRegister(callee, X86::ecx); + emitGetVirtualRegister(callee, regT2); compileOpCallEvalSetupArgs(instruction); - emitCTICall(Interpreter::cti_op_call_eval); - wasEval = jnePtr(X86::eax, ImmPtr(JSValuePtr::encode(jsImpossibleValue()))); + emitCTICall(JITStubs::cti_op_call_eval); + wasEval = branchPtr(NotEqual, regT0, ImmPtr(JSValuePtr::encode(jsImpossibleValue()))); } // This plants a check for a cached JSFunction value, so we can plant a fast link to the callee. // This deliberately leaves the callee in ecx, used when setting up the stack frame below - emitGetVirtualRegister(callee, X86::ecx); + emitGetVirtualRegister(callee, regT2); DataLabelPtr addressOfLinkedFunctionCheck; - Jump jumpToSlow = jnePtrWithPatch(X86::ecx, addressOfLinkedFunctionCheck, ImmPtr(JSValuePtr::encode(jsImpossibleValue()))); + Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT2, addressOfLinkedFunctionCheck, ImmPtr(JSValuePtr::encode(jsImpossibleValue()))); addSlowCase(jumpToSlow); ASSERT(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow) == patchOffsetOpCallCompareToJump); m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck; @@ -226,25 +220,25 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca int proto = instruction[5].u.operand; int thisRegister = instruction[6].u.operand; - emitPutJITStubArg(X86::ecx, 1); - emitPutJITStubArgFromVirtualRegister(proto, 4, X86::eax); - 
emitCTICall(Interpreter::cti_op_construct_JSConstruct); + emitPutJITStubArg(regT2, 1); + emitPutJITStubArgFromVirtualRegister(proto, 4, regT0); + emitCTICall(JITStubs::cti_op_construct_JSConstruct); emitPutVirtualRegister(thisRegister); - emitGetVirtualRegister(callee, X86::ecx); + emitGetVirtualRegister(callee, regT2); } // Fast version of stack frame initialization, directly relative to edi. // Note that this omits to set up RegisterFile::CodeBlock, which is set in the callee storePtr(ImmPtr(JSValuePtr::encode(noValue())), Address(callFrameRegister, (registerOffset + RegisterFile::OptionalCalleeArguments) * static_cast<int>(sizeof(Register)))); - storePtr(X86::ecx, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register)))); - loadPtr(Address(X86::ecx, FIELD_OFFSET(JSFunction, m_scopeChain) + FIELD_OFFSET(ScopeChain, m_node)), X86::edx); // newScopeChain + storePtr(regT2, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register)))); + loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_scopeChain) + FIELD_OFFSET(ScopeChain, m_node)), regT1); // newScopeChain store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register)))); storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register)))); - storePtr(X86::edx, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register)))); + storePtr(regT1, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register)))); addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister); // Call to the callee - m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall(reinterpret_cast<void*>(unreachable)); + m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = 
emitNakedCall(reinterpret_cast<void*>(0)); if (opcodeID == op_call_eval) wasEval.link(this); @@ -271,24 +265,24 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>: compileOpConstructSetupArgs(instruction); // Fast check for JS function. - Jump callLinkFailNotObject = emitJumpIfNotJSCell(X86::ecx); - Jump callLinkFailNotJSFunction = jnePtr(Address(X86::ecx), ImmPtr(m_interpreter->m_jsFunctionVptr)); + Jump callLinkFailNotObject = emitJumpIfNotJSCell(regT2); + Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsFunctionVPtr)); // First, in the case of a construct, allocate the new object. if (opcodeID == op_construct) { - emitCTICall(Interpreter::cti_op_construct_JSConstruct); + emitCTICall(JITStubs::cti_op_construct_JSConstruct); emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount); - emitGetVirtualRegister(callee, X86::ecx); + emitGetVirtualRegister(callee, regT2); } - move(Imm32(argCount), X86::edx); + move(Imm32(argCount), regT1); // Speculatively roll the callframe, assuming argCount will match the arity. storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register)))); addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister); m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = - emitNakedCall(m_interpreter->m_ctiVirtualCallPreLink); + emitNakedCall(m_globalData->jitStubs.ctiVirtualCallPreLink()); Jump storeResultForFirstRun = jump(); @@ -303,14 +297,14 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>: compileOpConstructSetupArgs(instruction); // Check for JSFunctions. 
- Jump isNotObject = emitJumpIfNotJSCell(X86::ecx); - Jump isJSFunction = jePtr(Address(X86::ecx), ImmPtr(m_interpreter->m_jsFunctionVptr)); + Jump isNotObject = emitJumpIfNotJSCell(regT2); + Jump isJSFunction = branchPtr(Equal, Address(regT2), ImmPtr(m_globalData->jsFunctionVPtr)); // This handles host functions isNotObject.link(this); callLinkFailNotObject.link(this); callLinkFailNotJSFunction.link(this); - emitCTICall(((opcodeID == op_construct) ? Interpreter::cti_op_construct_NotJSConstruct : Interpreter::cti_op_call_NotJSFunction)); + emitCTICall(((opcodeID == op_construct) ? JITStubs::cti_op_construct_NotJSConstruct : JITStubs::cti_op_call_NotJSFunction)); Jump wasNotJSFunction = jump(); // Next, handle JSFunctions... @@ -318,17 +312,17 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>: // First, in the case of a construct, allocate the new object. if (opcodeID == op_construct) { - emitCTICall(Interpreter::cti_op_construct_JSConstruct); + emitCTICall(JITStubs::cti_op_construct_JSConstruct); emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount); - emitGetVirtualRegister(callee, X86::ecx); + emitGetVirtualRegister(callee, regT2); } // Speculatively roll the callframe, assuming argCount will match the arity. storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register)))); addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister); - move(Imm32(argCount), X86::edx); + move(Imm32(argCount), regT1); - emitNakedCall(m_interpreter->m_ctiVirtualCall); + emitNakedCall(m_globalData->jitStubs.ctiVirtualCall()); // Put the return value in dst. In the interpreter, op_ret does this. 
wasNotJSFunction.link(this); diff --git a/JavaScriptCore/jit/JITCode.h b/JavaScriptCore/jit/JITCode.h new file mode 100644 index 0000000..0490d0e --- /dev/null +++ b/JavaScriptCore/jit/JITCode.h @@ -0,0 +1,97 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JITCode_h +#define JITCode_h + +#include <wtf/Platform.h> + +#if ENABLE(JIT) + +#include "CallFrame.h" +#include "JSValue.h" +#include "Profiler.h" + +namespace JSC { + + class JSGlobalData; + class RegisterFile; + + extern "C" { + JSValueEncodedAsPointer* ctiTrampoline( +#if PLATFORM(X86_64) + // FIXME: (bug #22910) this will force all arguments onto the stack (regparm(0) does not appear to have any effect). 
+ // We can allow register passing here, and move the writes of these values into the trampoline. + void*, void*, void*, void*, void*, void*, +#endif + void* code, RegisterFile*, CallFrame*, JSValuePtr* exception, Profiler**, JSGlobalData*); + }; + + class JITCode { + public: + JITCode(void* code) + : code(code) + { + } + + operator bool() + { + return code != 0; + } + + void* addressForCall() + { + return code; + } + + // This function returns the offset in bytes of 'pointerIntoCode' into + // this block of code. The pointer provided must be a pointer into this + // block of code. It is ASSERTed that no codeblock >4gb in size. + unsigned offsetOf(void* pointerIntoCode) + { + intptr_t result = reinterpret_cast<intptr_t>(pointerIntoCode) - reinterpret_cast<intptr_t>(code); + ASSERT(static_cast<intptr_t>(static_cast<unsigned>(result)) == result); + return static_cast<unsigned>(result); + } + + // Execute the code! + inline JSValuePtr execute(RegisterFile* registerFile, CallFrame* callFrame, JSGlobalData* globalData, JSValuePtr* exception) + { + return JSValuePtr::decode(ctiTrampoline( +#if PLATFORM(X86_64) + 0, 0, 0, 0, 0, 0, +#endif + code, registerFile, callFrame, exception, Profiler::enabledProfilerReference(), globalData)); + } + + private: + void* code; + }; + +}; + +#endif + +#endif diff --git a/JavaScriptCore/jit/JITInlineMethods.h b/JavaScriptCore/jit/JITInlineMethods.h index 7a97cd8..684c404 100644 --- a/JavaScriptCore/jit/JITInlineMethods.h +++ b/JavaScriptCore/jit/JITInlineMethods.h @@ -69,8 +69,8 @@ ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst) if (!atJumpTarget) { // The argument we want is already stored in eax - if (dst != X86::eax) - move(X86::eax, dst); + if (dst != cachedResultRegister) + move(cachedResultRegister, dst); killLastResultRegister(); return; } @@ -177,7 +177,7 @@ ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader(RegisterFile::CallFrameHeader ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, 
RegisterID from) { storePtr(from, Address(callFrameRegister, dst * sizeof(Register))); - m_lastResultBytecodeRegister = (from == X86::eax) ? dst : std::numeric_limits<int>::max(); + m_lastResultBytecodeRegister = (from == cachedResultRegister) ? dst : std::numeric_limits<int>::max(); // FIXME: #ifndef NDEBUG, Write the correct m_type to the register. } @@ -187,20 +187,11 @@ ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst) // FIXME: #ifndef NDEBUG, Write the correct m_type to the register. } -ALWAYS_INLINE JIT::Jump JIT::emitNakedCall(X86::RegisterID r) +ALWAYS_INLINE JIT::Call JIT::emitNakedCall(void* function) { ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set. - Jump nakedCall = call(r); - m_calls.append(CallRecord(nakedCall, m_bytecodeIndex)); - return nakedCall; -} - -ALWAYS_INLINE JIT::Jump JIT::emitNakedCall(void* function) -{ - ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set. - - Jump nakedCall = call(); + Call nakedCall = nearCall(); m_calls.append(CallRecord(nakedCall, m_bytecodeIndex, function)); return nakedCall; } @@ -208,25 +199,21 @@ ALWAYS_INLINE JIT::Jump JIT::emitNakedCall(void* function) #if USE(JIT_STUB_ARGUMENT_REGISTER) ALWAYS_INLINE void JIT::restoreArgumentReference() { -#if PLATFORM(X86_64) - move(X86::esp, X86::edi); -#else - move(X86::esp, X86::ecx); -#endif + move(stackPointerRegister, firstArgumentRegister); emitPutCTIParam(callFrameRegister, STUB_ARGS_callFrame); } ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() { // In the trampoline on x86-64, the first argument register is not overwritten. 
#if !PLATFORM(X86_64) - move(X86::esp, X86::ecx); - addPtr(Imm32(sizeof(void*)), X86::ecx); + move(stackPointerRegister, firstArgumentRegister); + addPtr(Imm32(sizeof(void*)), firstArgumentRegister); #endif } #elif USE(JIT_STUB_ARGUMENT_STACK) ALWAYS_INLINE void JIT::restoreArgumentReference() { - storePtr(X86::esp, X86::esp); + poke(stackPointerRegister); emitPutCTIParam(callFrameRegister, STUB_ARGS_callFrame); } ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {} @@ -238,7 +225,7 @@ ALWAYS_INLINE void JIT::restoreArgumentReference() ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {} #endif -ALWAYS_INLINE JIT::Jump JIT::emitCTICall_internal(void* helper) +ALWAYS_INLINE JIT::Call JIT::emitCTICall_internal(void* helper) { ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set. @@ -246,7 +233,7 @@ ALWAYS_INLINE JIT::Jump JIT::emitCTICall_internal(void* helper) sampleInstruction(m_codeBlock->instructions().begin() + m_bytecodeIndex, true); #endif restoreArgumentReference(); - Jump ctiCall = call(); + Call ctiCall = call(); m_calls.append(CallRecord(ctiCall, m_bytecodeIndex, helper)); #if ENABLE(OPCODE_SAMPLING) sampleInstruction(m_codeBlock->instructions().begin() + m_bytecodeIndex, false); @@ -258,15 +245,15 @@ ALWAYS_INLINE JIT::Jump JIT::emitCTICall_internal(void* helper) ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure) { - return jnePtr(Address(reg, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(structure)); + return branchPtr(NotEqual, Address(reg, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(structure)); } ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg) { #if USE(ALTERNATE_JSIMMEDIATE) - return jzPtr(reg, tagMaskRegister); + return branchTestPtr(Zero, reg, tagMaskRegister); #else - return jz32(reg, Imm32(JSImmediate::TagMask)); + return branchTest32(Zero, reg, Imm32(JSImmediate::TagMask)); #endif } @@ 
-285,9 +272,9 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg) ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg) { #if USE(ALTERNATE_JSIMMEDIATE) - return jnzPtr(reg, tagMaskRegister); + return branchTestPtr(NonZero, reg, tagMaskRegister); #else - return jnz32(reg, Imm32(JSImmediate::TagMask)); + return branchTest32(NonZero, reg, Imm32(JSImmediate::TagMask)); #endif } @@ -311,29 +298,29 @@ ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& #if USE(ALTERNATE_JSIMMEDIATE) ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateNumber(RegisterID reg) { - return jnzPtr(reg, tagTypeNumberRegister); + return branchTestPtr(NonZero, reg, tagTypeNumberRegister); } ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateNumber(RegisterID reg) { - return jzPtr(reg, tagTypeNumberRegister); + return branchTestPtr(Zero, reg, tagTypeNumberRegister); } #endif ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg) { #if USE(ALTERNATE_JSIMMEDIATE) - return jaePtr(reg, tagTypeNumberRegister); + return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister); #else - return jnz32(reg, Imm32(JSImmediate::TagTypeNumber)); + return branchTest32(NonZero, reg, Imm32(JSImmediate::TagTypeNumber)); #endif } ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg) { #if USE(ALTERNATE_JSIMMEDIATE) - return jbPtr(reg, tagTypeNumberRegister); + return branchPtr(Below, reg, tagTypeNumberRegister); #else - return jz32(reg, Imm32(JSImmediate::TagTypeNumber)); + return branchTest32(Zero, reg, Imm32(JSImmediate::TagTypeNumber)); #endif } @@ -362,7 +349,7 @@ ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg) ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg) { - return jzSubPtr(Imm32(JSImmediate::TagTypeNumber), reg); + return branchSubPtr(Zero, Imm32(JSImmediate::TagTypeNumber), reg); } #endif diff --git a/JavaScriptCore/jit/JITPropertyAccess.cpp 
b/JavaScriptCore/jit/JITPropertyAccess.cpp index 6740bec..ce90ee4 100644 --- a/JavaScriptCore/jit/JITPropertyAccess.cpp +++ b/JavaScriptCore/jit/JITPropertyAccess.cpp @@ -53,11 +53,11 @@ void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, // to array-length / prototype access tranpolines, and finally we also the the property-map access offset as a label // to jump back to if one of these trampolies finds a match. - emitGetVirtualRegister(baseVReg, X86::eax); + emitGetVirtualRegister(baseVReg, regT0); - emitPutJITStubArg(X86::eax, 1); + emitPutJITStubArg(regT0, 1); emitPutJITStubArgConstant(ident, 2); - emitCTICall(Interpreter::cti_op_get_by_id_generic); + emitCTICall(JITStubs::cti_op_get_by_id_generic); emitPutVirtualRegister(resultVReg); } @@ -73,12 +73,12 @@ void JIT::compilePutByIdHotPath(int baseVReg, Identifier* ident, int valueVReg, // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code // such that the Structure & offset are always at the same distance from this. - emitGetVirtualRegisters(baseVReg, X86::eax, valueVReg, X86::edx); + emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1); emitPutJITStubArgConstant(ident, 2); - emitPutJITStubArg(X86::eax, 1); - emitPutJITStubArg(X86::edx, 3); - emitCTICall(Interpreter::cti_op_put_by_id_generic); + emitPutJITStubArg(regT0, 1); + emitPutJITStubArg(regT1, 3); + emitCTICall(JITStubs::cti_op_put_by_id_generic); } void JIT::compilePutByIdSlowCase(int, Identifier*, int, Vector<SlowCaseEntry>::iterator&, unsigned) @@ -95,21 +95,21 @@ void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier*, unsig // to array-length / prototype access tranpolines, and finally we also the the property-map access offset as a label // to jump back to if one of these trampolies finds a match. 
- emitGetVirtualRegister(baseVReg, X86::eax); + emitGetVirtualRegister(baseVReg, regT0); - emitJumpSlowCaseIfNotJSCell(X86::eax, baseVReg); + emitJumpSlowCaseIfNotJSCell(regT0, baseVReg); Label hotPathBegin(this); m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin; DataLabelPtr structureToCompare; - Jump structureCheck = jnePtrWithPatch(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))); + Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))); addSlowCase(structureCheck); ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure); ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase); - loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax); - DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(X86::eax, patchGetByIdDefaultOffset), X86::eax); + loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_propertyStorage)), regT0); + DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0); ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetGetByIdPropertyMapOffset); Label putResult(this); @@ -132,9 +132,9 @@ void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident #ifndef NDEBUG Label coldPathBegin(this); #endif - emitPutJITStubArg(X86::eax, 1); + emitPutJITStubArg(regT0, 1); emitPutJITStubArgConstant(ident, 2); - Jump call = emitCTICall(Interpreter::cti_op_get_by_id); + Call call = emitCTICall(JITStubs::cti_op_get_by_id); emitPutVirtualRegister(resultVReg); ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall); @@ -149,22 +149,22 @@ void JIT::compilePutByIdHotPath(int baseVReg, 
Identifier*, int valueVReg, unsign // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code // such that the Structure & offset are always at the same distance from this. - emitGetVirtualRegisters(baseVReg, X86::eax, valueVReg, X86::edx); + emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1); // Jump to a slow case if either the base object is an immediate, or if the Structure does not match. - emitJumpSlowCaseIfNotJSCell(X86::eax, baseVReg); + emitJumpSlowCaseIfNotJSCell(regT0, baseVReg); Label hotPathBegin(this); m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin; // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over. DataLabelPtr structureToCompare; - addSlowCase(jnePtrWithPatch(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)))); + addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)))); ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure); // Plant a load from a bogus ofset in the object's property map; we will patch this later, if it is to be used. 
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax); - DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(X86::edx, Address(X86::eax, patchGetByIdDefaultOffset)); + loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_propertyStorage)), regT0); + DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset)); ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetPutByIdPropertyMapOffset); } @@ -174,9 +174,9 @@ void JIT::compilePutByIdSlowCase(int baseVReg, Identifier* ident, int, Vector<Sl linkSlowCase(iter); emitPutJITStubArgConstant(ident, 2); - emitPutJITStubArg(X86::eax, 1); - emitPutJITStubArg(X86::edx, 3); - Jump call = emitCTICall(Interpreter::cti_op_put_by_id); + emitPutJITStubArg(regT0, 1); + emitPutJITStubArg(regT1, 3); + Call call = emitCTICall(JITStubs::cti_op_put_by_id); // Track the location of the call; this will be used to recover patch information. m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call; @@ -193,54 +193,55 @@ static inline bool transitionWillNeedStorageRealloc(Structure* oldStructure, Str return oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity(); } -void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, void* returnAddress) +void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ProcessorReturnAddress returnAddress) { JumpList failureCases; // Check eax is an object of the right Structure. 
- failureCases.append(emitJumpIfNotJSCell(X86::eax)); - failureCases.append(jnePtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(oldStructure))); + failureCases.append(emitJumpIfNotJSCell(regT0)); + failureCases.append(branchPtr(NotEqual, Address(regT0, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(oldStructure))); JumpList successCases; // ecx = baseObject - loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::ecx); + loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2); // proto(ecx) = baseObject->structure()->prototype() - failureCases.append(jne32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType))); + failureCases.append(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType))); - loadPtr(Address(X86::ecx, FIELD_OFFSET(Structure, m_prototype)), X86::ecx); + loadPtr(Address(regT2, FIELD_OFFSET(Structure, m_prototype)), regT2); // ecx = baseObject->m_structure for (RefPtr<Structure>* it = chain->head(); *it; ++it) { // null check the prototype - successCases.append(jePtr(X86::ecx, ImmPtr(JSValuePtr::encode(jsNull())))); + successCases.append(branchPtr(Equal, regT2, ImmPtr(JSValuePtr::encode(jsNull())))); // Check the structure id - failureCases.append(jnePtr(Address(X86::ecx, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(it->get()))); + failureCases.append(branchPtr(NotEqual, Address(regT2, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(it->get()))); - loadPtr(Address(X86::ecx, FIELD_OFFSET(JSCell, m_structure)), X86::ecx); - failureCases.append(jne32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType))); - loadPtr(Address(X86::ecx, FIELD_OFFSET(Structure, m_prototype)), X86::ecx); + loadPtr(Address(regT2, FIELD_OFFSET(JSCell, m_structure)), regT2); + failureCases.append(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo) + 
FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType))); + loadPtr(Address(regT2, FIELD_OFFSET(Structure, m_prototype)), regT2); } successCases.link(this); - Jump callTarget; + Call callTarget; // emit a call only if storage realloc is needed - if (transitionWillNeedStorageRealloc(oldStructure, newStructure)) { + bool willNeedStorageRealloc = transitionWillNeedStorageRealloc(oldStructure, newStructure); + if (willNeedStorageRealloc) { pop(X86::ebx); #if PLATFORM(X86_64) - move(Imm32(newStructure->propertyStorageCapacity()), X86::edx); + move(Imm32(newStructure->propertyStorageCapacity()), regT1); move(Imm32(oldStructure->propertyStorageCapacity()), X86::esi); - move(X86::eax, X86::edi); + move(regT0, X86::edi); callTarget = call(); #else push(Imm32(newStructure->propertyStorageCapacity())); push(Imm32(oldStructure->propertyStorageCapacity())); - push(X86::eax); + push(regT0); callTarget = call(); addPtr(Imm32(3 * sizeof(void*)), X86::esp); #endif - emitGetJITStubArg(3, X86::edx); + emitGetJITStubArg(3, regT1); push(X86::ebx); } @@ -248,150 +249,144 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure // codeblock should ensure oldStructure->m_refCount > 0 sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount())); add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount())); - storePtr(ImmPtr(newStructure), Address(X86::eax, FIELD_OFFSET(JSCell, m_structure))); + storePtr(ImmPtr(newStructure), Address(regT0, FIELD_OFFSET(JSCell, m_structure))); // write the value - loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax); - storePtr(X86::edx, Address(X86::eax, cachedOffset * sizeof(JSValuePtr))); + loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_propertyStorage)), regT0); + storePtr(regT1, Address(regT0, cachedOffset * sizeof(JSValuePtr))); ret(); - Jump failureJump; - bool plantedFailureJump = false; - if (!failureCases.empty()) { - failureCases.link(this); - restoreArgumentReferenceForTrampoline(); - 
failureJump = jump(); - plantedFailureJump = true; - } + ASSERT(!failureCases.empty()); + failureCases.link(this); + restoreArgumentReferenceForTrampoline(); + Call failureCall = tailRecursiveCall(); void* code = m_assembler.executableCopy(m_codeBlock->executablePool()); PatchBuffer patchBuffer(code); - if (plantedFailureJump) - patchBuffer.link(failureJump, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail)); + patchBuffer.link(failureCall, JITStubs::cti_op_put_by_id_fail); - if (transitionWillNeedStorageRealloc(oldStructure, newStructure)) - patchBuffer.link(callTarget, reinterpret_cast<void*>(resizePropertyStorage)); - - stubInfo->stubRoutine = code; + if (willNeedStorageRealloc) + patchBuffer.link(callTarget, resizePropertyStorage); - Jump::patch(returnAddress, code); + stubInfo->stubRoutine = patchBuffer.entry(); + + returnAddress.relinkCallerToFunction(code); } -void JIT::patchGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress) +void JIT::patchGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress) { // We don't want to patch more than once - in future go to cti_op_get_by_id_generic. - // Should probably go to Interpreter::cti_op_get_by_id_fail, but that doesn't do anything interesting right now. - Jump::patch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail)); + // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now. + returnAddress.relinkCallerToFunction(JITStubs::cti_op_get_by_id_self_fail); // Patch the offset into the propoerty map to load from, then patch the Structure to look for. 
- void* structureAddress = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdStructure); - void* displacementAddress = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPropertyMapOffset); - DataLabelPtr::patch(structureAddress, structure); - DataLabel32::patch(displacementAddress, cachedOffset * sizeof(JSValuePtr)); + stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure).repatch(structure); + stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset).repatch(cachedOffset * sizeof(JSValuePtr)); } -void JIT::patchPutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress) +void JIT::patchPutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress) { // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. - // Should probably go to Interpreter::cti_op_put_by_id_fail, but that doesn't do anything interesting right now. - Jump::patch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_generic)); + // Should probably go to JITStubs::cti_op_put_by_id_fail, but that doesn't do anything interesting right now. + returnAddress.relinkCallerToFunction(JITStubs::cti_op_put_by_id_generic); // Patch the offset into the propoerty map to load from, then patch the Structure to look for. 
- void* structureAddress = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetPutByIdStructure; - void* displacementAddress = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetPutByIdPropertyMapOffset; - DataLabelPtr::patch(structureAddress, structure); - DataLabel32::patch(displacementAddress, cachedOffset * sizeof(JSValuePtr)); + stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure).repatch(structure); + stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset).repatch(cachedOffset * sizeof(JSValuePtr)); } -void JIT::privateCompilePatchGetArrayLength(void* returnAddress) +void JIT::privateCompilePatchGetArrayLength(ProcessorReturnAddress returnAddress) { StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress); // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. - Jump::patch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail)); + returnAddress.relinkCallerToFunction(JITStubs::cti_op_get_by_id_array_fail); // Check eax is an array - Jump failureCases1 = jnePtr(Address(X86::eax), ImmPtr(m_interpreter->m_jsArrayVptr)); + Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)); // Checks out okay! 
- get the length from the storage - loadPtr(Address(X86::eax, FIELD_OFFSET(JSArray, m_storage)), X86::ecx); - load32(Address(X86::ecx, FIELD_OFFSET(ArrayStorage, m_length)), X86::ecx); + loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2); + load32(Address(regT2, FIELD_OFFSET(ArrayStorage, m_length)), regT2); - Jump failureCases2 = ja32(X86::ecx, Imm32(JSImmediate::maxImmediateInt)); + Jump failureCases2 = branch32(Above, regT2, Imm32(JSImmediate::maxImmediateInt)); - emitFastArithIntToImmNoCheck(X86::ecx, X86::eax); + emitFastArithIntToImmNoCheck(regT2, regT0); Jump success = jump(); void* code = m_assembler.executableCopy(m_codeBlock->executablePool()); PatchBuffer patchBuffer(code); // Use the patch information to link the failure cases back to the original slow case routine. - void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - patchOffsetGetByIdSlowCaseCall; + CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall); patchBuffer.link(failureCases1, slowCaseBegin); patchBuffer.link(failureCases2, slowCaseBegin); // On success return back to the hot patch code, at a point it will perform the store to dest for us. - void* hotPathPutResult = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult; - patchBuffer.link(success, hotPathPutResult); + patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); // Track the stub we have created so that it will be deleted later. - stubInfo->stubRoutine = code; + CodeLocationLabel entryLabel = patchBuffer.entry(); + stubInfo->stubRoutine = entryLabel; - // Finally patch the jump to sow case back in the hot path to jump here instead. - void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase; - Jump::patch(jumpLocation, code); + // Finally patch the jump to slow case back in the hot path to jump here instead. 
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); + jumpLocation.relink(entryLabel); } -void JIT::privateCompileGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress) +void JIT::privateCompileGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress) { // Check eax is an object of the right Structure. - Jump failureCases1 = emitJumpIfNotJSCell(X86::eax); - Jump failureCases2 = checkStructure(X86::eax, structure); + Jump failureCases1 = emitJumpIfNotJSCell(regT0); + Jump failureCases2 = checkStructure(regT0, structure); // Checks out okay! - getDirectOffset - loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax); - loadPtr(Address(X86::eax, cachedOffset * sizeof(JSValuePtr)), X86::eax); + loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_propertyStorage)), regT0); + loadPtr(Address(regT0, cachedOffset * sizeof(JSValuePtr)), regT0); ret(); + Call failureCases1Call = makeTailRecursiveCall(failureCases1); + Call failureCases2Call = makeTailRecursiveCall(failureCases2); + void* code = m_assembler.executableCopy(m_codeBlock->executablePool()); PatchBuffer patchBuffer(code); - patchBuffer.link(failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail)); - patchBuffer.link(failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail)); + patchBuffer.link(failureCases1Call, JITStubs::cti_op_get_by_id_self_fail); + patchBuffer.link(failureCases2Call, JITStubs::cti_op_get_by_id_self_fail); - stubInfo->stubRoutine = code; + stubInfo->stubRoutine = patchBuffer.entry(); - Jump::patch(returnAddress, code); + returnAddress.relinkCallerToFunction(code); } -void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, void* returnAddress, CallFrame* callFrame) +void 
JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame) { #if USE(CTI_REPATCH_PIC) // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. - Jump::patch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list)); + returnAddress.relinkCallerToFunction(JITStubs::cti_op_get_by_id_proto_list); // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is // referencing the prototype object - let's speculatively load it's table nice and early!) JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame)); PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage; - loadPtr(static_cast<void*>(protoPropertyStorage), X86::edx); + loadPtr(static_cast<void*>(protoPropertyStorage), regT1); // Check eax is an object of the right Structure. - Jump failureCases1 = checkStructure(X86::eax, structure); + Jump failureCases1 = checkStructure(regT0, structure); // Check the prototype object's Structure had not changed. Structure** prototypeStructureAddress = &(protoObject->m_structure); #if PLATFORM(X86_64) - move(ImmPtr(prototypeStructure), X86::ebx); - Jump failureCases2 = jnePtr(X86::ebx, AbsoluteAddress(prototypeStructureAddress)); + move(ImmPtr(prototypeStructure), regT3); + Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3); #else - Jump failureCases2 = jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure)); + Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure)); #endif // Checks out okay! 
- getDirectOffset - loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax); + loadPtr(Address(regT1, cachedOffset * sizeof(JSValuePtr)), regT0); Jump success = jump(); @@ -399,59 +394,59 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str PatchBuffer patchBuffer(code); // Use the patch information to link the failure cases back to the original slow case routine. - void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - patchOffsetGetByIdSlowCaseCall; + CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall); patchBuffer.link(failureCases1, slowCaseBegin); patchBuffer.link(failureCases2, slowCaseBegin); // On success return back to the hot patch code, at a point it will perform the store to dest for us. - intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult; - patchBuffer.link(success, reinterpret_cast<void*>(successDest)); + patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); // Track the stub we have created so that it will be deleted later. - stubInfo->stubRoutine = code; + CodeLocationLabel entryLabel = patchBuffer.entry(); + stubInfo->stubRoutine = entryLabel; // Finally patch the jump to slow case back in the hot path to jump here instead. - void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase; - Jump::patch(jumpLocation, code); + CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); + jumpLocation.relink(entryLabel); #else // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is // referencing the prototype object - let's speculatively load it's table nice and early!) 
JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame)); PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage; - loadPtr(protoPropertyStorage, X86::edx); + loadPtr(protoPropertyStorage, regT1); // Check eax is an object of the right Structure. - Jump failureCases1 = emitJumpIfNotJSCell(X86::eax); - Jump failureCases2 = checkStructure(X86::eax, structure); + Jump failureCases1 = emitJumpIfNotJSCell(regT0); + Jump failureCases2 = checkStructure(regT0, structure); // Check the prototype object's Structure had not changed. Structure** prototypeStructureAddress = &(protoObject->m_structure); - Jump failureCases3 = jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure)); + Jump failureCases3 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure)); // Checks out okay! - getDirectOffset - loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax); + loadPtr(Address(regT1, cachedOffset * sizeof(JSValuePtr)), regT0); ret(); void* code = m_assembler.executableCopy(m_codeBlock->executablePool()); PatchBuffer patchBuffer(code); - patchBuffer.link(failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail)); - patchBuffer.link(failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail)); - patchBuffer.link(failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail)); + patchBuffer.link(failureCases1, JITStubs::cti_op_get_by_id_proto_fail); + patchBuffer.link(failureCases2, JITStubs::cti_op_get_by_id_proto_fail); + patchBuffer.link(failureCases3, JITStubs::cti_op_get_by_id_proto_fail); - stubInfo->stubRoutine = code; + stubInfo->stubRoutine = patchBuffer.entry(); - Jump::patch(returnAddress, code); + returnAddress.relinkCallerToFunction(code); #endif } #if USE(CTI_REPATCH_PIC) void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int 
currentIndex, Structure* structure, size_t cachedOffset) { - Jump failureCase = checkStructure(X86::eax, structure); - loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax); - loadPtr(Address(X86::eax, cachedOffset * sizeof(JSValuePtr)), X86::eax); + Jump failureCase = checkStructure(regT0, structure); + loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_propertyStorage)), regT0); + loadPtr(Address(regT0, cachedOffset * sizeof(JSValuePtr)), regT0); Jump success = jump(); void* code = m_assembler.executableCopy(m_codeBlock->executablePool()); @@ -459,22 +454,23 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic PatchBuffer patchBuffer(code); // Use the patch information to link the failure cases back to the original slow case routine. - void* lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine; + CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine; if (!lastProtoBegin) - lastProtoBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - patchOffsetGetByIdSlowCaseCall; + lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall); patchBuffer.link(failureCase, lastProtoBegin); // On success return back to the hot patch code, at a point it will perform the store to dest for us. - intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult; - patchBuffer.link(success, reinterpret_cast<void*>(successDest)); + patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); + + CodeLocationLabel entryLabel = patchBuffer.entry(); structure->ref(); - polymorphicStructures->list[currentIndex].set(code, structure); + polymorphicStructures->list[currentIndex].set(entryLabel, structure); // Finally patch the jump to slow case back in the hot path to jump here instead. 
- void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase; - Jump::patch(jumpLocation, code); + CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); + jumpLocation.relink(entryLabel); } void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame) @@ -483,22 +479,22 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi // referencing the prototype object - let's speculatively load it's table nice and early!) JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame)); PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage; - loadPtr(protoPropertyStorage, X86::edx); + loadPtr(protoPropertyStorage, regT1); // Check eax is an object of the right Structure. - Jump failureCases1 = checkStructure(X86::eax, structure); + Jump failureCases1 = checkStructure(regT0, structure); // Check the prototype object's Structure had not changed. Structure** prototypeStructureAddress = &(protoObject->m_structure); #if PLATFORM(X86_64) - move(ImmPtr(prototypeStructure), X86::ebx); - Jump failureCases2 = jnePtr(X86::ebx, AbsoluteAddress(prototypeStructureAddress)); + move(ImmPtr(prototypeStructure), regT3); + Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3); #else - Jump failureCases2 = jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure)); + Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure)); #endif // Checks out okay! 
- getDirectOffset - loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax); + loadPtr(Address(regT1, cachedOffset * sizeof(JSValuePtr)), regT0); Jump success = jump(); @@ -506,21 +502,22 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi PatchBuffer patchBuffer(code); // Use the patch information to link the failure cases back to the original slow case routine. - void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine; + CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine; patchBuffer.link(failureCases1, lastProtoBegin); patchBuffer.link(failureCases2, lastProtoBegin); // On success return back to the hot patch code, at a point it will perform the store to dest for us. - intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult; - patchBuffer.link(success, reinterpret_cast<void*>(successDest)); + patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); + + CodeLocationLabel entryLabel = patchBuffer.entry(); structure->ref(); prototypeStructure->ref(); - prototypeStructures->list[currentIndex].set(code, structure, prototypeStructure); + prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure); // Finally patch the jump to slow case back in the hot path to jump here instead. 
- void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase; - Jump::patch(jumpLocation, code); + CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); + jumpLocation.relink(entryLabel); } void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame) @@ -530,7 +527,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi JumpList bucketsOfFail; // Check eax is an object of the right Structure. - Jump baseObjectCheck = checkStructure(X86::eax, structure); + Jump baseObjectCheck = checkStructure(regT0, structure); bucketsOfFail.append(baseObjectCheck); Structure* currStructure = structure; @@ -543,54 +540,55 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi // Check the prototype object's Structure had not changed. 
Structure** prototypeStructureAddress = &(protoObject->m_structure); #if PLATFORM(X86_64) - move(ImmPtr(currStructure), X86::ebx); - bucketsOfFail.append(jnePtr(X86::ebx, AbsoluteAddress(prototypeStructureAddress))); + move(ImmPtr(currStructure), regT3); + bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3)); #else - bucketsOfFail.append(jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure))); + bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure))); #endif } ASSERT(protoObject); PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage; - loadPtr(protoPropertyStorage, X86::edx); - loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax); + loadPtr(protoPropertyStorage, regT1); + loadPtr(Address(regT1, cachedOffset * sizeof(JSValuePtr)), regT0); Jump success = jump(); void* code = m_assembler.executableCopy(m_codeBlock->executablePool()); PatchBuffer patchBuffer(code); // Use the patch information to link the failure cases back to the original slow case routine. - void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine; + CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine; patchBuffer.link(bucketsOfFail, lastProtoBegin); // On success return back to the hot patch code, at a point it will perform the store to dest for us. - intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult; - patchBuffer.link(success, reinterpret_cast<void*>(successDest)); + patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); + + CodeLocationLabel entryLabel = patchBuffer.entry(); // Track the stub we have created so that it will be deleted later. 
structure->ref(); chain->ref(); - prototypeStructures->list[currentIndex].set(code, structure, chain); + prototypeStructures->list[currentIndex].set(entryLabel, structure, chain); // Finally patch the jump to slow case back in the hot path to jump here instead. - void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase; - Jump::patch(jumpLocation, code); + CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); + jumpLocation.relink(entryLabel); } #endif -void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, void* returnAddress, CallFrame* callFrame) +void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame) { #if USE(CTI_REPATCH_PIC) // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. - Jump::patch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list)); + returnAddress.relinkCallerToFunction(JITStubs::cti_op_get_by_id_proto_list); ASSERT(count); JumpList bucketsOfFail; // Check eax is an object of the right Structure. - bucketsOfFail.append(checkStructure(X86::eax, structure)); + bucketsOfFail.append(checkStructure(regT0, structure)); Structure* currStructure = structure; RefPtr<Structure>* chainEntries = chain->head(); @@ -602,45 +600,43 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str // Check the prototype object's Structure had not changed. 
Structure** prototypeStructureAddress = &(protoObject->m_structure); #if PLATFORM(X86_64) - move(ImmPtr(currStructure), X86::ebx); - bucketsOfFail.append(jnePtr(X86::ebx, AbsoluteAddress(prototypeStructureAddress))); + move(ImmPtr(currStructure), regT3); + bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3)); #else - bucketsOfFail.append(jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure))); + bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure))); #endif } ASSERT(protoObject); PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage; - loadPtr(protoPropertyStorage, X86::edx); - loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax); + loadPtr(protoPropertyStorage, regT1); + loadPtr(Address(regT1, cachedOffset * sizeof(JSValuePtr)), regT0); Jump success = jump(); void* code = m_assembler.executableCopy(m_codeBlock->executablePool()); PatchBuffer patchBuffer(code); // Use the patch information to link the failure cases back to the original slow case routine. - void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - patchOffsetGetByIdSlowCaseCall; - - patchBuffer.link(bucketsOfFail, slowCaseBegin); + patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall)); // On success return back to the hot patch code, at a point it will perform the store to dest for us. - intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult; - patchBuffer.link(success, reinterpret_cast<void*>(successDest)); + patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); // Track the stub we have created so that it will be deleted later. 
- stubInfo->stubRoutine = code; + CodeLocationLabel entryLabel = patchBuffer.entry(); + stubInfo->stubRoutine = entryLabel; // Finally patch the jump to slow case back in the hot path to jump here instead. - void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase; - Jump::patch(jumpLocation, code); + CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); + jumpLocation.relink(entryLabel); #else ASSERT(count); JumpList bucketsOfFail; // Check eax is an object of the right Structure. - bucketsOfFail.append(emitJumpIfNotJSCell(X86::eax)); - bucketsOfFail.append(checkStructure(X86::eax, structure)); + bucketsOfFail.append(emitJumpIfNotJSCell(regT0)); + bucketsOfFail.append(checkStructure(regT0, structure)); Structure* currStructure = structure; RefPtr<Structure>* chainEntries = chain->head(); @@ -652,49 +648,52 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str // Check the prototype object's Structure had not changed. 
Structure** prototypeStructureAddress = &(protoObject->m_structure); #if PLATFORM(X86_64) - move(ImmPtr(currStructure), X86::ebx); - bucketsOfFail.append(jnePtr(X86::ebx, AbsoluteAddress(prototypeStructureAddress))); + move(ImmPtr(currStructure), regT3); + bucketsOfFail.append(branchPtr(NotEqual, regT3, AbsoluteAddress(prototypeStructureAddress))); #else - bucketsOfFail.append(jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure))); + bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure))); #endif } ASSERT(protoObject); PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage; - loadPtr(protoPropertyStorage, X86::edx); - loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax); + loadPtr(protoPropertyStorage, regT1); + loadPtr(Address(regT1, cachedOffset * sizeof(JSValuePtr)), regT0); ret(); void* code = m_assembler.executableCopy(m_codeBlock->executablePool()); - patchBuffer.link(bucketsOfFail, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail)); + patchBuffer.link(bucketsOfFail, JITStubs::cti_op_get_by_id_proto_fail); - stubInfo->stubRoutine = code; + stubInfo->stubRoutine = patchBuffer.entry(); - Jump::patch(returnAddress, code); + returnAddress.relinkCallerToFunction(code); #endif } -void JIT::privateCompilePutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress) +void JIT::privateCompilePutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress) { // Check eax is an object of the right Structure. - Jump failureCases1 = emitJumpIfNotJSCell(X86::eax); - Jump failureCases2 = checkStructure(X86::eax, structure); + Jump failureCases1 = emitJumpIfNotJSCell(regT0); + Jump failureCases2 = checkStructure(regT0, structure); // checks out okay! 
- putDirectOffset - loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax); - storePtr(X86::edx, Address(X86::eax, cachedOffset * sizeof(JSValuePtr))); + loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_propertyStorage)), regT0); + storePtr(regT1, Address(regT0, cachedOffset * sizeof(JSValuePtr))); ret(); + Call failureCases1Call = makeTailRecursiveCall(failureCases1); + Call failureCases2Call = makeTailRecursiveCall(failureCases2); + void* code = m_assembler.executableCopy(m_codeBlock->executablePool()); PatchBuffer patchBuffer(code); - patchBuffer.link(failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail)); - patchBuffer.link(failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail)); + patchBuffer.link(failureCases1Call, JITStubs::cti_op_put_by_id_fail); + patchBuffer.link(failureCases2Call, JITStubs::cti_op_put_by_id_fail); - stubInfo->stubRoutine = code; + stubInfo->stubRoutine = patchBuffer.entry(); - Jump::patch(returnAddress, code); + returnAddress.relinkCallerToFunction(code); } #endif diff --git a/JavaScriptCore/jit/JITStubs.cpp b/JavaScriptCore/jit/JITStubs.cpp new file mode 100644 index 0000000..de528a5 --- /dev/null +++ b/JavaScriptCore/jit/JITStubs.cpp @@ -0,0 +1,2196 @@ +/* + * Copyright (C) 2008, 2009 Apple Inc. All rights reserved. + * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. 
("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "JITStubs.h" + +#if ENABLE(JIT) + +#include "Arguments.h" +#include "CallFrame.h" +#include "CodeBlock.h" +#include "Collector.h" +#include "Debugger.h" +#include "ExceptionHelpers.h" +#include "GlobalEvalFunction.h" +#include "JIT.h" +#include "JSActivation.h" +#include "JSArray.h" +#include "JSByteArray.h" +#include "JSFunction.h" +#include "JSNotAnObject.h" +#include "JSPropertyNameIterator.h" +#include "JSStaticScopeObject.h" +#include "JSString.h" +#include "ObjectPrototype.h" +#include "Operations.h" +#include "Parser.h" +#include "Profiler.h" +#include "RegExpObject.h" +#include "RegExpPrototype.h" +#include "Register.h" +#include "SamplingTool.h" +#include <stdio.h> + +using namespace std; + +namespace JSC { + +#if ENABLE(OPCODE_SAMPLING) + #define CTI_SAMPLER ARG_globalData->interpreter->sampler() +#else + #define CTI_SAMPLER 0 +#endif + +JITStubs::JITStubs(JSGlobalData* globalData) + : m_ctiArrayLengthTrampoline(0) + , m_ctiStringLengthTrampoline(0) + , m_ctiVirtualCallPreLink(0) + , 
m_ctiVirtualCallLink(0) + , m_ctiVirtualCall(0) +{ + JIT::compileCTIMachineTrampolines(globalData, &m_executablePool, &m_ctiArrayLengthTrampoline, &m_ctiStringLengthTrampoline, &m_ctiVirtualCallPreLink, &m_ctiVirtualCallLink, &m_ctiVirtualCall); +} + +#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) + +NEVER_INLINE void JITStubs::tryCachePutByID(CallFrame* callFrame, CodeBlock* codeBlock, void* returnAddress, JSValuePtr baseValue, const PutPropertySlot& slot) +{ + // The interpreter checks for recursion here; I do not believe this can occur in CTI. + + if (!baseValue.isCell()) + return; + + // Uncacheable: give up. + if (!slot.isCacheable()) { + ctiPatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(JITStubs::cti_op_put_by_id_generic)); + return; + } + + JSCell* baseCell = asCell(baseValue); + Structure* structure = baseCell->structure(); + + if (structure->isDictionary()) { + ctiPatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(JITStubs::cti_op_put_by_id_generic)); + return; + } + + // If baseCell != base, then baseCell must be a proxy for another object. + if (baseCell != slot.base()) { + ctiPatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(JITStubs::cti_op_put_by_id_generic)); + return; + } + + StructureStubInfo* stubInfo = &codeBlock->getStubInfo(returnAddress); + + // Cache hit: Specialize instruction and ref Structures. 
+ + // Structure transition, cache transition info + if (slot.type() == PutPropertySlot::NewProperty) { + StructureChain* prototypeChain = structure->prototypeChain(callFrame); + stubInfo->initPutByIdTransition(structure->previousID(), structure, prototypeChain); + JIT::compilePutByIdTransition(callFrame->scopeChain()->globalData, codeBlock, stubInfo, structure->previousID(), structure, slot.cachedOffset(), prototypeChain, returnAddress); + return; + } + + stubInfo->initPutByIdReplace(structure); + +#if USE(CTI_REPATCH_PIC) + JIT::patchPutByIdReplace(stubInfo, structure, slot.cachedOffset(), returnAddress); +#else + JIT::compilePutByIdReplace(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress); +#endif +} + +NEVER_INLINE void JITStubs::tryCacheGetByID(CallFrame* callFrame, CodeBlock* codeBlock, void* returnAddress, JSValuePtr baseValue, const Identifier& propertyName, const PropertySlot& slot) +{ + // FIXME: Write a test that proves we need to check for recursion here just + // like the interpreter does, then add a check for recursion. + + // FIXME: Cache property access for immediates. + if (!baseValue.isCell()) { + ctiPatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(JITStubs::cti_op_get_by_id_generic)); + return; + } + + JSGlobalData* globalData = &callFrame->globalData(); + + if (isJSArray(globalData, baseValue) && propertyName == callFrame->propertyNames().length) { +#if USE(CTI_REPATCH_PIC) + JIT::compilePatchGetArrayLength(callFrame->scopeChain()->globalData, codeBlock, returnAddress); +#else + ctiPatchCallByReturnAddress(returnAddress, globalData->jitStubs.ctiArrayLengthTrampoline()); +#endif + return; + } + + if (isJSString(globalData, baseValue) && propertyName == callFrame->propertyNames().length) { + // The tradeoff of compiling an patched inline string length access routine does not seem + // to pay off, so we currently only do this for arrays. 
+ ctiPatchCallByReturnAddress(returnAddress, globalData->jitStubs.ctiStringLengthTrampoline()); + return; + } + + // Uncacheable: give up. + if (!slot.isCacheable()) { + ctiPatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(JITStubs::cti_op_get_by_id_generic)); + return; + } + + JSCell* baseCell = asCell(baseValue); + Structure* structure = baseCell->structure(); + + if (structure->isDictionary()) { + ctiPatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(JITStubs::cti_op_get_by_id_generic)); + return; + } + + // In the interpreter the last structure is trapped here; in CTI we use the + // *_second method to achieve a similar (but not quite the same) effect. + + StructureStubInfo* stubInfo = &codeBlock->getStubInfo(returnAddress); + + // Cache hit: Specialize instruction and ref Structures. + + if (slot.slotBase() == baseValue) { + // set this up, so derefStructures can do it's job. + stubInfo->initGetByIdSelf(structure); + +#if USE(CTI_REPATCH_PIC) + JIT::patchGetByIdSelf(stubInfo, structure, slot.cachedOffset(), returnAddress); +#else + JIT::compileGetByIdSelf(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress); +#endif + return; + } + + if (slot.slotBase() == structure->prototypeForLookup(callFrame)) { + ASSERT(slot.slotBase().isObject()); + + JSObject* slotBaseObject = asObject(slot.slotBase()); + + // Since we're accessing a prototype in a loop, it's a good bet that it + // should not be treated as a dictionary. 
+ if (slotBaseObject->structure()->isDictionary()) + slotBaseObject->setStructure(Structure::fromDictionaryTransition(slotBaseObject->structure())); + + stubInfo->initGetByIdProto(structure, slotBaseObject->structure()); + + JIT::compileGetByIdProto(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, slotBaseObject->structure(), slot.cachedOffset(), returnAddress); + return; + } + + size_t count = countPrototypeChainEntriesAndCheckForProxies(callFrame, baseValue, slot); + if (!count) { + stubInfo->opcodeID = op_get_by_id_generic; + return; + } + + StructureChain* prototypeChain = structure->prototypeChain(callFrame); + stubInfo->initGetByIdChain(structure, prototypeChain); + JIT::compileGetByIdChain(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, prototypeChain, count, slot.cachedOffset(), returnAddress); +} + +#endif + +#if USE(JIT_STUB_ARGUMENT_VA_LIST) +#define SETUP_VA_LISTL_ARGS va_list vl_args; va_start(vl_args, args) +#else // JIT_STUB_ARGUMENT_REGISTER or JIT_STUB_ARGUMENT_STACK +#define SETUP_VA_LISTL_ARGS +#endif + +#ifndef NDEBUG + +extern "C" { + +static void jscGeneratedNativeCode() +{ + // When executing a CTI function (which might do an allocation), we hack the return address + // to pretend to be executing this function, to keep stack logging tools from blowing out + // memory. 
+} + +} + +struct StackHack { + ALWAYS_INLINE StackHack(void** location) + { + returnAddressLocation = location; + savedReturnAddress = *returnAddressLocation; + ctiSetReturnAddress(returnAddressLocation, reinterpret_cast<void*>(jscGeneratedNativeCode)); + } + ALWAYS_INLINE ~StackHack() + { + ctiSetReturnAddress(returnAddressLocation, savedReturnAddress); + } + + void** returnAddressLocation; + void* savedReturnAddress; +}; + +#define BEGIN_STUB_FUNCTION() SETUP_VA_LISTL_ARGS; StackHack stackHack(&STUB_RETURN_ADDRESS_SLOT) +#define STUB_SET_RETURN_ADDRESS(address) stackHack.savedReturnAddress = address +#define STUB_RETURN_ADDRESS stackHack.savedReturnAddress + +#else + +#define BEGIN_STUB_FUNCTION() SETUP_VA_LISTL_ARGS +#define STUB_SET_RETURN_ADDRESS(address) ctiSetReturnAddress(&STUB_RETURN_ADDRESS_SLOT, address); +#define STUB_RETURN_ADDRESS STUB_RETURN_ADDRESS_SLOT + +#endif + +// The reason this is not inlined is to avoid having to do a PIC branch +// to get the address of the ctiVMThrowTrampoline function. It's also +// good to keep the code size down by leaving as much of the exception +// handling code out of line as possible. 
+static NEVER_INLINE void returnToThrowTrampoline(JSGlobalData* globalData, void* exceptionLocation, void*& returnAddressSlot) +{ + ASSERT(globalData->exception); + globalData->exceptionLocation = exceptionLocation; + ctiSetReturnAddress(&returnAddressSlot, reinterpret_cast<void*>(ctiVMThrowTrampoline)); +} + +static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalData* globalData, void* exceptionLocation, void*& returnAddressSlot) +{ + globalData->exception = createStackOverflowError(callFrame); + returnToThrowTrampoline(globalData, exceptionLocation, returnAddressSlot); +} + +#define VM_THROW_EXCEPTION() \ + do { \ + VM_THROW_EXCEPTION_AT_END(); \ + return 0; \ + } while (0) +#define VM_THROW_EXCEPTION_2() \ + do { \ + VM_THROW_EXCEPTION_AT_END(); \ + RETURN_PAIR(0, 0); \ + } while (0) +#define VM_THROW_EXCEPTION_AT_END() \ + returnToThrowTrampoline(ARG_globalData, STUB_RETURN_ADDRESS, STUB_RETURN_ADDRESS) + +#define CHECK_FOR_EXCEPTION() \ + do { \ + if (UNLIKELY(ARG_globalData->exception != noValue())) \ + VM_THROW_EXCEPTION(); \ + } while (0) +#define CHECK_FOR_EXCEPTION_AT_END() \ + do { \ + if (UNLIKELY(ARG_globalData->exception != noValue())) \ + VM_THROW_EXCEPTION_AT_END(); \ + } while (0) +#define CHECK_FOR_EXCEPTION_VOID() \ + do { \ + if (UNLIKELY(ARG_globalData->exception != noValue())) { \ + VM_THROW_EXCEPTION_AT_END(); \ + return; \ + } \ + } while (0) + +JSObject* JITStubs::cti_op_convert_this(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr v1 = ARG_src1; + CallFrame* callFrame = ARG_callFrame; + + JSObject* result = v1.toThisObject(callFrame); + CHECK_FOR_EXCEPTION_AT_END(); + return result; +} + +void JITStubs::cti_op_end(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + ScopeChainNode* scopeChain = ARG_callFrame->scopeChain(); + ASSERT(scopeChain->refCount > 1); + scopeChain->deref(); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_add(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr v1 = ARG_src1; + JSValuePtr v2 = 
ARG_src2; + + double left; + double right = 0.0; + + bool rightIsNumber = v2.getNumber(right); + if (rightIsNumber && v1.getNumber(left)) + return JSValuePtr::encode(jsNumber(ARG_globalData, left + right)); + + CallFrame* callFrame = ARG_callFrame; + + bool leftIsString = v1.isString(); + if (leftIsString && v2.isString()) { + RefPtr<UString::Rep> value = concatenate(asString(v1)->value().rep(), asString(v2)->value().rep()); + if (UNLIKELY(!value)) { + throwOutOfMemoryError(callFrame); + VM_THROW_EXCEPTION(); + } + + return JSValuePtr::encode(jsString(ARG_globalData, value.release())); + } + + if (rightIsNumber & leftIsString) { + RefPtr<UString::Rep> value = v2.isInt32Fast() ? + concatenate(asString(v1)->value().rep(), v2.getInt32Fast()) : + concatenate(asString(v1)->value().rep(), right); + + if (UNLIKELY(!value)) { + throwOutOfMemoryError(callFrame); + VM_THROW_EXCEPTION(); + } + return JSValuePtr::encode(jsString(ARG_globalData, value.release())); + } + + // All other cases are pretty uncommon + JSValuePtr result = jsAddSlowCase(callFrame, v1, v2); + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_pre_inc(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr v = ARG_src1; + + CallFrame* callFrame = ARG_callFrame; + JSValuePtr result = jsNumber(ARG_globalData, v.toNumber(callFrame) + 1); + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +int JITStubs::cti_timeout_check(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSGlobalData* globalData = ARG_globalData; + TimeoutChecker& timeoutChecker = globalData->timeoutChecker; + + if (timeoutChecker.didTimeOut(ARG_callFrame)) { + globalData->exception = createInterruptedExecutionException(globalData); + VM_THROW_EXCEPTION_AT_END(); + } + + return timeoutChecker.ticksUntilNextCheck(); +} + +void JITStubs::cti_register_file_check(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + if (LIKELY(ARG_registerFile->grow(ARG_callFrame + 
ARG_callFrame->codeBlock()->m_numCalleeRegisters))) + return; + + // Rewind to the previous call frame because op_call already optimistically + // moved the call frame forward. + CallFrame* oldCallFrame = ARG_callFrame->callerFrame(); + ARG_setCallFrame(oldCallFrame); + throwStackOverflowError(oldCallFrame, ARG_globalData, oldCallFrame->returnPC(), STUB_RETURN_ADDRESS); +} + +int JITStubs::cti_op_loop_if_less(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr src1 = ARG_src1; + JSValuePtr src2 = ARG_src2; + CallFrame* callFrame = ARG_callFrame; + + bool result = jsLess(callFrame, src1, src2); + CHECK_FOR_EXCEPTION_AT_END(); + return result; +} + +int JITStubs::cti_op_loop_if_lesseq(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr src1 = ARG_src1; + JSValuePtr src2 = ARG_src2; + CallFrame* callFrame = ARG_callFrame; + + bool result = jsLessEq(callFrame, src1, src2); + CHECK_FOR_EXCEPTION_AT_END(); + return result; +} + +JSObject* JITStubs::cti_op_new_object(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + return constructEmptyObject(ARG_callFrame); +} + +void JITStubs::cti_op_put_by_id_generic(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + PutPropertySlot slot; + ARG_src1.put(ARG_callFrame, *ARG_id2, ARG_src3, slot); + CHECK_FOR_EXCEPTION_AT_END(); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_generic(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + Identifier& ident = *ARG_id2; + + JSValuePtr baseValue = ARG_src1; + PropertySlot slot(baseValue); + JSValuePtr result = baseValue.get(callFrame, ident, slot); + + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) + +void JITStubs::cti_op_put_by_id(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + Identifier& ident = *ARG_id2; + + PutPropertySlot slot; + ARG_src1.put(callFrame, ident, ARG_src3, slot); + + ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, 
reinterpret_cast<void*>(cti_op_put_by_id_second)); + + CHECK_FOR_EXCEPTION_AT_END(); +} + +void JITStubs::cti_op_put_by_id_second(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + PutPropertySlot slot; + ARG_src1.put(ARG_callFrame, *ARG_id2, ARG_src3, slot); + tryCachePutByID(ARG_callFrame, ARG_callFrame->codeBlock(), STUB_RETURN_ADDRESS, ARG_src1, slot); + CHECK_FOR_EXCEPTION_AT_END(); +} + +void JITStubs::cti_op_put_by_id_fail(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + Identifier& ident = *ARG_id2; + + PutPropertySlot slot; + ARG_src1.put(callFrame, ident, ARG_src3, slot); + + CHECK_FOR_EXCEPTION_AT_END(); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + Identifier& ident = *ARG_id2; + + JSValuePtr baseValue = ARG_src1; + PropertySlot slot(baseValue); + JSValuePtr result = baseValue.get(callFrame, ident, slot); + + ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_id_second)); + + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_second(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + Identifier& ident = *ARG_id2; + + JSValuePtr baseValue = ARG_src1; + PropertySlot slot(baseValue); + JSValuePtr result = baseValue.get(callFrame, ident, slot); + + tryCacheGetByID(callFrame, callFrame->codeBlock(), STUB_RETURN_ADDRESS, baseValue, ident, slot); + + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_self_fail(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + Identifier& ident = *ARG_id2; + + JSValuePtr baseValue = ARG_src1; + PropertySlot slot(baseValue); + JSValuePtr result = baseValue.get(callFrame, ident, slot); + + CHECK_FOR_EXCEPTION(); + + if (baseValue.isCell() + && slot.isCacheable() + && 
!asCell(baseValue)->structure()->isDictionary() + && slot.slotBase() == baseValue) { + + CodeBlock* codeBlock = callFrame->codeBlock(); + StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS); + + ASSERT(slot.slotBase().isObject()); + + PolymorphicAccessStructureList* polymorphicStructureList; + int listIndex = 1; + + if (stubInfo->opcodeID == op_get_by_id_self) { + ASSERT(!stubInfo->stubRoutine); + polymorphicStructureList = new PolymorphicAccessStructureList(MacroAssembler::CodeLocationLabel(), stubInfo->u.getByIdSelf.baseObjectStructure); + stubInfo->initGetByIdSelfList(polymorphicStructureList, 2); + } else { + polymorphicStructureList = stubInfo->u.getByIdSelfList.structureList; + listIndex = stubInfo->u.getByIdSelfList.listSize; + stubInfo->u.getByIdSelfList.listSize++; + } + + JIT::compileGetByIdSelfList(callFrame->scopeChain()->globalData, codeBlock, stubInfo, polymorphicStructureList, listIndex, asCell(baseValue)->structure(), slot.cachedOffset()); + + if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1)) + ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_id_generic)); + } else { + ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_id_generic)); + } + return JSValuePtr::encode(result); +} + +static PolymorphicAccessStructureList* getPolymorphicAccessStructureListSlot(StructureStubInfo* stubInfo, int& listIndex) +{ + PolymorphicAccessStructureList* prototypeStructureList = 0; + listIndex = 1; + + switch (stubInfo->opcodeID) { + case op_get_by_id_proto: + prototypeStructureList = new PolymorphicAccessStructureList(stubInfo->stubRoutine, stubInfo->u.getByIdProto.baseObjectStructure, stubInfo->u.getByIdProto.prototypeStructure); + stubInfo->stubRoutine.reset(); + stubInfo->initGetByIdProtoList(prototypeStructureList, 2); + break; + case op_get_by_id_chain: + prototypeStructureList = new PolymorphicAccessStructureList(stubInfo->stubRoutine, 
stubInfo->u.getByIdChain.baseObjectStructure, stubInfo->u.getByIdChain.chain); + stubInfo->stubRoutine.reset(); + stubInfo->initGetByIdProtoList(prototypeStructureList, 2); + break; + case op_get_by_id_proto_list: + prototypeStructureList = stubInfo->u.getByIdProtoList.structureList; + listIndex = stubInfo->u.getByIdProtoList.listSize; + stubInfo->u.getByIdProtoList.listSize++; + break; + default: + ASSERT_NOT_REACHED(); + } + + ASSERT(listIndex < POLYMORPHIC_LIST_CACHE_SIZE); + return prototypeStructureList; +} + +JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_proto_list(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + + JSValuePtr baseValue = ARG_src1; + PropertySlot slot(baseValue); + JSValuePtr result = baseValue.get(callFrame, *ARG_id2, slot); + + CHECK_FOR_EXCEPTION(); + + if (!baseValue.isCell() || !slot.isCacheable() || asCell(baseValue)->structure()->isDictionary()) { + ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_id_proto_fail)); + return JSValuePtr::encode(result); + } + + Structure* structure = asCell(baseValue)->structure(); + CodeBlock* codeBlock = callFrame->codeBlock(); + StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS); + + ASSERT(slot.slotBase().isObject()); + JSObject* slotBaseObject = asObject(slot.slotBase()); + + if (slot.slotBase() == baseValue) + ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_id_proto_fail)); + else if (slot.slotBase() == asCell(baseValue)->structure()->prototypeForLookup(callFrame)) { + // Since we're accessing a prototype in a loop, it's a good bet that it + // should not be treated as a dictionary. 
+ if (slotBaseObject->structure()->isDictionary()) + slotBaseObject->setStructure(Structure::fromDictionaryTransition(slotBaseObject->structure())); + + int listIndex; + PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(stubInfo, listIndex); + + JIT::compileGetByIdProtoList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, slotBaseObject->structure(), slot.cachedOffset()); + + if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1)) + ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_id_proto_list_full)); + } else if (size_t count = countPrototypeChainEntriesAndCheckForProxies(callFrame, baseValue, slot)) { + int listIndex; + PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(stubInfo, listIndex); + JIT::compileGetByIdChainList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, structure->prototypeChain(callFrame), count, slot.cachedOffset()); + + if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1)) + ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_id_proto_list_full)); + } else + ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_id_proto_fail)); + + return JSValuePtr::encode(result); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_proto_list_full(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr baseValue = ARG_src1; + PropertySlot slot(baseValue); + JSValuePtr result = baseValue.get(ARG_callFrame, *ARG_id2, slot); + + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_proto_fail(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr baseValue = ARG_src1; + PropertySlot slot(baseValue); + JSValuePtr result = baseValue.get(ARG_callFrame, *ARG_id2, slot); + + 
CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_array_fail(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr baseValue = ARG_src1; + PropertySlot slot(baseValue); + JSValuePtr result = baseValue.get(ARG_callFrame, *ARG_id2, slot); + + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_string_fail(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr baseValue = ARG_src1; + PropertySlot slot(baseValue); + JSValuePtr result = baseValue.get(ARG_callFrame, *ARG_id2, slot); + + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +#endif + +JSValueEncodedAsPointer* JITStubs::cti_op_instanceof(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + JSValuePtr value = ARG_src1; + JSValuePtr baseVal = ARG_src2; + JSValuePtr proto = ARG_src3; + + // at least one of these checks must have failed to get to the slow case + ASSERT(!value.isCell() || !baseVal.isCell() || !proto.isCell() + || !value.isObject() || !baseVal.isObject() || !proto.isObject() + || (asObject(baseVal)->structure()->typeInfo().flags() & (ImplementsHasInstance | OverridesHasInstance)) != ImplementsHasInstance); + + if (!baseVal.isObject()) { + CallFrame* callFrame = ARG_callFrame; + CodeBlock* codeBlock = callFrame->codeBlock(); + unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS); + ARG_globalData->exception = createInvalidParamError(callFrame, "instanceof", baseVal, vPCIndex, codeBlock); + VM_THROW_EXCEPTION(); + } + + JSObject* baseObj = asObject(baseVal); + TypeInfo typeInfo = baseObj->structure()->typeInfo(); + if (!typeInfo.implementsHasInstance()) + return JSValuePtr::encode(jsBoolean(false)); + + if (!typeInfo.overridesHasInstance()) { + if (!proto.isObject()) { + throwError(callFrame, TypeError, "instanceof called on an object with an invalid prototype property."); + 
VM_THROW_EXCEPTION(); + } + + if (!value.isObject()) + return JSValuePtr::encode(jsBoolean(false)); + } + + JSValuePtr result = jsBoolean(baseObj->hasInstance(callFrame, value, proto)); + CHECK_FOR_EXCEPTION_AT_END(); + + return JSValuePtr::encode(result); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_del_by_id(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + + JSObject* baseObj = ARG_src1.toObject(callFrame); + + JSValuePtr result = jsBoolean(baseObj->deleteProperty(callFrame, *ARG_id2)); + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_mul(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr src1 = ARG_src1; + JSValuePtr src2 = ARG_src2; + + double left; + double right; + if (src1.getNumber(left) && src2.getNumber(right)) + return JSValuePtr::encode(jsNumber(ARG_globalData, left * right)); + + CallFrame* callFrame = ARG_callFrame; + JSValuePtr result = jsNumber(ARG_globalData, src1.toNumber(callFrame) * src2.toNumber(callFrame)); + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +JSObject* JITStubs::cti_op_new_func(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + return ARG_func1->makeFunction(ARG_callFrame, ARG_callFrame->scopeChain()); +} + +void* JITStubs::cti_op_call_JSFunction(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + +#ifndef NDEBUG + CallData callData; + ASSERT(ARG_src1.getCallData(callData) == CallTypeJS); +#endif + + ScopeChainNode* callDataScopeChain = asFunction(ARG_src1)->scope().node(); + CodeBlock* newCodeBlock = &asFunction(ARG_src1)->body()->bytecode(callDataScopeChain); + + if (!newCodeBlock->jitCode()) + JIT::compile(ARG_globalData, newCodeBlock); + + return newCodeBlock; +} + +VoidPtrPair JITStubs::cti_op_call_arityCheck(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + CodeBlock* newCodeBlock = ARG_codeBlock4; + int argCount = ARG_int3; + + ASSERT(argCount != newCodeBlock->m_numParameters); 
+ + CallFrame* oldCallFrame = callFrame->callerFrame(); + + if (argCount > newCodeBlock->m_numParameters) { + size_t numParameters = newCodeBlock->m_numParameters; + Register* r = callFrame->registers() + numParameters; + + Register* argv = r - RegisterFile::CallFrameHeaderSize - numParameters - argCount; + for (size_t i = 0; i < numParameters; ++i) + argv[i + argCount] = argv[i]; + + callFrame = CallFrame::create(r); + callFrame->setCallerFrame(oldCallFrame); + } else { + size_t omittedArgCount = newCodeBlock->m_numParameters - argCount; + Register* r = callFrame->registers() + omittedArgCount; + Register* newEnd = r + newCodeBlock->m_numCalleeRegisters; + if (!ARG_registerFile->grow(newEnd)) { + // Rewind to the previous call frame because op_call already optimistically + // moved the call frame forward. + ARG_setCallFrame(oldCallFrame); + throwStackOverflowError(oldCallFrame, ARG_globalData, ARG_returnAddress2, STUB_RETURN_ADDRESS); + RETURN_PAIR(0, 0); + } + + Register* argv = r - RegisterFile::CallFrameHeaderSize - omittedArgCount; + for (size_t i = 0; i < omittedArgCount; ++i) + argv[i] = jsUndefined(); + + callFrame = CallFrame::create(r); + callFrame->setCallerFrame(oldCallFrame); + } + + RETURN_PAIR(newCodeBlock, callFrame); +} + +void* JITStubs::cti_vm_dontLazyLinkCall(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSGlobalData* globalData = ARG_globalData; + JSFunction* callee = asFunction(ARG_src1); + CodeBlock* codeBlock = &callee->body()->bytecode(callee->scope().node()); + if (!codeBlock->jitCode()) + JIT::compile(globalData, codeBlock); + + ctiPatchNearCallByReturnAddress(ARG_returnAddress2, globalData->jitStubs.ctiVirtualCallLink()); + + return codeBlock->jitCode().addressForCall(); +} + +void* JITStubs::cti_vm_lazyLinkCall(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSFunction* callee = asFunction(ARG_src1); + CodeBlock* codeBlock = &callee->body()->bytecode(callee->scope().node()); + if (!codeBlock->jitCode()) + JIT::compile(ARG_globalData, 
codeBlock); + + CallLinkInfo* callLinkInfo = &ARG_callFrame->callerFrame()->codeBlock()->getCallLinkInfo(ARG_returnAddress2); + JIT::linkCall(callee, codeBlock, codeBlock->jitCode(), callLinkInfo, ARG_int3); + + return codeBlock->jitCode().addressForCall(); +} + +JSObject* JITStubs::cti_op_push_activation(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSActivation* activation = new (ARG_globalData) JSActivation(ARG_callFrame, static_cast<FunctionBodyNode*>(ARG_callFrame->codeBlock()->ownerNode())); + ARG_callFrame->setScopeChain(ARG_callFrame->scopeChain()->copy()->push(activation)); + return activation; +} + +JSValueEncodedAsPointer* JITStubs::cti_op_call_NotJSFunction(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr funcVal = ARG_src1; + + CallData callData; + CallType callType = funcVal.getCallData(callData); + + ASSERT(callType != CallTypeJS); + + if (callType == CallTypeHost) { + int registerOffset = ARG_int2; + int argCount = ARG_int3; + CallFrame* previousCallFrame = ARG_callFrame; + CallFrame* callFrame = CallFrame::create(previousCallFrame->registers() + registerOffset); + + callFrame->init(0, static_cast<Instruction*>(STUB_RETURN_ADDRESS), previousCallFrame->scopeChain(), previousCallFrame, 0, argCount, 0); + ARG_setCallFrame(callFrame); + + Register* argv = ARG_callFrame->registers() - RegisterFile::CallFrameHeaderSize - argCount; + ArgList argList(argv + 1, argCount - 1); + + JSValuePtr returnValue; + { + SamplingTool::HostCallRecord callRecord(CTI_SAMPLER); + + // FIXME: All host methods should be calling toThisObject, but this is not presently the case. 
+ JSValuePtr thisValue = argv[0].jsValue(callFrame); + if (thisValue == jsNull()) + thisValue = callFrame->globalThisValue(); + + returnValue = callData.native.function(callFrame, asObject(funcVal), thisValue, argList); + } + ARG_setCallFrame(previousCallFrame); + CHECK_FOR_EXCEPTION(); + + return JSValuePtr::encode(returnValue); + } + + ASSERT(callType == CallTypeNone); + + CallFrame* callFrame = ARG_callFrame; + CodeBlock* codeBlock = callFrame->codeBlock(); + unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS); + ARG_globalData->exception = createNotAFunctionError(ARG_callFrame, funcVal, vPCIndex, codeBlock); + VM_THROW_EXCEPTION(); +} + +void JITStubs::cti_op_create_arguments(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + Arguments* arguments = new (ARG_globalData) Arguments(ARG_callFrame); + ARG_callFrame->setCalleeArguments(arguments); + ARG_callFrame[RegisterFile::ArgumentsRegister] = arguments; +} + +void JITStubs::cti_op_create_arguments_no_params(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + Arguments* arguments = new (ARG_globalData) Arguments(ARG_callFrame, Arguments::NoParameters); + ARG_callFrame->setCalleeArguments(arguments); + ARG_callFrame[RegisterFile::ArgumentsRegister] = arguments; +} + +void JITStubs::cti_op_tear_off_activation(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + ASSERT(ARG_callFrame->codeBlock()->needsFullScopeChain()); + asActivation(ARG_src1)->copyRegisters(ARG_callFrame->optionalCalleeArguments()); +} + +void JITStubs::cti_op_tear_off_arguments(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + ASSERT(ARG_callFrame->codeBlock()->usesArguments() && !ARG_callFrame->codeBlock()->needsFullScopeChain()); + ARG_callFrame->optionalCalleeArguments()->copyRegisters(); +} + +void JITStubs::cti_op_profile_will_call(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + ASSERT(*ARG_profilerReference); + (*ARG_profilerReference)->willExecute(ARG_callFrame, ARG_src1); +} + +void JITStubs::cti_op_profile_did_call(STUB_ARGS) +{ + 
BEGIN_STUB_FUNCTION(); + + ASSERT(*ARG_profilerReference); + (*ARG_profilerReference)->didExecute(ARG_callFrame, ARG_src1); +} + +void JITStubs::cti_op_ret_scopeChain(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + ASSERT(ARG_callFrame->codeBlock()->needsFullScopeChain()); + ARG_callFrame->scopeChain()->deref(); +} + +JSObject* JITStubs::cti_op_new_array(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + ArgList argList(&ARG_callFrame->registers()[ARG_int1], ARG_int2); + return constructArray(ARG_callFrame, argList); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_resolve(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + ScopeChainNode* scopeChain = callFrame->scopeChain(); + + ScopeChainIterator iter = scopeChain->begin(); + ScopeChainIterator end = scopeChain->end(); + ASSERT(iter != end); + + Identifier& ident = *ARG_id1; + do { + JSObject* o = *iter; + PropertySlot slot(o); + if (o->getPropertySlot(callFrame, ident, slot)) { + JSValuePtr result = slot.getValue(callFrame, ident); + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); + } + } while (++iter != end); + + CodeBlock* codeBlock = callFrame->codeBlock(); + unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS); + ARG_globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock); + VM_THROW_EXCEPTION(); +} + +JSObject* JITStubs::cti_op_construct_JSConstruct(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + +#ifndef NDEBUG + ConstructData constructData; + ASSERT(asFunction(ARG_src1)->getConstructData(constructData) == ConstructTypeJS); +#endif + + Structure* structure; + if (ARG_src4.isObject()) + structure = asObject(ARG_src4)->inheritorID(); + else + structure = asFunction(ARG_src1)->scope().node()->globalObject()->emptyObjectStructure(); + return new (ARG_globalData) JSObject(structure); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_construct_NotJSConstruct(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame 
= ARG_callFrame; + + JSValuePtr constrVal = ARG_src1; + int argCount = ARG_int3; + int thisRegister = ARG_int5; + + ConstructData constructData; + ConstructType constructType = constrVal.getConstructData(constructData); + + if (constructType == ConstructTypeHost) { + ArgList argList(callFrame->registers() + thisRegister + 1, argCount - 1); + + JSValuePtr returnValue; + { + SamplingTool::HostCallRecord callRecord(CTI_SAMPLER); + returnValue = constructData.native.function(callFrame, asObject(constrVal), argList); + } + CHECK_FOR_EXCEPTION(); + + return JSValuePtr::encode(returnValue); + } + + ASSERT(constructType == ConstructTypeNone); + + CodeBlock* codeBlock = callFrame->codeBlock(); + unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS); + ARG_globalData->exception = createNotAConstructorError(callFrame, constrVal, vPCIndex, codeBlock); + VM_THROW_EXCEPTION(); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_get_by_val(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + JSGlobalData* globalData = ARG_globalData; + + JSValuePtr baseValue = ARG_src1; + JSValuePtr subscript = ARG_src2; + + JSValuePtr result; + + if (LIKELY(subscript.isUInt32Fast())) { + uint32_t i = subscript.getUInt32Fast(); + if (isJSArray(globalData, baseValue)) { + JSArray* jsArray = asArray(baseValue); + if (jsArray->canGetIndex(i)) + result = jsArray->getIndex(i); + else + result = jsArray->JSArray::get(callFrame, i); + } else if (isJSString(globalData, baseValue) && asString(baseValue)->canGetIndex(i)) + result = asString(baseValue)->getIndex(ARG_globalData, i); + else if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) { + // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks. 
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_val_byte_array)); + return JSValuePtr::encode(asByteArray(baseValue)->getIndex(callFrame, i)); + } else + result = baseValue.get(callFrame, i); + } else { + Identifier property(callFrame, subscript.toString(callFrame)); + result = baseValue.get(callFrame, property); + } + + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_get_by_val_byte_array(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + JSGlobalData* globalData = ARG_globalData; + + JSValuePtr baseValue = ARG_src1; + JSValuePtr subscript = ARG_src2; + + JSValuePtr result; + + if (LIKELY(subscript.isUInt32Fast())) { + uint32_t i = subscript.getUInt32Fast(); + if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) { + // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks. 
+ return JSValuePtr::encode(asByteArray(baseValue)->getIndex(callFrame, i)); + } + + result = baseValue.get(callFrame, i); + if (!isJSByteArray(globalData, baseValue)) + ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_val)); + } else { + Identifier property(callFrame, subscript.toString(callFrame)); + result = baseValue.get(callFrame, property); + } + + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +VoidPtrPair JITStubs::cti_op_resolve_func(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + ScopeChainNode* scopeChain = callFrame->scopeChain(); + + ScopeChainIterator iter = scopeChain->begin(); + ScopeChainIterator end = scopeChain->end(); + + // FIXME: add scopeDepthIsZero optimization + + ASSERT(iter != end); + + Identifier& ident = *ARG_id1; + JSObject* base; + do { + base = *iter; + PropertySlot slot(base); + if (base->getPropertySlot(callFrame, ident, slot)) { + // ECMA 11.2.3 says that if we hit an activation the this value should be null. + // However, section 10.2.3 says that in the case where the value provided + // by the caller is null, the global object should be used. It also says + // that the section does not apply to internal functions, but for simplicity + // of implementation we use the global object anyway here. This guarantees + // that in host objects you always get a valid object for this. + // We also handle wrapper substitution for the global object at the same time. 
+ JSObject* thisObj = base->toThisObject(callFrame); + JSValuePtr result = slot.getValue(callFrame, ident); + CHECK_FOR_EXCEPTION_AT_END(); + + RETURN_PAIR(thisObj, JSValuePtr::encode(result)); + } + ++iter; + } while (iter != end); + + CodeBlock* codeBlock = callFrame->codeBlock(); + unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS); + ARG_globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock); + VM_THROW_EXCEPTION_2(); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_sub(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr src1 = ARG_src1; + JSValuePtr src2 = ARG_src2; + + double left; + double right; + if (src1.getNumber(left) && src2.getNumber(right)) + return JSValuePtr::encode(jsNumber(ARG_globalData, left - right)); + + CallFrame* callFrame = ARG_callFrame; + JSValuePtr result = jsNumber(ARG_globalData, src1.toNumber(callFrame) - src2.toNumber(callFrame)); + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +void JITStubs::cti_op_put_by_val(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + JSGlobalData* globalData = ARG_globalData; + + JSValuePtr baseValue = ARG_src1; + JSValuePtr subscript = ARG_src2; + JSValuePtr value = ARG_src3; + + if (LIKELY(subscript.isUInt32Fast())) { + uint32_t i = subscript.getUInt32Fast(); + if (isJSArray(globalData, baseValue)) { + JSArray* jsArray = asArray(baseValue); + if (jsArray->canSetIndex(i)) + jsArray->setIndex(i, value); + else + jsArray->JSArray::put(callFrame, i, value); + } else if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) { + JSByteArray* jsByteArray = asByteArray(baseValue); + ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_put_by_val_byte_array)); + // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks. 
+ if (value.isInt32Fast()) { + jsByteArray->setIndex(i, value.getInt32Fast()); + return; + } else { + double dValue = 0; + if (value.getNumber(dValue)) { + jsByteArray->setIndex(i, dValue); + return; + } + } + + baseValue.put(callFrame, i, value); + } else + baseValue.put(callFrame, i, value); + } else { + Identifier property(callFrame, subscript.toString(callFrame)); + if (!ARG_globalData->exception) { // Don't put to an object if toString threw an exception. + PutPropertySlot slot; + baseValue.put(callFrame, property, value, slot); + } + } + + CHECK_FOR_EXCEPTION_AT_END(); +} + +void JITStubs::cti_op_put_by_val_array(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + JSValuePtr baseValue = ARG_src1; + int i = ARG_int2; + JSValuePtr value = ARG_src3; + + ASSERT(isJSArray(ARG_globalData, baseValue)); + + if (LIKELY(i >= 0)) + asArray(baseValue)->JSArray::put(callFrame, i, value); + else { + // This should work since we're re-boxing an immediate unboxed in JIT code. + ASSERT(JSValuePtr::makeInt32Fast(i)); + Identifier property(callFrame, JSValuePtr::makeInt32Fast(i).toString(callFrame)); + // FIXME: can toString throw an exception here? + if (!ARG_globalData->exception) { // Don't put to an object if toString threw an exception. 
+ PutPropertySlot slot; + baseValue.put(callFrame, property, value, slot); + } + } + + CHECK_FOR_EXCEPTION_AT_END(); +} + +void JITStubs::cti_op_put_by_val_byte_array(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + JSGlobalData* globalData = ARG_globalData; + + JSValuePtr baseValue = ARG_src1; + JSValuePtr subscript = ARG_src2; + JSValuePtr value = ARG_src3; + + if (LIKELY(subscript.isUInt32Fast())) { + uint32_t i = subscript.getUInt32Fast(); + if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) { + JSByteArray* jsByteArray = asByteArray(baseValue); + + // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks. + if (value.isInt32Fast()) { + jsByteArray->setIndex(i, value.getInt32Fast()); + return; + } else { + double dValue = 0; + if (value.getNumber(dValue)) { + jsByteArray->setIndex(i, dValue); + return; + } + } + } + + if (!isJSByteArray(globalData, baseValue)) + ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_put_by_val)); + baseValue.put(callFrame, i, value); + } else { + Identifier property(callFrame, subscript.toString(callFrame)); + if (!ARG_globalData->exception) { // Don't put to an object if toString threw an exception. 
+ PutPropertySlot slot; + baseValue.put(callFrame, property, value, slot); + } + } + + CHECK_FOR_EXCEPTION_AT_END(); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_lesseq(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + JSValuePtr result = jsBoolean(jsLessEq(callFrame, ARG_src1, ARG_src2)); + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +int JITStubs::cti_op_loop_if_true(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr src1 = ARG_src1; + + CallFrame* callFrame = ARG_callFrame; + + bool result = src1.toBoolean(callFrame); + CHECK_FOR_EXCEPTION_AT_END(); + return result; +} + +JSValueEncodedAsPointer* JITStubs::cti_op_negate(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr src = ARG_src1; + + double v; + if (src.getNumber(v)) + return JSValuePtr::encode(jsNumber(ARG_globalData, -v)); + + CallFrame* callFrame = ARG_callFrame; + JSValuePtr result = jsNumber(ARG_globalData, -src.toNumber(callFrame)); + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_resolve_base(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + return JSValuePtr::encode(JSC::resolveBase(ARG_callFrame, *ARG_id1, ARG_callFrame->scopeChain())); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_resolve_skip(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + ScopeChainNode* scopeChain = callFrame->scopeChain(); + + int skip = ARG_int2; + + ScopeChainIterator iter = scopeChain->begin(); + ScopeChainIterator end = scopeChain->end(); + ASSERT(iter != end); + while (skip--) { + ++iter; + ASSERT(iter != end); + } + Identifier& ident = *ARG_id1; + do { + JSObject* o = *iter; + PropertySlot slot(o); + if (o->getPropertySlot(callFrame, ident, slot)) { + JSValuePtr result = slot.getValue(callFrame, ident); + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); + } + } while (++iter != end); + + CodeBlock* codeBlock = callFrame->codeBlock(); + 
unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS); + ARG_globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock); + VM_THROW_EXCEPTION(); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_resolve_global(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + JSGlobalObject* globalObject = asGlobalObject(ARG_src1); + Identifier& ident = *ARG_id2; + unsigned globalResolveInfoIndex = ARG_int3; + ASSERT(globalObject->isGlobalObject()); + + PropertySlot slot(globalObject); + if (globalObject->getPropertySlot(callFrame, ident, slot)) { + JSValuePtr result = slot.getValue(callFrame, ident); + if (slot.isCacheable() && !globalObject->structure()->isDictionary()) { + GlobalResolveInfo& globalResolveInfo = callFrame->codeBlock()->globalResolveInfo(globalResolveInfoIndex); + if (globalResolveInfo.structure) + globalResolveInfo.structure->deref(); + globalObject->structure()->ref(); + globalResolveInfo.structure = globalObject->structure(); + globalResolveInfo.offset = slot.cachedOffset(); + return JSValuePtr::encode(result); + } + + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); + } + + unsigned vPCIndex = callFrame->codeBlock()->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS); + ARG_globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, callFrame->codeBlock()); + VM_THROW_EXCEPTION(); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_div(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr src1 = ARG_src1; + JSValuePtr src2 = ARG_src2; + + double left; + double right; + if (src1.getNumber(left) && src2.getNumber(right)) + return JSValuePtr::encode(jsNumber(ARG_globalData, left / right)); + + CallFrame* callFrame = ARG_callFrame; + JSValuePtr result = jsNumber(ARG_globalData, src1.toNumber(callFrame) / src2.toNumber(callFrame)); + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +JSValueEncodedAsPointer* 
JITStubs::cti_op_pre_dec(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr v = ARG_src1; + + CallFrame* callFrame = ARG_callFrame; + JSValuePtr result = jsNumber(ARG_globalData, v.toNumber(callFrame) - 1); + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +int JITStubs::cti_op_jless(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr src1 = ARG_src1; + JSValuePtr src2 = ARG_src2; + CallFrame* callFrame = ARG_callFrame; + + bool result = jsLess(callFrame, src1, src2); + CHECK_FOR_EXCEPTION_AT_END(); + return result; +} + +JSValueEncodedAsPointer* JITStubs::cti_op_not(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr src = ARG_src1; + + CallFrame* callFrame = ARG_callFrame; + + JSValuePtr result = jsBoolean(!src.toBoolean(callFrame)); + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +int JITStubs::cti_op_jtrue(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr src1 = ARG_src1; + + CallFrame* callFrame = ARG_callFrame; + + bool result = src1.toBoolean(callFrame); + CHECK_FOR_EXCEPTION_AT_END(); + return result; +} + +VoidPtrPair JITStubs::cti_op_post_inc(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr v = ARG_src1; + + CallFrame* callFrame = ARG_callFrame; + + JSValuePtr number = v.toJSNumber(callFrame); + CHECK_FOR_EXCEPTION_AT_END(); + + RETURN_PAIR(JSValuePtr::encode(number), JSValuePtr::encode(jsNumber(ARG_globalData, number.uncheckedGetNumber() + 1))); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_eq(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr src1 = ARG_src1; + JSValuePtr src2 = ARG_src2; + + CallFrame* callFrame = ARG_callFrame; + + ASSERT(!JSValuePtr::areBothInt32Fast(src1, src2)); + JSValuePtr result = jsBoolean(JSValuePtr::equalSlowCaseInline(callFrame, src1, src2)); + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_lshift(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr val = ARG_src1; + JSValuePtr shift = 
ARG_src2; + + int32_t left; + uint32_t right; + if (JSValuePtr::areBothInt32Fast(val, shift)) + return JSValuePtr::encode(jsNumber(ARG_globalData, val.getInt32Fast() << (shift.getInt32Fast() & 0x1f))); + if (val.numberToInt32(left) && shift.numberToUInt32(right)) + return JSValuePtr::encode(jsNumber(ARG_globalData, left << (right & 0x1f))); + + CallFrame* callFrame = ARG_callFrame; + JSValuePtr result = jsNumber(ARG_globalData, (val.toInt32(callFrame)) << (shift.toUInt32(callFrame) & 0x1f)); + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_bitand(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr src1 = ARG_src1; + JSValuePtr src2 = ARG_src2; + + int32_t left; + int32_t right; + if (src1.numberToInt32(left) && src2.numberToInt32(right)) + return JSValuePtr::encode(jsNumber(ARG_globalData, left & right)); + + CallFrame* callFrame = ARG_callFrame; + JSValuePtr result = jsNumber(ARG_globalData, src1.toInt32(callFrame) & src2.toInt32(callFrame)); + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_rshift(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr val = ARG_src1; + JSValuePtr shift = ARG_src2; + + int32_t left; + uint32_t right; + if (JSFastMath::canDoFastRshift(val, shift)) + return JSValuePtr::encode(JSFastMath::rightShiftImmediateNumbers(val, shift)); + if (val.numberToInt32(left) && shift.numberToUInt32(right)) + return JSValuePtr::encode(jsNumber(ARG_globalData, left >> (right & 0x1f))); + + CallFrame* callFrame = ARG_callFrame; + JSValuePtr result = jsNumber(ARG_globalData, (val.toInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f)); + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_bitnot(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr src = ARG_src1; + + int value; + if (src.numberToInt32(value)) + return 
JSValuePtr::encode(jsNumber(ARG_globalData, ~value)); + + CallFrame* callFrame = ARG_callFrame; + JSValuePtr result = jsNumber(ARG_globalData, ~src.toInt32(callFrame)); + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +VoidPtrPair JITStubs::cti_op_resolve_with_base(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + ScopeChainNode* scopeChain = callFrame->scopeChain(); + + ScopeChainIterator iter = scopeChain->begin(); + ScopeChainIterator end = scopeChain->end(); + + // FIXME: add scopeDepthIsZero optimization + + ASSERT(iter != end); + + Identifier& ident = *ARG_id1; + JSObject* base; + do { + base = *iter; + PropertySlot slot(base); + if (base->getPropertySlot(callFrame, ident, slot)) { + JSValuePtr result = slot.getValue(callFrame, ident); + CHECK_FOR_EXCEPTION_AT_END(); + + RETURN_PAIR(base, JSValuePtr::encode(result)); + } + ++iter; + } while (iter != end); + + CodeBlock* codeBlock = callFrame->codeBlock(); + unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS); + ARG_globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock); + VM_THROW_EXCEPTION_2(); +} + +JSObject* JITStubs::cti_op_new_func_exp(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + return ARG_funcexp1->makeFunction(ARG_callFrame, ARG_callFrame->scopeChain()); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_mod(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr dividendValue = ARG_src1; + JSValuePtr divisorValue = ARG_src2; + + CallFrame* callFrame = ARG_callFrame; + double d = dividendValue.toNumber(callFrame); + JSValuePtr result = jsNumber(ARG_globalData, fmod(d, divisorValue.toNumber(callFrame))); + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_less(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + JSValuePtr result = jsBoolean(jsLess(callFrame, ARG_src1, ARG_src2)); + 
CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_neq(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr src1 = ARG_src1; + JSValuePtr src2 = ARG_src2; + + ASSERT(!JSValuePtr::areBothInt32Fast(src1, src2)); + + CallFrame* callFrame = ARG_callFrame; + JSValuePtr result = jsBoolean(!JSValuePtr::equalSlowCaseInline(callFrame, src1, src2)); + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +VoidPtrPair JITStubs::cti_op_post_dec(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr v = ARG_src1; + + CallFrame* callFrame = ARG_callFrame; + + JSValuePtr number = v.toJSNumber(callFrame); + CHECK_FOR_EXCEPTION_AT_END(); + + RETURN_PAIR(JSValuePtr::encode(number), JSValuePtr::encode(jsNumber(ARG_globalData, number.uncheckedGetNumber() - 1))); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_urshift(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr val = ARG_src1; + JSValuePtr shift = ARG_src2; + + CallFrame* callFrame = ARG_callFrame; + + if (JSFastMath::canDoFastUrshift(val, shift)) + return JSValuePtr::encode(JSFastMath::rightShiftImmediateNumbers(val, shift)); + else { + JSValuePtr result = jsNumber(ARG_globalData, (val.toUInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f)); + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); + } +} + +JSValueEncodedAsPointer* JITStubs::cti_op_bitxor(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr src1 = ARG_src1; + JSValuePtr src2 = ARG_src2; + + CallFrame* callFrame = ARG_callFrame; + + JSValuePtr result = jsNumber(ARG_globalData, src1.toInt32(callFrame) ^ src2.toInt32(callFrame)); + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +JSObject* JITStubs::cti_op_new_regexp(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + return new (ARG_globalData) RegExpObject(ARG_callFrame->lexicalGlobalObject()->regExpStructure(), ARG_regexp1); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_bitor(STUB_ARGS) +{ + 
BEGIN_STUB_FUNCTION(); + + JSValuePtr src1 = ARG_src1; + JSValuePtr src2 = ARG_src2; + + CallFrame* callFrame = ARG_callFrame; + + JSValuePtr result = jsNumber(ARG_globalData, src1.toInt32(callFrame) | src2.toInt32(callFrame)); + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_call_eval(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + RegisterFile* registerFile = ARG_registerFile; + + Interpreter* interpreter = ARG_globalData->interpreter; + + JSValuePtr funcVal = ARG_src1; + int registerOffset = ARG_int2; + int argCount = ARG_int3; + + Register* newCallFrame = callFrame->registers() + registerOffset; + Register* argv = newCallFrame - RegisterFile::CallFrameHeaderSize - argCount; + JSValuePtr thisValue = argv[0].jsValue(callFrame); + JSGlobalObject* globalObject = callFrame->scopeChain()->globalObject(); + + if (thisValue == globalObject && funcVal == globalObject->evalFunction()) { + JSValuePtr exceptionValue = noValue(); + JSValuePtr result = interpreter->callEval(callFrame, registerFile, argv, argCount, registerOffset, exceptionValue); + if (UNLIKELY(exceptionValue != noValue())) { + ARG_globalData->exception = exceptionValue; + VM_THROW_EXCEPTION_AT_END(); + } + return JSValuePtr::encode(result); + } + + return JSValuePtr::encode(jsImpossibleValue()); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_throw(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + CodeBlock* codeBlock = callFrame->codeBlock(); + + unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS); + + JSValuePtr exceptionValue = ARG_src1; + ASSERT(exceptionValue); + + HandlerInfo* handler = ARG_globalData->interpreter->throwException(callFrame, exceptionValue, vPCIndex, true); + + if (!handler) { + *ARG_exception = exceptionValue; + return JSValuePtr::encode(jsNull()); + } + + ARG_setCallFrame(callFrame); + void* catchRoutine = 
handler->nativeCode.addressForExceptionHandler(); + ASSERT(catchRoutine); + STUB_SET_RETURN_ADDRESS(catchRoutine); + return JSValuePtr::encode(exceptionValue); +} + +JSPropertyNameIterator* JITStubs::cti_op_get_pnames(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + return JSPropertyNameIterator::create(ARG_callFrame, ARG_src1); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_next_pname(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSPropertyNameIterator* it = ARG_pni1; + JSValuePtr temp = it->next(ARG_callFrame); + if (!temp) + it->invalidate(); + return JSValuePtr::encode(temp); +} + +JSObject* JITStubs::cti_op_push_scope(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSObject* o = ARG_src1.toObject(ARG_callFrame); + CHECK_FOR_EXCEPTION(); + ARG_callFrame->setScopeChain(ARG_callFrame->scopeChain()->push(o)); + return o; +} + +void JITStubs::cti_op_pop_scope(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + ARG_callFrame->setScopeChain(ARG_callFrame->scopeChain()->pop()); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_typeof(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + return JSValuePtr::encode(jsTypeStringForValue(ARG_callFrame, ARG_src1)); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_is_undefined(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr v = ARG_src1; + return JSValuePtr::encode(jsBoolean(v.isCell() ? 
v.asCell()->structure()->typeInfo().masqueradesAsUndefined() : v.isUndefined())); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_is_boolean(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + return JSValuePtr::encode(jsBoolean(ARG_src1.isBoolean())); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_is_number(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + return JSValuePtr::encode(jsBoolean(ARG_src1.isNumber())); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_is_string(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + return JSValuePtr::encode(jsBoolean(isJSString(ARG_globalData, ARG_src1))); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_is_object(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + return JSValuePtr::encode(jsBoolean(jsIsObjectType(ARG_src1))); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_is_function(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + return JSValuePtr::encode(jsBoolean(jsIsFunctionType(ARG_src1))); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_stricteq(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr src1 = ARG_src1; + JSValuePtr src2 = ARG_src2; + + return JSValuePtr::encode(jsBoolean(JSValuePtr::strictEqual(src1, src2))); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_nstricteq(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr src1 = ARG_src1; + JSValuePtr src2 = ARG_src2; + + return JSValuePtr::encode(jsBoolean(!JSValuePtr::strictEqual(src1, src2))); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_to_jsnumber(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr src = ARG_src1; + CallFrame* callFrame = ARG_callFrame; + + JSValuePtr result = src.toJSNumber(callFrame); + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +JSValueEncodedAsPointer* JITStubs::cti_op_in(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + JSValuePtr baseVal = ARG_src2; + + if (!baseVal.isObject()) { + CallFrame* callFrame = ARG_callFrame; + CodeBlock* codeBlock = callFrame->codeBlock(); + unsigned vPCIndex = 
codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS); + ARG_globalData->exception = createInvalidParamError(callFrame, "in", baseVal, vPCIndex, codeBlock); + VM_THROW_EXCEPTION(); + } + + JSValuePtr propName = ARG_src1; + JSObject* baseObj = asObject(baseVal); + + uint32_t i; + if (propName.getUInt32(i)) + return JSValuePtr::encode(jsBoolean(baseObj->hasProperty(callFrame, i))); + + Identifier property(callFrame, propName.toString(callFrame)); + CHECK_FOR_EXCEPTION(); + return JSValuePtr::encode(jsBoolean(baseObj->hasProperty(callFrame, property))); +} + +JSObject* JITStubs::cti_op_push_new_scope(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSObject* scope = new (ARG_globalData) JSStaticScopeObject(ARG_callFrame, *ARG_id1, ARG_src2, DontDelete); + + CallFrame* callFrame = ARG_callFrame; + callFrame->setScopeChain(callFrame->scopeChain()->push(scope)); + return scope; +} + +void JITStubs::cti_op_jmp_scopes(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + unsigned count = ARG_int1; + CallFrame* callFrame = ARG_callFrame; + + ScopeChainNode* tmp = callFrame->scopeChain(); + while (count--) + tmp = tmp->pop(); + callFrame->setScopeChain(tmp); +} + +void JITStubs::cti_op_put_by_index(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + unsigned property = ARG_int2; + + ARG_src1.put(callFrame, property, ARG_src3); +} + +void* JITStubs::cti_op_switch_imm(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr scrutinee = ARG_src1; + unsigned tableIndex = ARG_int2; + CallFrame* callFrame = ARG_callFrame; + CodeBlock* codeBlock = callFrame->codeBlock(); + + if (scrutinee.isInt32Fast()) + return codeBlock->immediateSwitchJumpTable(tableIndex).ctiForValue(scrutinee.getInt32Fast()).addressForSwitch(); + else { + double value; + int32_t intValue; + if (scrutinee.getNumber(value) && ((intValue = static_cast<int32_t>(value)) == value)) + return codeBlock->immediateSwitchJumpTable(tableIndex).ctiForValue(intValue).addressForSwitch(); + else + return 
codeBlock->immediateSwitchJumpTable(tableIndex).ctiDefault.addressForSwitch(); + } +} + +void* JITStubs::cti_op_switch_char(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr scrutinee = ARG_src1; + unsigned tableIndex = ARG_int2; + CallFrame* callFrame = ARG_callFrame; + CodeBlock* codeBlock = callFrame->codeBlock(); + + void* result = codeBlock->characterSwitchJumpTable(tableIndex).ctiDefault.addressForSwitch(); + + if (scrutinee.isString()) { + UString::Rep* value = asString(scrutinee)->value().rep(); + if (value->size() == 1) + result = codeBlock->characterSwitchJumpTable(tableIndex).ctiForValue(value->data()[0]).addressForSwitch(); + } + + return result; +} + +void* JITStubs::cti_op_switch_string(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + JSValuePtr scrutinee = ARG_src1; + unsigned tableIndex = ARG_int2; + CallFrame* callFrame = ARG_callFrame; + CodeBlock* codeBlock = callFrame->codeBlock(); + + void* result = codeBlock->stringSwitchJumpTable(tableIndex).ctiDefault.addressForSwitch(); + + if (scrutinee.isString()) { + UString::Rep* value = asString(scrutinee)->value().rep(); + result = codeBlock->stringSwitchJumpTable(tableIndex).ctiForValue(value).addressForSwitch(); + } + + return result; +} + +JSValueEncodedAsPointer* JITStubs::cti_op_del_by_val(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + + JSValuePtr baseValue = ARG_src1; + JSObject* baseObj = baseValue.toObject(callFrame); // may throw + + JSValuePtr subscript = ARG_src2; + JSValuePtr result; + uint32_t i; + if (subscript.getUInt32(i)) + result = jsBoolean(baseObj->deleteProperty(callFrame, i)); + else { + CHECK_FOR_EXCEPTION(); + Identifier property(callFrame, subscript.toString(callFrame)); + CHECK_FOR_EXCEPTION(); + result = jsBoolean(baseObj->deleteProperty(callFrame, property)); + } + + CHECK_FOR_EXCEPTION_AT_END(); + return JSValuePtr::encode(result); +} + +void JITStubs::cti_op_put_getter(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = 
ARG_callFrame; + + ASSERT(ARG_src1.isObject()); + JSObject* baseObj = asObject(ARG_src1); + ASSERT(ARG_src3.isObject()); + baseObj->defineGetter(callFrame, *ARG_id2, asObject(ARG_src3)); +} + +void JITStubs::cti_op_put_setter(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + + ASSERT(ARG_src1.isObject()); + JSObject* baseObj = asObject(ARG_src1); + ASSERT(ARG_src3.isObject()); + baseObj->defineSetter(callFrame, *ARG_id2, asObject(ARG_src3)); +} + +JSObject* JITStubs::cti_op_new_error(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + CodeBlock* codeBlock = callFrame->codeBlock(); + unsigned type = ARG_int1; + JSValuePtr message = ARG_src2; + unsigned bytecodeOffset = ARG_int3; + + unsigned lineNumber = codeBlock->lineNumberForBytecodeOffset(callFrame, bytecodeOffset); + return Error::create(callFrame, static_cast<ErrorType>(type), message.toString(callFrame), lineNumber, codeBlock->ownerNode()->sourceID(), codeBlock->ownerNode()->sourceURL()); +} + +void JITStubs::cti_op_debug(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + + int debugHookID = ARG_int1; + int firstLine = ARG_int2; + int lastLine = ARG_int3; + + ARG_globalData->interpreter->debug(callFrame, static_cast<DebugHookID>(debugHookID), firstLine, lastLine); +} + +JSValueEncodedAsPointer* JITStubs::cti_vm_throw(STUB_ARGS) +{ + BEGIN_STUB_FUNCTION(); + + CallFrame* callFrame = ARG_callFrame; + CodeBlock* codeBlock = callFrame->codeBlock(); + JSGlobalData* globalData = ARG_globalData; + + unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, globalData->exceptionLocation); + + JSValuePtr exceptionValue = globalData->exception; + ASSERT(exceptionValue); + globalData->exception = noValue(); + + HandlerInfo* handler = globalData->interpreter->throwException(callFrame, exceptionValue, vPCIndex, false); + + if (!handler) { + *ARG_exception = exceptionValue; + return JSValuePtr::encode(jsNull()); + } + + 
ARG_setCallFrame(callFrame); + void* catchRoutine = handler->nativeCode.addressForExceptionHandler(); + ASSERT(catchRoutine); + STUB_SET_RETURN_ADDRESS(catchRoutine); + return JSValuePtr::encode(exceptionValue); +} + +#undef STUB_RETURN_ADDRESS +#undef STUB_SET_RETURN_ADDRESS +#undef BEGIN_STUB_FUNCTION +#undef CHECK_FOR_EXCEPTION +#undef CHECK_FOR_EXCEPTION_AT_END +#undef CHECK_FOR_EXCEPTION_VOID +#undef VM_THROW_EXCEPTION +#undef VM_THROW_EXCEPTION_2 +#undef VM_THROW_EXCEPTION_AT_END + +} // namespace JSC + +#endif // ENABLE(JIT) diff --git a/JavaScriptCore/jit/JITStubs.h b/JavaScriptCore/jit/JITStubs.h new file mode 100644 index 0000000..b7b8f35 --- /dev/null +++ b/JavaScriptCore/jit/JITStubs.h @@ -0,0 +1,226 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JITStubs_h +#define JITStubs_h + +#include "Register.h" +#include <wtf/Platform.h> + +#if ENABLE(JIT) + +namespace JSC { + + class ExecutablePool; + class JSGlobalData; + class JSObject; + class JSPropertyNameIterator; + class JSValueEncodedAsPointer; + class CodeBlock; + class JSValuePtr; + class Identifier; + class PropertySlot; + class PutPropertySlot; + +#if USE(JIT_STUB_ARGUMENT_VA_LIST) + #define STUB_ARGS void* args, ... + #define ARGS (reinterpret_cast<void**>(vl_args) - 1) +#else // JIT_STUB_ARGUMENT_REGISTER or JIT_STUB_ARGUMENT_STACK + #define STUB_ARGS void** args + #define ARGS (args) +#endif + +#if USE(JIT_STUB_ARGUMENT_REGISTER) + #if PLATFORM(X86_64) + #define JIT_STUB + #elif COMPILER(MSVC) + #define JIT_STUB __fastcall + #elif COMPILER(GCC) + #define JIT_STUB __attribute__ ((fastcall)) + #else + #error Need to support register calling convention in this compiler + #endif +#else // JIT_STUB_ARGUMENT_VA_LIST or JIT_STUB_ARGUMENT_STACK + #if COMPILER(MSVC) + #define JIT_STUB __cdecl + #else + #define JIT_STUB + #endif +#endif + +// The Mac compilers are fine with this, +#if PLATFORM(MAC) + struct VoidPtrPair { + void* first; + void* second; + }; +#define RETURN_PAIR(a,b) VoidPtrPair pair = { a, b }; return pair +#else + typedef uint64_t VoidPtrPair; + union VoidPtrPairValue { + struct { void* first; void* second; } s; + VoidPtrPair i; + }; +#define RETURN_PAIR(a,b) VoidPtrPairValue pair = {{ a, b }}; return pair.i 
+#endif + + class JITStubs { + public: + JITStubs(JSGlobalData*); + + static JSObject* JIT_STUB cti_op_construct_JSConstruct(STUB_ARGS); + static JSObject* JIT_STUB cti_op_convert_this(STUB_ARGS); + static JSObject* JIT_STUB cti_op_new_array(STUB_ARGS); + static JSObject* JIT_STUB cti_op_new_error(STUB_ARGS); + static JSObject* JIT_STUB cti_op_new_func(STUB_ARGS); + static JSObject* JIT_STUB cti_op_new_func_exp(STUB_ARGS); + static JSObject* JIT_STUB cti_op_new_object(STUB_ARGS); + static JSObject* JIT_STUB cti_op_new_regexp(STUB_ARGS); + static JSObject* JIT_STUB cti_op_push_activation(STUB_ARGS); + static JSObject* JIT_STUB cti_op_push_new_scope(STUB_ARGS); + static JSObject* JIT_STUB cti_op_push_scope(STUB_ARGS); + static JSPropertyNameIterator* JIT_STUB cti_op_get_pnames(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_add(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_bitand(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_bitnot(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_bitor(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_bitxor(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_call_NotJSFunction(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_call_eval(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_construct_NotJSConstruct(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_del_by_id(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_del_by_val(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_div(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_eq(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_array_fail(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_generic(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_proto_fail(STUB_ARGS); + static 
JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_proto_list(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_proto_list_full(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_second(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_self_fail(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_string_fail(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_val(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_val_byte_array(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_in(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_instanceof(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_is_boolean(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_is_function(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_is_number(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_is_object(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_is_string(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_is_undefined(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_less(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_lesseq(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_lshift(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_mod(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_mul(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_negate(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_neq(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_next_pname(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_not(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_nstricteq(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_pre_dec(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_pre_inc(STUB_ARGS); + static JSValueEncodedAsPointer* 
JIT_STUB cti_op_resolve(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_resolve_base(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_resolve_global(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_resolve_skip(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_rshift(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_stricteq(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_sub(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_throw(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_to_jsnumber(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_typeof(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_op_urshift(STUB_ARGS); + static JSValueEncodedAsPointer* JIT_STUB cti_vm_throw(STUB_ARGS); + static VoidPtrPair JIT_STUB cti_op_call_arityCheck(STUB_ARGS); + static VoidPtrPair JIT_STUB cti_op_post_dec(STUB_ARGS); + static VoidPtrPair JIT_STUB cti_op_post_inc(STUB_ARGS); + static VoidPtrPair JIT_STUB cti_op_resolve_func(STUB_ARGS); + static VoidPtrPair JIT_STUB cti_op_resolve_with_base(STUB_ARGS); + static int JIT_STUB cti_op_jless(STUB_ARGS); + static int JIT_STUB cti_op_jtrue(STUB_ARGS); + static int JIT_STUB cti_op_loop_if_less(STUB_ARGS); + static int JIT_STUB cti_op_loop_if_lesseq(STUB_ARGS); + static int JIT_STUB cti_op_loop_if_true(STUB_ARGS); + static int JIT_STUB cti_timeout_check(STUB_ARGS); + static void JIT_STUB cti_op_create_arguments(STUB_ARGS); + static void JIT_STUB cti_op_create_arguments_no_params(STUB_ARGS); + static void JIT_STUB cti_op_debug(STUB_ARGS); + static void JIT_STUB cti_op_end(STUB_ARGS); + static void JIT_STUB cti_op_jmp_scopes(STUB_ARGS); + static void JIT_STUB cti_op_pop_scope(STUB_ARGS); + static void JIT_STUB cti_op_profile_did_call(STUB_ARGS); + static void JIT_STUB cti_op_profile_will_call(STUB_ARGS); + static void JIT_STUB cti_op_put_by_id(STUB_ARGS); + static void JIT_STUB cti_op_put_by_id_fail(STUB_ARGS); + 
static void JIT_STUB cti_op_put_by_id_generic(STUB_ARGS); + static void JIT_STUB cti_op_put_by_id_second(STUB_ARGS); + static void JIT_STUB cti_op_put_by_index(STUB_ARGS); + static void JIT_STUB cti_op_put_by_val(STUB_ARGS); + static void JIT_STUB cti_op_put_by_val_array(STUB_ARGS); + static void JIT_STUB cti_op_put_by_val_byte_array(STUB_ARGS); + static void JIT_STUB cti_op_put_getter(STUB_ARGS); + static void JIT_STUB cti_op_put_setter(STUB_ARGS); + static void JIT_STUB cti_op_ret_scopeChain(STUB_ARGS); + static void JIT_STUB cti_op_tear_off_activation(STUB_ARGS); + static void JIT_STUB cti_op_tear_off_arguments(STUB_ARGS); + static void JIT_STUB cti_register_file_check(STUB_ARGS); + static void* JIT_STUB cti_op_call_JSFunction(STUB_ARGS); + static void* JIT_STUB cti_op_switch_char(STUB_ARGS); + static void* JIT_STUB cti_op_switch_imm(STUB_ARGS); + static void* JIT_STUB cti_op_switch_string(STUB_ARGS); + static void* JIT_STUB cti_vm_dontLazyLinkCall(STUB_ARGS); + static void* JIT_STUB cti_vm_lazyLinkCall(STUB_ARGS); + + static void tryCacheGetByID(CallFrame*, CodeBlock*, void* returnAddress, JSValuePtr baseValue, const Identifier& propertyName, const PropertySlot&); + static void tryCachePutByID(CallFrame*, CodeBlock*, void* returnAddress, JSValuePtr baseValue, const PutPropertySlot&); + + void* ctiArrayLengthTrampoline() { return m_ctiArrayLengthTrampoline; } + void* ctiStringLengthTrampoline() { return m_ctiStringLengthTrampoline; } + void* ctiVirtualCallPreLink() { return m_ctiVirtualCallPreLink; } + void* ctiVirtualCallLink() { return m_ctiVirtualCallLink; } + void* ctiVirtualCall() { return m_ctiVirtualCall; } + + private: + RefPtr<ExecutablePool> m_executablePool; + + void* m_ctiArrayLengthTrampoline; + void* m_ctiStringLengthTrampoline; + void* m_ctiVirtualCallPreLink; + void* m_ctiVirtualCallLink; + void* m_ctiVirtualCall; + }; + +} // namespace JSC + +#endif // ENABLE(JIT) + +#endif // JITStubs_h diff --git a/JavaScriptCore/jsc.cpp 
b/JavaScriptCore/jsc.cpp index 666bd58..746868b 100644 --- a/JavaScriptCore/jsc.cpp +++ b/JavaScriptCore/jsc.cpp @@ -54,6 +54,7 @@ #if COMPILER(MSVC) && !PLATFORM(WIN_CE) #include <crtdbg.h> #include <windows.h> +#include <mmsystem.h> #endif #if PLATFORM(QT) @@ -76,6 +77,17 @@ static JSValuePtr functionLoad(ExecState*, JSObject*, JSValuePtr, const ArgList& static JSValuePtr functionReadline(ExecState*, JSObject*, JSValuePtr, const ArgList&); static NO_RETURN JSValuePtr functionQuit(ExecState*, JSObject*, JSValuePtr, const ArgList&); +struct Script { + bool isFile; + char *argument; + + Script(bool isFile, char *argument) + : isFile(isFile) + , argument(argument) + { + } +}; + struct Options { Options() : interactive(false) @@ -85,7 +97,7 @@ struct Options { bool interactive; bool dump; - Vector<UString> fileNames; + Vector<Script> scripts; Vector<UString> arguments; }; @@ -233,9 +245,10 @@ JSValuePtr functionLoad(ExecState* exec, JSObject*, JSValuePtr, const ArgList& a return throwError(exec, GeneralError, "Could not open file."); JSGlobalObject* globalObject = exec->lexicalGlobalObject(); - evaluate(globalObject->globalExec(), globalObject->globalScopeChain(), makeSource(script.data(), fileName)); - - return jsUndefined(); + Completion result = evaluate(globalObject->globalExec(), globalObject->globalScopeChain(), makeSource(script.data(), fileName)); + if (result.complType() == Throw) + exec->setException(result.value()); + return result.value(); } JSValuePtr functionReadline(ExecState* exec, JSObject*, JSValuePtr, const ArgList&) @@ -284,6 +297,10 @@ int main(int argc, char** argv) _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_FILE); #endif +#if COMPILER(MSVC) && !PLATFORM(WIN_CE) + timeBeginPeriod(1); +#endif + #if PLATFORM(QT) QCoreApplication app(argc, argv); #endif @@ -310,9 +327,11 @@ static void cleanupGlobalData(JSGlobalData* globalData) globalData->deref(); } -static bool runWithScripts(GlobalObject* globalObject, const Vector<UString>& fileNames, bool 
dump) +static bool runWithScripts(GlobalObject* globalObject, const Vector<Script>& scripts, bool dump) { - Vector<char> script; + UString script; + UString fileName; + Vector<char> scriptBuffer; if (dump) BytecodeGenerator::setDumpsGeneratedCode(true); @@ -323,16 +342,21 @@ static bool runWithScripts(GlobalObject* globalObject, const Vector<UString>& fi #endif bool success = true; - for (size_t i = 0; i < fileNames.size(); i++) { - UString fileName = fileNames[i]; - - if (!fillBufferWithContentsOfFile(fileName, script)) - return false; // fail early so we can catch missing files + for (size_t i = 0; i < scripts.size(); i++) { + if (scripts[i].isFile) { + fileName = scripts[i].argument; + if (!fillBufferWithContentsOfFile(fileName, scriptBuffer)) + return false; // fail early so we can catch missing files + script = scriptBuffer.data(); + } else { + script = scripts[i].argument; + fileName = "[Command Line]"; + } #if ENABLE(OPCODE_SAMPLING) interpreter->sampler()->start(); #endif - Completion completion = evaluate(globalObject->globalExec(), globalObject->globalScopeChain(), makeSource(script.data(), fileName)); + Completion completion = evaluate(globalObject->globalExec(), globalObject->globalScopeChain(), makeSource(script, fileName)); success = success && completion.complType() != Throw; if (dump) { if (completion.complType() == Throw) @@ -355,7 +379,11 @@ static bool runWithScripts(GlobalObject* globalObject, const Vector<UString>& fi return success; } -static void runInteractive(GlobalObject* globalObject) +static +#if !HAVE(READLINE) +NO_RETURN +#endif +void runInteractive(GlobalObject* globalObject) { while (true) { #if HAVE(READLINE) @@ -389,15 +417,16 @@ static void runInteractive(GlobalObject* globalObject) printf("\n"); } -static NO_RETURN void printUsageStatement() +static NO_RETURN void printUsageStatement(bool help = false) { fprintf(stderr, "Usage: jsc [options] [files] [-- arguments]\n"); fprintf(stderr, " -d Dumps bytecode (debug builds only)\n"); 
+ fprintf(stderr, " -e Evaluate argument as script code\n"); fprintf(stderr, " -f Specifies a source file (deprecated)\n"); fprintf(stderr, " -h|--help Prints this help message\n"); fprintf(stderr, " -i Enables interactive mode (default if no files are specified)\n"); fprintf(stderr, " -s Installs signal handlers that exit on a crash (Unix platforms only)\n"); - exit(EXIT_FAILURE); + exit(help ? EXIT_SUCCESS : EXIT_FAILURE); } static void parseArguments(int argc, char** argv, Options& options) @@ -408,11 +437,17 @@ static void parseArguments(int argc, char** argv, Options& options) if (strcmp(arg, "-f") == 0) { if (++i == argc) printUsageStatement(); - options.fileNames.append(argv[i]); + options.scripts.append(Script(true, argv[i])); + continue; + } + if (strcmp(arg, "-e") == 0) { + if (++i == argc) + printUsageStatement(); + options.scripts.append(Script(false, argv[i])); continue; } if (strcmp(arg, "-h") == 0 || strcmp(arg, "--help") == 0) { - printUsageStatement(); + printUsageStatement(true); } if (strcmp(arg, "-i") == 0) { options.interactive = true; @@ -435,10 +470,10 @@ static void parseArguments(int argc, char** argv, Options& options) ++i; break; } - options.fileNames.append(argv[i]); + options.scripts.append(Script(true, argv[i])); } - if (options.fileNames.isEmpty()) + if (options.scripts.isEmpty()) options.interactive = true; for (; i < argc; ++i) @@ -453,7 +488,7 @@ int jscmain(int argc, char** argv, JSGlobalData* globalData) parseArguments(argc, argv, options); GlobalObject* globalObject = new (globalData) GlobalObject(options.arguments); - bool success = runWithScripts(globalObject, options.fileNames, options.dump); + bool success = runWithScripts(globalObject, options.scripts, options.dump); if (options.interactive && success) runInteractive(globalObject); diff --git a/JavaScriptCore/jscore.bkl b/JavaScriptCore/jscore.bkl index 262c883..e88b9f0 100644 --- a/JavaScriptCore/jscore.bkl +++ b/JavaScriptCore/jscore.bkl @@ -55,6 +55,7 @@ JavaScriptCore 
Bakefile project file. <include>$(SRCDIR)</include> <include>$(SRCDIR)/..</include> <include>$(SRCDIR)/API</include> + <include>$(SRCDIR)/assembler</include> <include>$(SRCDIR)/bytecompiler</include> <include>$(SRCDIR)/DerivedSources/JavaScriptCore</include> <include>$(SRCDIR)/ForwardingHeaders</include> @@ -95,6 +96,7 @@ JavaScriptCore Bakefile project file. <depends>jscore</depends> <include>$(SRCDIR)</include> <include>$(WK_ROOT)/JavaScriptCore</include> + <include>$(WK_ROOT)/JavaScriptCore/assembler</include> <include>$(WK_ROOT)/JavaScriptCore/bytecompiler</include> <include>$(WK_ROOT)/JavaScriptCore/debugger</include> <include>$(WK_ROOT)/JavaScriptCore/parser</include> diff --git a/JavaScriptCore/parser/Lexer.cpp b/JavaScriptCore/parser/Lexer.cpp index 22de4a0..6f65096 100644 --- a/JavaScriptCore/parser/Lexer.cpp +++ b/JavaScriptCore/parser/Lexer.cpp @@ -33,7 +33,6 @@ #include <string.h> #include <wtf/ASCIICType.h> #include <wtf/Assertions.h> -#include <wtf/unicode/Unicode.h> using namespace WTF; using namespace Unicode; @@ -80,8 +79,8 @@ Lexer::Lexer(JSGlobalData* globalData) , m_globalData(globalData) , m_mainTable(JSC::mainTable) { - m_buffer8.reserveCapacity(initialReadBufferCapacity); - m_buffer16.reserveCapacity(initialReadBufferCapacity); + m_buffer8.reserveInitialCapacity(initialReadBufferCapacity); + m_buffer16.reserveInitialCapacity(initialReadBufferCapacity); } Lexer::~Lexer() @@ -589,7 +588,7 @@ int Lexer::lex(void* p1, void* p2) bool Lexer::isWhiteSpace() const { - return m_current == '\t' || m_current == 0x0b || m_current == 0x0c || isSeparatorSpace(m_current); + return isWhiteSpace(m_current); } bool Lexer::isLineTerminator() @@ -884,11 +883,11 @@ void Lexer::clear() m_identifiers.clear(); Vector<char> newBuffer8; - newBuffer8.reserveCapacity(initialReadBufferCapacity); + newBuffer8.reserveInitialCapacity(initialReadBufferCapacity); m_buffer8.swap(newBuffer8); Vector<UChar> newBuffer16; - newBuffer16.reserveCapacity(initialReadBufferCapacity); + 
newBuffer16.reserveInitialCapacity(initialReadBufferCapacity); m_buffer16.swap(newBuffer16); m_isReparsing = false; diff --git a/JavaScriptCore/parser/Lexer.h b/JavaScriptCore/parser/Lexer.h index 63d3892..63c2da9 100644 --- a/JavaScriptCore/parser/Lexer.h +++ b/JavaScriptCore/parser/Lexer.h @@ -27,6 +27,7 @@ #include "SegmentedVector.h" #include "SourceCode.h" #include <wtf/Vector.h> +#include <wtf/unicode/Unicode.h> namespace JSC { @@ -90,6 +91,16 @@ namespace JSC { void clear(); SourceCode sourceCode(int openBrace, int closeBrace, int firstLine) { return SourceCode(m_source->provider(), openBrace, closeBrace + 1, firstLine); } + static inline bool isWhiteSpace(int ch) + { + return ch == '\t' || ch == 0x0b || ch == 0x0c || WTF::Unicode::isSeparatorSpace(ch); + } + + static inline bool isLineTerminator(int ch) + { + return ch == '\r' || ch == '\n' || ch == 0x2028 || ch == 0x2029; + } + private: friend class JSGlobalData; Lexer(JSGlobalData*); diff --git a/JavaScriptCore/parser/Nodes.cpp b/JavaScriptCore/parser/Nodes.cpp index 96fe50b..f7fa739 100644 --- a/JavaScriptCore/parser/Nodes.cpp +++ b/JavaScriptCore/parser/Nodes.cpp @@ -640,7 +640,7 @@ RegisterID* FunctionCallResolveNode::emitBytecode(BytecodeGenerator& generator, return generator.emitCall(generator.finalDestination(dst, func.get()), func.get(), thisRegister.get(), m_args.get(), divot(), startOffset(), endOffset()); } - RefPtr<RegisterID> func = generator.tempDestination(dst); + RefPtr<RegisterID> func = generator.newTemporary(); RefPtr<RegisterID> thisRegister = generator.newTemporary(); int identifierStart = divot() - startOffset(); generator.emitExpressionInfo(identifierStart + m_ident.size(), m_ident.size(), 0); @@ -1579,6 +1579,7 @@ void ConstStatementNode::releaseNodes(NodeReleaser& releaser) RegisterID* ConstStatementNode::emitBytecode(BytecodeGenerator& generator, RegisterID*) { + generator.emitDebugHook(WillExecuteStatement, firstLine(), lastLine()); return generator.emitNode(m_next.get()); } @@ 
-1589,8 +1590,6 @@ static inline RegisterID* statementListEmitCode(const StatementVector& statement StatementVector::const_iterator end = statements.end(); for (StatementVector::const_iterator it = statements.begin(); it != end; ++it) { StatementNode* n = it->get(); - if (!n->isLoop()) - generator.emitDebugHook(WillExecuteStatement, n->firstLine(), n->lastLine()); generator.emitNode(dst, n); } return 0; @@ -1624,8 +1623,9 @@ RegisterID* BlockNode::emitBytecode(BytecodeGenerator& generator, RegisterID* ds // ------------------------------ EmptyStatementNode --------------------------- -RegisterID* EmptyStatementNode::emitBytecode(BytecodeGenerator&, RegisterID* dst) +RegisterID* EmptyStatementNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst) { + generator.emitDebugHook(WillExecuteStatement, firstLine(), lastLine()); return dst; } @@ -1642,6 +1642,7 @@ RegisterID* DebuggerStatementNode::emitBytecode(BytecodeGenerator& generator, Re RegisterID* ExprStatementNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst) { ASSERT(m_expr); + generator.emitDebugHook(WillExecuteStatement, firstLine(), lastLine()); return generator.emitNode(dst, m_expr.get()); } @@ -1660,6 +1661,7 @@ void VarStatementNode::releaseNodes(NodeReleaser& releaser) RegisterID* VarStatementNode::emitBytecode(BytecodeGenerator& generator, RegisterID*) { ASSERT(m_expr); + generator.emitDebugHook(WillExecuteStatement, firstLine(), lastLine()); return generator.emitNode(m_expr.get()); } @@ -1678,14 +1680,13 @@ void IfNode::releaseNodes(NodeReleaser& releaser) RegisterID* IfNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst) { + generator.emitDebugHook(WillExecuteStatement, firstLine(), lastLine()); + RefPtr<Label> afterThen = generator.newLabel(); RegisterID* cond = generator.emitNode(m_condition.get()); generator.emitJumpIfFalse(cond, afterThen.get()); - if (!m_ifBlock->isBlock()) - generator.emitDebugHook(WillExecuteStatement, m_ifBlock->firstLine(), 
m_ifBlock->lastLine()); - generator.emitNode(dst, m_ifBlock.get()); generator.emitLabel(afterThen.get()); @@ -1708,23 +1709,19 @@ void IfElseNode::releaseNodes(NodeReleaser& releaser) RegisterID* IfElseNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst) { + generator.emitDebugHook(WillExecuteStatement, firstLine(), lastLine()); + RefPtr<Label> beforeElse = generator.newLabel(); RefPtr<Label> afterElse = generator.newLabel(); RegisterID* cond = generator.emitNode(m_condition.get()); generator.emitJumpIfFalse(cond, beforeElse.get()); - if (!m_ifBlock->isBlock()) - generator.emitDebugHook(WillExecuteStatement, m_ifBlock->firstLine(), m_ifBlock->lastLine()); - generator.emitNode(dst, m_ifBlock.get()); generator.emitJump(afterElse.get()); generator.emitLabel(beforeElse.get()); - if (!m_elseBlock->isBlock()) - generator.emitDebugHook(WillExecuteStatement, m_elseBlock->firstLine(), m_elseBlock->lastLine()); - generator.emitNode(dst, m_elseBlock.get()); generator.emitLabel(afterElse.get()); @@ -1754,10 +1751,7 @@ RegisterID* DoWhileNode::emitBytecode(BytecodeGenerator& generator, RegisterID* generator.emitLabel(topOfLoop.get()); generator.emitDebugHook(WillExecuteStatement, firstLine(), lastLine()); - - if (!m_statement->isBlock()) - generator.emitDebugHook(WillExecuteStatement, m_statement->firstLine(), m_statement->lastLine()); - + RefPtr<RegisterID> result = generator.emitNode(dst, m_statement.get()); generator.emitLabel(scope->continueTarget()); @@ -1790,10 +1784,7 @@ RegisterID* WhileNode::emitBytecode(BytecodeGenerator& generator, RegisterID* ds RefPtr<Label> topOfLoop = generator.newLabel(); generator.emitLabel(topOfLoop.get()); - - if (!m_statement->isBlock()) - generator.emitDebugHook(WillExecuteStatement, m_statement->firstLine(), m_statement->lastLine()); - + generator.emitNode(dst, m_statement.get()); generator.emitLabel(scope->continueTarget()); @@ -1840,11 +1831,10 @@ RegisterID* ForNode::emitBytecode(BytecodeGenerator& generator, RegisterID* 
dst) RefPtr<Label> topOfLoop = generator.newLabel(); generator.emitLabel(topOfLoop.get()); - if (!m_statement->isBlock()) - generator.emitDebugHook(WillExecuteStatement, m_statement->firstLine(), m_statement->lastLine()); RefPtr<RegisterID> result = generator.emitNode(dst, m_statement.get()); generator.emitLabel(scope->continueTarget()); + generator.emitDebugHook(WillExecuteStatement, firstLine(), lastLine()); if (m_expr3) generator.emitNode(generator.ignoredResult(), m_expr3.get()); @@ -1953,12 +1943,11 @@ RegisterID* ForInNode::emitBytecode(BytecodeGenerator& generator, RegisterID* ds generator.emitPutByVal(base.get(), subscript, propertyName); } - if (!m_statement->isBlock()) - generator.emitDebugHook(WillExecuteStatement, m_statement->firstLine(), m_statement->lastLine()); generator.emitNode(dst, m_statement.get()); generator.emitLabel(scope->continueTarget()); generator.emitNextPropertyName(propertyName, iter.get(), loopStart.get()); + generator.emitDebugHook(WillExecuteStatement, firstLine(), lastLine()); generator.emitLabel(scope->breakTarget()); return dst; } @@ -1968,6 +1957,8 @@ RegisterID* ForInNode::emitBytecode(BytecodeGenerator& generator, RegisterID* ds // ECMA 12.7 RegisterID* ContinueNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst) { + generator.emitDebugHook(WillExecuteStatement, firstLine(), lastLine()); + LabelScope* scope = generator.continueTarget(m_ident); if (!scope) @@ -1984,6 +1975,8 @@ RegisterID* ContinueNode::emitBytecode(BytecodeGenerator& generator, RegisterID* // ECMA 12.8 RegisterID* BreakNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst) { + generator.emitDebugHook(WillExecuteStatement, firstLine(), lastLine()); + LabelScope* scope = generator.breakTarget(m_ident); if (!scope) @@ -2009,14 +2002,20 @@ void ReturnNode::releaseNodes(NodeReleaser& releaser) RegisterID* ReturnNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst) { + generator.emitDebugHook(WillExecuteStatement, firstLine(), 
lastLine()); if (generator.codeType() != FunctionCode) return emitThrowError(generator, SyntaxError, "Invalid return statement."); if (dst == generator.ignoredResult()) dst = 0; RegisterID* r0 = m_value ? generator.emitNode(dst, m_value.get()) : generator.emitLoad(dst, jsUndefined()); + RefPtr<RegisterID> returnRegister; if (generator.scopeDepth()) { RefPtr<Label> l0 = generator.newLabel(); + if (generator.hasFinaliser() && !r0->isTemporary()) { + returnRegister = generator.emitMove(generator.newTemporary(), r0); + r0 = returnRegister.get(); + } generator.emitJumpScopes(l0.get(), 0); generator.emitLabel(l0.get()); } @@ -2039,6 +2038,8 @@ void WithNode::releaseNodes(NodeReleaser& releaser) RegisterID* WithNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst) { + generator.emitDebugHook(WillExecuteStatement, firstLine(), lastLine()); + RefPtr<RegisterID> scope = generator.newTemporary(); generator.emitNode(scope.get(), m_expr.get()); // scope must be protected until popped generator.emitExpressionInfo(m_divot, m_expressionLength, 0); @@ -2244,6 +2245,8 @@ void SwitchNode::releaseNodes(NodeReleaser& releaser) RegisterID* SwitchNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst) { + generator.emitDebugHook(WillExecuteStatement, firstLine(), lastLine()); + RefPtr<LabelScope> scope = generator.newLabelScope(LabelScope::Switch); RefPtr<RegisterID> r0 = generator.emitNode(m_expr.get()); @@ -2267,6 +2270,8 @@ void LabelNode::releaseNodes(NodeReleaser& releaser) RegisterID* LabelNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst) { + generator.emitDebugHook(WillExecuteStatement, firstLine(), lastLine()); + if (generator.breakTarget(m_name)) return emitThrowError(generator, SyntaxError, "Duplicate label: %s.", m_name); @@ -2291,12 +2296,14 @@ void ThrowNode::releaseNodes(NodeReleaser& releaser) RegisterID* ThrowNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst) { + generator.emitDebugHook(WillExecuteStatement, 
firstLine(), lastLine()); + if (dst == generator.ignoredResult()) dst = 0; - RefPtr<RegisterID> expr = generator.emitNode(dst, m_expr.get()); + RefPtr<RegisterID> expr = generator.emitNode(m_expr.get()); generator.emitExpressionInfo(divot(), startOffset(), endOffset()); generator.emitThrow(expr.get()); - return dst; + return 0; } // ------------------------------ TryNode -------------------------------------- @@ -2315,6 +2322,8 @@ void TryNode::releaseNodes(NodeReleaser& releaser) RegisterID* TryNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst) { + generator.emitDebugHook(WillExecuteStatement, firstLine(), lastLine()); + RefPtr<Label> tryStartLabel = generator.newLabel(); RefPtr<Label> tryEndLabel = generator.newLabel(); RefPtr<Label> finallyStart; diff --git a/JavaScriptCore/parser/Nodes.h b/JavaScriptCore/parser/Nodes.h index baa9984..f209133 100644 --- a/JavaScriptCore/parser/Nodes.h +++ b/JavaScriptCore/parser/Nodes.h @@ -205,7 +205,6 @@ namespace JSC { virtual bool isExprStatement() const JSC_FAST_CALL { return false; } virtual bool isBlock() const JSC_FAST_CALL { return false; } - virtual bool isLoop() const JSC_FAST_CALL { return false; } private: int m_lastLine; @@ -1831,8 +1830,6 @@ namespace JSC { virtual RegisterID* emitBytecode(BytecodeGenerator&, RegisterID* = 0) JSC_FAST_CALL; - virtual bool isLoop() const JSC_FAST_CALL { return true; } - private: RefPtr<StatementNode> m_statement; RefPtr<ExpressionNode> m_expr; @@ -1852,8 +1849,6 @@ namespace JSC { virtual RegisterID* emitBytecode(BytecodeGenerator&, RegisterID* = 0) JSC_FAST_CALL; - virtual bool isLoop() const JSC_FAST_CALL { return true; } - private: RefPtr<ExpressionNode> m_expr; RefPtr<StatementNode> m_statement; @@ -1877,8 +1872,6 @@ namespace JSC { virtual RegisterID* emitBytecode(BytecodeGenerator&, RegisterID* = 0) JSC_FAST_CALL; - virtual bool isLoop() const JSC_FAST_CALL { return true; } - private: RefPtr<ExpressionNode> m_expr1; RefPtr<ExpressionNode> m_expr2; @@ -1897,8 
+1890,6 @@ namespace JSC { virtual RegisterID* emitBytecode(BytecodeGenerator&, RegisterID* = 0) JSC_FAST_CALL; - virtual bool isLoop() const JSC_FAST_CALL { return true; } - private: Identifier m_ident; RefPtr<ExpressionNode> m_init; diff --git a/JavaScriptCore/pcre/pcre_exec.cpp b/JavaScriptCore/pcre/pcre_exec.cpp index 34e2786..80a092a 100644 --- a/JavaScriptCore/pcre/pcre_exec.cpp +++ b/JavaScriptCore/pcre/pcre_exec.cpp @@ -56,7 +56,7 @@ the JavaScript specification. There are also some supporting functions. */ using namespace WTF; -#ifdef __GNUC__ +#if COMPILER(GCC) #define USE_COMPUTED_GOTO_FOR_MATCH_RECURSION //#define USE_COMPUTED_GOTO_FOR_MATCH_OPCODE_LOOP #endif @@ -175,7 +175,7 @@ reqByte match. */ /* The below limit restricts the number of "recursive" match calls in order to avoid spending exponential time on complex regular expressions. */ -static const unsigned matchLimit = 100000; +static const unsigned matchLimit = 1000000; #ifdef DEBUG /************************************************* @@ -447,6 +447,7 @@ static int match(const UChar* subjectPtr, const unsigned char* instructionPtr, i int min; bool minimize = false; /* Initialization not really needed, but some compilers think so. */ unsigned remainingMatchCount = matchLimit; + int othercase; /* Declare here to avoid errors during jumps */ MatchStack stack; @@ -1186,7 +1187,7 @@ RECURSE: stack.currentFrame->args.instructionPtr += stack.currentFrame->locals.length; if (stack.currentFrame->locals.fc <= 0xFFFF) { - int othercase = md.ignoreCase ? jsc_pcre_ucp_othercase(stack.currentFrame->locals.fc) : -1; + othercase = md.ignoreCase ? 
jsc_pcre_ucp_othercase(stack.currentFrame->locals.fc) : -1; for (int i = 1; i <= min; i++) { if (*stack.currentFrame->args.subjectPtr != stack.currentFrame->locals.fc && *stack.currentFrame->args.subjectPtr != othercase) diff --git a/JavaScriptCore/pcre/pcre_internal.h b/JavaScriptCore/pcre/pcre_internal.h index 2916765..0016bb5 100644 --- a/JavaScriptCore/pcre/pcre_internal.h +++ b/JavaScriptCore/pcre/pcre_internal.h @@ -85,7 +85,7 @@ total length. */ offsets within the compiled regex. The default is 2, which allows for compiled patterns up to 64K long. */ -#define LINK_SIZE 2 +#define LINK_SIZE 3 /* Define DEBUG to get debugging output on stdout. */ @@ -124,28 +124,60 @@ static inline void put2ByteValue(unsigned char* opcodePtr, int value) opcodePtr[1] = value; } +static inline void put3ByteValue(unsigned char* opcodePtr, int value) +{ + ASSERT(value >= 0 && value <= 0xFFFFFF); + opcodePtr[0] = value >> 16; + opcodePtr[1] = value >> 8; + opcodePtr[2] = value; +} + static inline int get2ByteValue(const unsigned char* opcodePtr) { return (opcodePtr[0] << 8) | opcodePtr[1]; } +static inline int get3ByteValue(const unsigned char* opcodePtr) +{ + return (opcodePtr[0] << 16) | (opcodePtr[1] << 8) | opcodePtr[2]; +} + static inline void put2ByteValueAndAdvance(unsigned char*& opcodePtr, int value) { put2ByteValue(opcodePtr, value); opcodePtr += 2; } +static inline void put3ByteValueAndAdvance(unsigned char*& opcodePtr, int value) +{ + put3ByteValue(opcodePtr, value); + opcodePtr += 3; +} + static inline void putLinkValueAllowZero(unsigned char* opcodePtr, int value) { +#if LINK_SIZE == 3 + put3ByteValue(opcodePtr, value); +#elif LINK_SIZE == 2 put2ByteValue(opcodePtr, value); +#else +# error LINK_SIZE not supported. +#endif } static inline int getLinkValueAllowZero(const unsigned char* opcodePtr) { +#if LINK_SIZE == 3 + return get3ByteValue(opcodePtr); +#elif LINK_SIZE == 2 return get2ByteValue(opcodePtr); +#else +# error LINK_SIZE not supported. 
+#endif } -#define MAX_PATTERN_SIZE (1 << 16) +#define MAX_PATTERN_SIZE 1024 * 1024 // Derived by empirical testing of compile time in PCRE and WREC. +COMPILE_ASSERT(MAX_PATTERN_SIZE < (1 << (8 * LINK_SIZE)), pcre_max_pattern_fits_in_bytecode); static inline void putLinkValue(unsigned char* opcodePtr, int value) { diff --git a/JavaScriptCore/profiler/Profiler.cpp b/JavaScriptCore/profiler/Profiler.cpp index 2de8f84..ace0a33 100644 --- a/JavaScriptCore/profiler/Profiler.cpp +++ b/JavaScriptCore/profiler/Profiler.cpp @@ -58,6 +58,8 @@ Profiler* Profiler::profiler() void Profiler::startProfiling(ExecState* exec, const UString& title) { + ASSERT_ARG(title, !title.isNull()); + // Check if we currently have a Profile for this global ExecState and title. // If so return early and don't create a new Profile. ExecState* globalExec = exec ? exec->lexicalGlobalObject()->globalExec() : 0; diff --git a/JavaScriptCore/runtime/Arguments.cpp b/JavaScriptCore/runtime/Arguments.cpp index b0429a9..ea4b4f0 100644 --- a/JavaScriptCore/runtime/Arguments.cpp +++ b/JavaScriptCore/runtime/Arguments.cpp @@ -71,6 +71,13 @@ void Arguments::mark() void Arguments::fillArgList(ExecState* exec, ArgList& args) { + if (UNLIKELY(d->overrodeLength)) { + unsigned length = get(exec, exec->propertyNames().length).toUInt32(exec); + for (unsigned i = 0; i < length; i++) + args.append(get(exec, i)); + return; + } + if (LIKELY(!d->deletedArguments)) { if (LIKELY(!d->numParameters)) { args.initialize(d->extraArguments, d->numArguments); diff --git a/JavaScriptCore/runtime/ArrayPrototype.cpp b/JavaScriptCore/runtime/ArrayPrototype.cpp index 4cd229a..654386b 100644 --- a/JavaScriptCore/runtime/ArrayPrototype.cpp +++ b/JavaScriptCore/runtime/ArrayPrototype.cpp @@ -26,6 +26,7 @@ #include "CodeBlock.h" #include "Interpreter.h" +#include "JIT.h" #include "ObjectPrototype.h" #include "Lookup.h" #include "Operations.h" @@ -67,8 +68,16 @@ static inline bool isNumericCompareFunction(CallType callType, const CallData& 
c { if (callType != CallTypeJS) return false; - - return callData.js.functionBody->bytecode(callData.js.scopeChain).isNumericCompareFunction(); + + CodeBlock& codeBlock = callData.js.functionBody->bytecode(callData.js.scopeChain); +#if ENABLE(JIT) + // If the JIT is enabled then we need to preserve the invariant that every + // function with a CodeBlock also has JIT code. + if (!codeBlock.jitCode()) + JIT::compile(callData.js.scopeChain->globalData, &codeBlock); +#endif + + return codeBlock.isNumericCompareFunction(); } // ------------------------------ ArrayPrototype ---------------------------- @@ -278,10 +287,10 @@ JSValuePtr arrayProtoFuncConcat(ExecState* exec, JSObject*, JSValuePtr thisValue ArgList::const_iterator end = args.end(); while (1) { if (curArg.isObject(&JSArray::info)) { - JSArray* curArray = asArray(curArg); - unsigned length = curArray->length(); + unsigned length = curArg.get(exec, exec->propertyNames().length).toUInt32(exec); + JSObject* curObject = curArg.toObject(exec); for (unsigned k = 0; k < length; ++k) { - if (JSValuePtr v = getProperty(exec, curArray, k)) + if (JSValuePtr v = getProperty(exec, curObject, k)) arr->put(exec, n, v); n++; } @@ -300,7 +309,7 @@ JSValuePtr arrayProtoFuncConcat(ExecState* exec, JSObject*, JSValuePtr thisValue JSValuePtr arrayProtoFuncPop(ExecState* exec, JSObject*, JSValuePtr thisValue, const ArgList&) { - if (exec->interpreter()->isJSArray(thisValue)) + if (isJSArray(&exec->globalData(), thisValue)) return asArray(thisValue)->pop(); JSObject* thisObj = thisValue.toThisObject(exec); @@ -319,7 +328,7 @@ JSValuePtr arrayProtoFuncPop(ExecState* exec, JSObject*, JSValuePtr thisValue, c JSValuePtr arrayProtoFuncPush(ExecState* exec, JSObject*, JSValuePtr thisValue, const ArgList& args) { - if (exec->interpreter()->isJSArray(thisValue) && args.size() == 1) { + if (isJSArray(&exec->globalData(), thisValue) && args.size() == 1) { JSArray* array = asArray(thisValue); array->push(exec, args.begin()->jsValue(exec)); 
return jsNumber(exec, array->length()); diff --git a/JavaScriptCore/runtime/Collector.cpp b/JavaScriptCore/runtime/Collector.cpp index 13e7f51..a4fea7d 100644 --- a/JavaScriptCore/runtime/Collector.cpp +++ b/JavaScriptCore/runtime/Collector.cpp @@ -349,6 +349,9 @@ collect: // didn't find a block, and GC didn't reclaim anything, need to allocate a new block size_t numBlocks = heap.numBlocks; if (usedBlocks == numBlocks) { + static const size_t maxNumBlocks = ULONG_MAX / sizeof(CollectorBlock*) / GROWTH_FACTOR; + if (numBlocks > maxNumBlocks) + CRASH(); numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR); heap.numBlocks = numBlocks; heap.blocks = static_cast<CollectorBlock**>(fastRealloc(heap.blocks, numBlocks * sizeof(CollectorBlock*))); diff --git a/JavaScriptCore/runtime/Completion.cpp b/JavaScriptCore/runtime/Completion.cpp index 0231a15..5655fa5 100644 --- a/JavaScriptCore/runtime/Completion.cpp +++ b/JavaScriptCore/runtime/Completion.cpp @@ -68,7 +68,7 @@ Completion evaluate(ExecState* exec, ScopeChain& scopeChain, const SourceCode& s if (exception) { if (exception.isObject() && asObject(exception)->isWatchdogException()) - return Completion(Interrupted, result); + return Completion(Interrupted, exception); return Completion(Throw, exception); } return Completion(Normal, result); diff --git a/JavaScriptCore/runtime/DateMath.cpp b/JavaScriptCore/runtime/DateMath.cpp index b452963..356d7a1 100644 --- a/JavaScriptCore/runtime/DateMath.cpp +++ b/JavaScriptCore/runtime/DateMath.cpp @@ -919,14 +919,14 @@ UString formatTime(const GregorianDateTime &t, bool utc) snprintf(buffer, sizeof(buffer), "%02d:%02d:%02d GMT", t.hour, t.minute, t.second); } else { int offset = abs(gmtoffset(t)); - char tzname[70]; + char timeZoneName[70]; struct tm gtm = t; - strftime(tzname, sizeof(tzname), "%Z", >m); + strftime(timeZoneName, sizeof(timeZoneName), "%Z", >m); - if (tzname[0]) { + if (timeZoneName[0]) { snprintf(buffer, sizeof(buffer), "%02d:%02d:%02d GMT%c%02d%02d (%s)", 
t.hour, t.minute, t.second, - gmtoffset(t) < 0 ? '-' : '+', offset / (60*60), (offset / 60) % 60, tzname); + gmtoffset(t) < 0 ? '-' : '+', offset / (60*60), (offset / 60) % 60, timeZoneName); } else { snprintf(buffer, sizeof(buffer), "%02d:%02d:%02d GMT%c%02d%02d", t.hour, t.minute, t.second, diff --git a/JavaScriptCore/runtime/DatePrototype.cpp b/JavaScriptCore/runtime/DatePrototype.cpp index b325070..fad1d55 100644 --- a/JavaScriptCore/runtime/DatePrototype.cpp +++ b/JavaScriptCore/runtime/DatePrototype.cpp @@ -27,6 +27,11 @@ #include "ObjectPrototype.h" #include "DateInstance.h" #include <float.h> + +#if !PLATFORM(MAC) && HAVE(LANGINFO_H) +#include <langinfo.h> +#endif + #include <limits.h> #include <locale.h> #include <math.h> @@ -181,7 +186,11 @@ static JSCell* formatLocaleDate(ExecState* exec, DateInstance*, double timeInMil static JSCell* formatLocaleDate(ExecState* exec, const GregorianDateTime& gdt, LocaleDateTimeFormat format) { +#if HAVE(LANGINFO_H) + static const nl_item formats[] = { D_T_FMT, D_FMT, T_FMT }; +#else static const char* const formatStrings[] = { "%#c", "%#x", "%X" }; +#endif // Offset year if needed struct tm localTM = gdt; @@ -190,10 +199,26 @@ static JSCell* formatLocaleDate(ExecState* exec, const GregorianDateTime& gdt, L if (yearNeedsOffset) localTM.tm_year = equivalentYearForDST(year) - 1900; +#if HAVE(LANGINFO_H) + // We do not allow strftime to generate dates with 2-digits years, + // both to avoid ambiguity, and a crash in strncpy, for years that + // need offset. 
+ char* formatString = strdup(nl_langinfo(formats[format])); + char* yPos = strchr(formatString, 'y'); + if (yPos) + *yPos = 'Y'; +#endif + // Do the formatting const int bufsize = 128; char timebuffer[bufsize]; + +#if HAVE(LANGINFO_H) + size_t ret = strftime(timebuffer, bufsize, formatString, &localTM); + free(formatString); +#else size_t ret = strftime(timebuffer, bufsize, formatStrings[format], &localTM); +#endif if (ret == 0) return jsEmptyString(exec); diff --git a/JavaScriptCore/runtime/ExceptionHelpers.cpp b/JavaScriptCore/runtime/ExceptionHelpers.cpp index d1b5aac..30f1503 100644 --- a/JavaScriptCore/runtime/ExceptionHelpers.cpp +++ b/JavaScriptCore/runtime/ExceptionHelpers.cpp @@ -47,6 +47,8 @@ public: } virtual bool isWatchdogException() const { return true; } + + virtual UString toString(ExecState*) const { return "JavaScript execution exceeded timeout."; } }; JSValuePtr createInterruptedExecutionException(JSGlobalData* globalData) diff --git a/JavaScriptCore/runtime/FunctionConstructor.cpp b/JavaScriptCore/runtime/FunctionConstructor.cpp index d58334a..ff77b9d 100644 --- a/JavaScriptCore/runtime/FunctionConstructor.cpp +++ b/JavaScriptCore/runtime/FunctionConstructor.cpp @@ -93,16 +93,19 @@ FunctionBodyNode* extractFunctionBody(ProgramNode* program) // ECMA 15.3.2 The Function Constructor JSObject* constructFunction(ExecState* exec, const ArgList& args, const Identifier& functionName, const UString& sourceURL, int lineNumber) { + // Functions need to have a space following the opening { due to for web compatibility + // see https://bugs.webkit.org/show_bug.cgi?id=24350 + // We also need \n before the closing } to handle // comments at the end of the last line UString program; if (args.isEmpty()) - program = "(function(){})"; + program = "(function() { \n})"; else if (args.size() == 1) - program = "(function(){" + args.at(exec, 0).toString(exec) + "})"; + program = "(function() { " + args.at(exec, 0).toString(exec) + "\n})"; else { program = "(function(" 
+ args.at(exec, 0).toString(exec); for (size_t i = 1; i < args.size() - 1; i++) program += "," + args.at(exec, i).toString(exec); - program += "){" + args.at(exec, args.size() - 1).toString(exec) + "})"; + program += ") { " + args.at(exec, args.size() - 1).toString(exec) + "\n})"; } int errLine; diff --git a/JavaScriptCore/runtime/FunctionPrototype.cpp b/JavaScriptCore/runtime/FunctionPrototype.cpp index 7be2685..01fc57c 100644 --- a/JavaScriptCore/runtime/FunctionPrototype.cpp +++ b/JavaScriptCore/runtime/FunctionPrototype.cpp @@ -26,6 +26,7 @@ #include "JSFunction.h" #include "JSString.h" #include "Interpreter.h" +#include "Lexer.h" #include "PrototypeFunction.h" namespace JSC { @@ -63,11 +64,29 @@ CallType FunctionPrototype::getCallData(CallData& callData) // Functions +// Compatibility hack for the Optimost JavaScript library. (See <rdar://problem/6595040>.) +static inline void insertSemicolonIfNeeded(UString& functionBody) +{ + ASSERT(functionBody[0] == '{'); + ASSERT(functionBody[functionBody.size() - 1] == '}'); + + for (size_t i = functionBody.size() - 2; i > 0; --i) { + UChar ch = functionBody[i]; + if (!Lexer::isWhiteSpace(ch) && !Lexer::isLineTerminator(ch)) { + if (ch != ';' && ch != '}') + functionBody = functionBody.substr(0, i + 1) + ";" + functionBody.substr(i + 1, functionBody.size() - (i + 1)); + return; + } + } +} + JSValuePtr functionProtoFuncToString(ExecState* exec, JSObject*, JSValuePtr thisValue, const ArgList&) { if (thisValue.isObject(&JSFunction::info)) { JSFunction* function = asFunction(thisValue); - return jsString(exec, "function " + function->name(&exec->globalData()) + "(" + function->body()->paramString() + ") " + function->body()->toSourceString()); + UString functionBody = function->body()->toSourceString(); + insertSemicolonIfNeeded(functionBody); + return jsString(exec, "function " + function->name(&exec->globalData()) + "(" + function->body()->paramString() + ") " + functionBody); } if 
(thisValue.isObject(&InternalFunction::info)) { @@ -85,32 +104,25 @@ JSValuePtr functionProtoFuncApply(ExecState* exec, JSObject*, JSValuePtr thisVal if (callType == CallTypeNone) return throwError(exec, TypeError); - JSValuePtr thisArg = args.at(exec, 0); - JSValuePtr argArray = args.at(exec, 1); - - JSValuePtr applyThis; - if (thisArg.isUndefinedOrNull()) - applyThis = exec->globalThisValue(); - else - applyThis = thisArg.toObject(exec); + JSValuePtr array = args.at(exec, 1); ArgList applyArgs; - if (!argArray.isUndefinedOrNull()) { - if (!argArray.isObject()) + if (!array.isUndefinedOrNull()) { + if (!array.isObject()) return throwError(exec, TypeError); - if (asObject(argArray)->classInfo() == &Arguments::info) - asArguments(argArray)->fillArgList(exec, applyArgs); - else if (exec->interpreter()->isJSArray(argArray)) - asArray(argArray)->fillArgList(exec, applyArgs); - else if (asObject(argArray)->inherits(&JSArray::info)) { - unsigned length = asArray(argArray)->get(exec, exec->propertyNames().length).toUInt32(exec); + if (asObject(array)->classInfo() == &Arguments::info) + asArguments(array)->fillArgList(exec, applyArgs); + else if (isJSArray(&exec->globalData(), array)) + asArray(array)->fillArgList(exec, applyArgs); + else if (asObject(array)->inherits(&JSArray::info)) { + unsigned length = asArray(array)->get(exec, exec->propertyNames().length).toUInt32(exec); for (unsigned i = 0; i < length; ++i) - applyArgs.append(asArray(argArray)->get(exec, i)); + applyArgs.append(asArray(array)->get(exec, i)); } else return throwError(exec, TypeError); } - return call(exec, thisValue, callType, callData, applyThis, applyArgs); + return call(exec, thisValue, callType, callData, args.at(exec, 0), applyArgs); } JSValuePtr functionProtoFuncCall(ExecState* exec, JSObject*, JSValuePtr thisValue, const ArgList& args) @@ -120,17 +132,9 @@ JSValuePtr functionProtoFuncCall(ExecState* exec, JSObject*, JSValuePtr thisValu if (callType == CallTypeNone) return throwError(exec, 
TypeError); - JSValuePtr thisArg = args.at(exec, 0); - - JSObject* callThis; - if (thisArg.isUndefinedOrNull()) - callThis = exec->globalThisValue(); - else - callThis = thisArg.toObject(exec); - - ArgList argsTail; - args.getSlice(1, argsTail); - return call(exec, thisValue, callType, callData, callThis, argsTail); + ArgList callArgs; + args.getSlice(1, callArgs); + return call(exec, thisValue, callType, callData, args.at(exec, 0), callArgs); } } // namespace JSC diff --git a/JavaScriptCore/runtime/JSArray.h b/JavaScriptCore/runtime/JSArray.h index 7eecf33..f873f13 100644 --- a/JavaScriptCore/runtime/JSArray.h +++ b/JavaScriptCore/runtime/JSArray.h @@ -122,6 +122,8 @@ namespace JSC { return static_cast<JSArray*>(asObject(value)); } + inline bool isJSArray(JSGlobalData* globalData, JSValuePtr v) { return v.isCell() && v.asCell()->vptr() == globalData->jsArrayVPtr; } + } // namespace JSC #endif // JSArray_h diff --git a/JavaScriptCore/runtime/JSByteArray.h b/JavaScriptCore/runtime/JSByteArray.h index 19c8f0e..eb8e0ac 100644 --- a/JavaScriptCore/runtime/JSByteArray.h +++ b/JavaScriptCore/runtime/JSByteArray.h @@ -33,7 +33,7 @@ namespace JSC { class JSByteArray : public JSObject { - friend class Interpreter; + friend class VPtrSet; public: bool canAccessIndex(unsigned i) { return i < m_storage->length(); } JSValuePtr getIndex(ExecState* exec, unsigned i) @@ -88,6 +88,8 @@ namespace JSC { size_t length() const { return m_storage->length(); } + WTF::ByteArray* storage() const { return m_storage.get(); } + private: enum VPtrStealingHackType { VPtrStealingHack }; JSByteArray(VPtrStealingHackType) @@ -105,6 +107,9 @@ namespace JSC { { return static_cast<JSByteArray*>(asCell(value)); } -} -#endif + inline bool isJSByteArray(JSGlobalData* globalData, JSValuePtr v) { return v.isCell() && v.asCell()->vptr() == globalData->jsByteArrayVPtr; } + +} // namespace JSC + +#endif // JSByteArray_h diff --git a/JavaScriptCore/runtime/JSCell.h b/JavaScriptCore/runtime/JSCell.h index 
43d81a5..1973c54 100644 --- a/JavaScriptCore/runtime/JSCell.h +++ b/JavaScriptCore/runtime/JSCell.h @@ -32,15 +32,15 @@ namespace JSC { class JSCell : Noncopyable { - friend class JIT; friend class GetterSetter; friend class Heap; + friend class JIT; friend class JSNumberCell; friend class JSObject; friend class JSPropertyNameIterator; friend class JSString; friend class JSValuePtr; - friend class Interpreter; + friend class VPtrSet; private: explicit JSCell(Structure*); diff --git a/JavaScriptCore/runtime/JSFunction.h b/JavaScriptCore/runtime/JSFunction.h index 6a43737..87ca2a2 100644 --- a/JavaScriptCore/runtime/JSFunction.h +++ b/JavaScriptCore/runtime/JSFunction.h @@ -39,7 +39,8 @@ namespace JSC { class JSFunction : public InternalFunction { friend class JIT; - friend class Interpreter; + friend class JITStubs; + friend class VPtrSet; typedef InternalFunction Base; diff --git a/JavaScriptCore/runtime/JSGlobalData.cpp b/JavaScriptCore/runtime/JSGlobalData.cpp index 10b584d..3a2f7c0 100644 --- a/JavaScriptCore/runtime/JSGlobalData.cpp +++ b/JavaScriptCore/runtime/JSGlobalData.cpp @@ -35,6 +35,8 @@ #include "FunctionConstructor.h" #include "Interpreter.h" #include "JSActivation.h" +#include "JSArray.h" +#include "JSByteArray.h" #include "JSClassRef.h" #include "JSLock.h" #include "JSNotAnObject.h" @@ -64,10 +66,42 @@ extern const HashTable regExpTable; extern const HashTable regExpConstructorTable; extern const HashTable stringTable; -JSGlobalData::JSGlobalData(bool isShared) - : initializingLazyNumericCompareFunction(false) - , interpreter(new Interpreter) - , exception(noValue()) +struct VPtrSet { + VPtrSet(); + + void* jsArrayVPtr; + void* jsByteArrayVPtr; + void* jsStringVPtr; + void* jsFunctionVPtr; +}; + +VPtrSet::VPtrSet() +{ + // Bizarrely, calling fastMalloc here is faster than allocating space on the stack. 
+ void* storage = fastMalloc(sizeof(CollectorBlock)); + + JSCell* jsArray = new (storage) JSArray(JSArray::createStructure(jsNull())); + jsArrayVPtr = jsArray->vptr(); + jsArray->~JSCell(); + + JSCell* jsByteArray = new (storage) JSByteArray(JSByteArray::VPtrStealingHack); + jsByteArrayVPtr = jsByteArray->vptr(); + jsByteArray->~JSCell(); + + JSCell* jsString = new (storage) JSString(JSString::VPtrStealingHack); + jsStringVPtr = jsString->vptr(); + jsString->~JSCell(); + + JSCell* jsFunction = new (storage) JSFunction(JSFunction::createStructure(jsNull())); + jsFunctionVPtr = jsFunction->vptr(); + jsFunction->~JSCell(); + + fastFree(storage); +} + +JSGlobalData::JSGlobalData(bool isShared, const VPtrSet& vptrSet) + : isSharedInstance(isShared) + , clientData(0) , arrayTable(new HashTable(JSC::arrayTable)) , dateTable(new HashTable(JSC::dateTable)) , mathTable(new HashTable(JSC::mathTable)) @@ -84,24 +118,31 @@ JSGlobalData::JSGlobalData(bool isShared) #if !USE(ALTERNATE_JSIMMEDIATE) , numberStructure(JSNumberCell::createStructure(jsNull())) #endif + , jsArrayVPtr(vptrSet.jsArrayVPtr) + , jsByteArrayVPtr(vptrSet.jsByteArrayVPtr) + , jsStringVPtr(vptrSet.jsStringVPtr) + , jsFunctionVPtr(vptrSet.jsFunctionVPtr) , identifierTable(createIdentifierTable()) , propertyNames(new CommonIdentifiers(this)) , emptyList(new ArgList) - , newParserObjects(0) - , parserObjectExtraRefCounts(0) , lexer(new Lexer(this)) , parser(new Parser) + , interpreter(new Interpreter) +#if ENABLE(JIT) + , jitStubs(this) +#endif + , heap(this) + , exception(noValue()) + , initializingLazyNumericCompareFunction(false) + , newParserObjects(0) + , parserObjectExtraRefCounts(0) , head(0) , dynamicGlobalObject(0) - , isSharedInstance(isShared) - , clientData(0) , scopeNodeBeingReparsed(0) - , heap(this) { #if PLATFORM(MAC) startProfilerServerIfNeeded(); #endif - interpreter->initialize(this); } JSGlobalData::~JSGlobalData() @@ -145,9 +186,9 @@ JSGlobalData::~JSGlobalData() delete clientData; } 
-PassRefPtr<JSGlobalData> JSGlobalData::create() +PassRefPtr<JSGlobalData> JSGlobalData::create(bool isShared) { - return adoptRef(new JSGlobalData); + return adoptRef(new JSGlobalData(isShared, VPtrSet())); } PassRefPtr<JSGlobalData> JSGlobalData::createLeaked() @@ -171,7 +212,7 @@ JSGlobalData& JSGlobalData::sharedInstance() { JSGlobalData*& instance = sharedInstanceInternal(); if (!instance) { - instance = new JSGlobalData(true); + instance = create(true).releaseRef(); #if ENABLE(JSC_MULTIPLE_THREADS) instance->makeUsableFromMultipleThreads(); #endif diff --git a/JavaScriptCore/runtime/JSGlobalData.h b/JavaScriptCore/runtime/JSGlobalData.h index 4223191..1936f1f 100644 --- a/JavaScriptCore/runtime/JSGlobalData.h +++ b/JavaScriptCore/runtime/JSGlobalData.h @@ -29,13 +29,15 @@ #ifndef JSGlobalData_h #define JSGlobalData_h -#include <wtf/Forward.h> -#include <wtf/HashMap.h> -#include <wtf/RefCounted.h> #include "Collector.h" #include "ExecutableAllocator.h" -#include "SmallStrings.h" +#include "JITStubs.h" #include "JSValue.h" +#include "SmallStrings.h" +#include "TimeoutChecker.h" +#include <wtf/Forward.h> +#include <wtf/HashMap.h> +#include <wtf/RefCounted.h> struct OpaqueJSClass; struct OpaqueJSClassContextData; @@ -57,13 +59,18 @@ namespace JSC { class Structure; class UString; struct HashTable; + struct VPtrSet; class JSGlobalData : public RefCounted<JSGlobalData> { public: + struct ClientData { + virtual ~ClientData() = 0; + }; + static bool sharedInstanceExists(); static JSGlobalData& sharedInstance(); - static PassRefPtr<JSGlobalData> create(); + static PassRefPtr<JSGlobalData> create(bool isShared = false); static PassRefPtr<JSGlobalData> createLeaked(); ~JSGlobalData(); @@ -72,16 +79,8 @@ namespace JSC { void makeUsableFromMultipleThreads() { heap.makeUsableFromMultipleThreads(); } #endif - const Vector<Instruction>& numericCompareFunction(ExecState*); - Vector<Instruction> lazyNumericCompareFunction; - bool initializingLazyNumericCompareFunction; - - 
Interpreter* interpreter; - - JSValuePtr exception; -#if ENABLE(JIT) - void* exceptionLocation; -#endif + bool isSharedInstance; + ClientData* clientData; const HashTable* arrayTable; const HashTable* dateTable; @@ -101,48 +100,54 @@ namespace JSC { RefPtr<Structure> numberStructure; #endif + void* jsArrayVPtr; + void* jsByteArrayVPtr; + void* jsStringVPtr; + void* jsFunctionVPtr; + IdentifierTable* identifierTable; CommonIdentifiers* propertyNames; const ArgList* emptyList; // Lists are supposed to be allocated on the stack to have their elements properly marked, which is not the case here - but this list has nothing to mark. - SmallStrings smallStrings; - - HashMap<OpaqueJSClass*, OpaqueJSClassContextData*> opaqueJSClassData; - HashSet<ParserRefCounted*>* newParserObjects; - HashCountedSet<ParserRefCounted*>* parserObjectExtraRefCounts; +#if ENABLE(ASSEMBLER) + ExecutableAllocator executableAllocator; +#endif Lexer* lexer; Parser* parser; + Interpreter* interpreter; +#if ENABLE(JIT) + JITStubs jitStubs; +#endif + TimeoutChecker timeoutChecker; + Heap heap; - JSGlobalObject* head; - JSGlobalObject* dynamicGlobalObject; + JSValuePtr exception; +#if ENABLE(JIT) + void* exceptionLocation; +#endif - bool isSharedInstance; + const Vector<Instruction>& numericCompareFunction(ExecState*); + Vector<Instruction> lazyNumericCompareFunction; + bool initializingLazyNumericCompareFunction; - struct ClientData { - virtual ~ClientData() = 0; - }; + HashMap<OpaqueJSClass*, OpaqueJSClassContextData*> opaqueJSClassData; - ClientData* clientData; + HashSet<ParserRefCounted*>* newParserObjects; + HashCountedSet<ParserRefCounted*>* parserObjectExtraRefCounts; + + JSGlobalObject* head; + JSGlobalObject* dynamicGlobalObject; HashSet<JSObject*> arrayVisitedElements; ScopeNode* scopeNodeBeingReparsed; - Heap heap; -#if ENABLE(ASSEMBLER) - PassRefPtr<ExecutablePool> poolForSize(size_t n) { return m_executableAllocator.poolForSize(n); } -#endif private: - JSGlobalData(bool isShared = 
false); -#if ENABLE(ASSEMBLER) - ExecutableAllocator m_executableAllocator; -#endif - + JSGlobalData(bool isShared, const VPtrSet&); static JSGlobalData*& sharedInstanceInternal(); }; +} // namespace JSC -} - -#endif +#endif // JSGlobalData_h diff --git a/JavaScriptCore/runtime/JSGlobalObject.cpp b/JavaScriptCore/runtime/JSGlobalObject.cpp index eb2b349..d6ad295 100644 --- a/JavaScriptCore/runtime/JSGlobalObject.cpp +++ b/JavaScriptCore/runtime/JSGlobalObject.cpp @@ -341,22 +341,11 @@ void JSGlobalObject::reset(JSValuePtr prototype) void JSGlobalObject::resetPrototype(JSValuePtr prototype) { setPrototype(prototype); - lastInPrototypeChain(this)->setPrototype(d()->objectPrototype); -} - -void JSGlobalObject::setTimeoutTime(unsigned timeoutTime) -{ - globalData()->interpreter->setTimeoutTime(timeoutTime); -} -void JSGlobalObject::startTimeoutCheck() -{ - globalData()->interpreter->startTimeoutCheck(); -} - -void JSGlobalObject::stopTimeoutCheck() -{ - globalData()->interpreter->stopTimeoutCheck(); + JSObject* oldLastInPrototypeChain = lastInPrototypeChain(this); + JSObject* objectPrototype = d()->objectPrototype; + if (oldLastInPrototypeChain != objectPrototype) + oldLastInPrototypeChain->setPrototype(objectPrototype); } void JSGlobalObject::mark() diff --git a/JavaScriptCore/runtime/JSGlobalObject.h b/JavaScriptCore/runtime/JSGlobalObject.h index 4a10f64..da8b7bf 100644 --- a/JavaScriptCore/runtime/JSGlobalObject.h +++ b/JavaScriptCore/runtime/JSGlobalObject.h @@ -214,10 +214,6 @@ namespace JSC { void setProfileGroup(unsigned value) { d()->profileGroup = value; } unsigned profileGroup() const { return d()->profileGroup; } - void setTimeoutTime(unsigned timeoutTime); - void startTimeoutCheck(); - void stopTimeoutCheck(); - Debugger* debugger() const { return d()->debugger; } void setDebugger(Debugger* debugger) { d()->debugger = debugger; } diff --git a/JavaScriptCore/runtime/JSImmediate.cpp b/JavaScriptCore/runtime/JSImmediate.cpp index c6cca80..a407ec3 100644 --- 
a/JavaScriptCore/runtime/JSImmediate.cpp +++ b/JavaScriptCore/runtime/JSImmediate.cpp @@ -39,12 +39,8 @@ JSObject* JSImmediate::toThisObject(JSValuePtr v, ExecState* exec) return constructNumber(exec, v); if (isBoolean(v)) return constructBooleanFromImmediateBoolean(exec, v); - if (v.isNull()) - return exec->globalThisValue(); - - JSNotAnObjectErrorStub* exception = createNotAnObjectErrorStub(exec, v.isNull()); - exec->setException(exception); - return new (exec) JSNotAnObject(exec, exception); + ASSERT(v.isUndefinedOrNull()); + return exec->globalThisValue(); } JSObject* JSImmediate::toObject(JSValuePtr v, ExecState* exec) diff --git a/JavaScriptCore/runtime/JSString.h b/JavaScriptCore/runtime/JSString.h index e4baa73..7398d50 100644 --- a/JavaScriptCore/runtime/JSString.h +++ b/JavaScriptCore/runtime/JSString.h @@ -60,7 +60,7 @@ namespace JSC { class JSString : public JSCell { friend class JIT; - friend class Interpreter; + friend class VPtrSet; public: JSString(JSGlobalData* globalData, const UString& value) @@ -202,6 +202,8 @@ namespace JSC { return false; } + inline bool isJSString(JSGlobalData* globalData, JSValuePtr v) { return v.isCell() && v.asCell()->vptr() == globalData->jsStringVPtr; } + // --- JSValue inlines ---------------------------- inline JSString* JSValuePtr::toThisJSString(ExecState* exec) diff --git a/JavaScriptCore/runtime/NumberConstructor.cpp b/JavaScriptCore/runtime/NumberConstructor.cpp index caa4a70..8bd424d 100644 --- a/JavaScriptCore/runtime/NumberConstructor.cpp +++ b/JavaScriptCore/runtime/NumberConstructor.cpp @@ -68,27 +68,27 @@ bool NumberConstructor::getOwnPropertySlot(ExecState* exec, const Identifier& pr return getStaticValueSlot<NumberConstructor, InternalFunction>(exec, ExecState::numberTable(exec), this, propertyName, slot); } -JSValuePtr numberConstructorNaNValue(ExecState* exec, const Identifier&, const PropertySlot&) +static JSValuePtr numberConstructorNaNValue(ExecState* exec, const Identifier&, const PropertySlot&) { 
return jsNaN(exec); } -JSValuePtr numberConstructorNegInfinity(ExecState* exec, const Identifier&, const PropertySlot&) +static JSValuePtr numberConstructorNegInfinity(ExecState* exec, const Identifier&, const PropertySlot&) { return jsNumber(exec, -Inf); } -JSValuePtr numberConstructorPosInfinity(ExecState* exec, const Identifier&, const PropertySlot&) +static JSValuePtr numberConstructorPosInfinity(ExecState* exec, const Identifier&, const PropertySlot&) { return jsNumber(exec, Inf); } -JSValuePtr numberConstructorMaxValue(ExecState* exec, const Identifier&, const PropertySlot&) +static JSValuePtr numberConstructorMaxValue(ExecState* exec, const Identifier&, const PropertySlot&) { return jsNumber(exec, 1.7976931348623157E+308); } -JSValuePtr numberConstructorMinValue(ExecState* exec, const Identifier&, const PropertySlot&) +static JSValuePtr numberConstructorMinValue(ExecState* exec, const Identifier&, const PropertySlot&) { return jsNumber(exec, 5E-324); } diff --git a/JavaScriptCore/runtime/Operations.cpp b/JavaScriptCore/runtime/Operations.cpp index 550d3f6..fe516fe 100644 --- a/JavaScriptCore/runtime/Operations.cpp +++ b/JavaScriptCore/runtime/Operations.cpp @@ -52,4 +52,70 @@ NEVER_INLINE JSValuePtr throwOutOfMemoryError(ExecState* exec) return error; } +NEVER_INLINE JSValuePtr jsAddSlowCase(CallFrame* callFrame, JSValuePtr v1, JSValuePtr v2) +{ + // exception for the Date exception in defaultValue() + JSValuePtr p1 = v1.toPrimitive(callFrame); + JSValuePtr p2 = v2.toPrimitive(callFrame); + + if (p1.isString() || p2.isString()) { + RefPtr<UString::Rep> value = concatenate(p1.toString(callFrame).rep(), p2.toString(callFrame).rep()); + if (!value) + return throwOutOfMemoryError(callFrame); + return jsString(callFrame, value.release()); + } + + return jsNumber(callFrame, p1.toNumber(callFrame) + p2.toNumber(callFrame)); +} + +JSValuePtr jsTypeStringForValue(CallFrame* callFrame, JSValuePtr v) +{ + if (v.isUndefined()) + return jsNontrivialString(callFrame, 
"undefined"); + if (v.isBoolean()) + return jsNontrivialString(callFrame, "boolean"); + if (v.isNumber()) + return jsNontrivialString(callFrame, "number"); + if (v.isString()) + return jsNontrivialString(callFrame, "string"); + if (v.isObject()) { + // Return "undefined" for objects that should be treated + // as null when doing comparisons. + if (asObject(v)->structure()->typeInfo().masqueradesAsUndefined()) + return jsNontrivialString(callFrame, "undefined"); + CallData callData; + if (asObject(v)->getCallData(callData) != CallTypeNone) + return jsNontrivialString(callFrame, "function"); + } + return jsNontrivialString(callFrame, "object"); +} + +bool jsIsObjectType(JSValuePtr v) +{ + if (!v.isCell()) + return v.isNull(); + + JSType type = asCell(v)->structure()->typeInfo().type(); + if (type == NumberType || type == StringType) + return false; + if (type == ObjectType) { + if (asObject(v)->structure()->typeInfo().masqueradesAsUndefined()) + return false; + CallData callData; + if (asObject(v)->getCallData(callData) != CallTypeNone) + return false; + } + return true; +} + +bool jsIsFunctionType(JSValuePtr v) +{ + if (v.isObject()) { + CallData callData; + if (asObject(v)->getCallData(callData) != CallTypeNone) + return true; + } + return false; +} + } // namespace JSC diff --git a/JavaScriptCore/runtime/Operations.h b/JavaScriptCore/runtime/Operations.h index c6a7e7a..85dee99 100644 --- a/JavaScriptCore/runtime/Operations.h +++ b/JavaScriptCore/runtime/Operations.h @@ -22,12 +22,19 @@ #ifndef Operations_h #define Operations_h +#include "Interpreter.h" #include "JSImmediate.h" #include "JSNumberCell.h" #include "JSString.h" namespace JSC { + NEVER_INLINE JSValuePtr throwOutOfMemoryError(ExecState*); + NEVER_INLINE JSValuePtr jsAddSlowCase(CallFrame*, JSValuePtr, JSValuePtr); + JSValuePtr jsTypeStringForValue(CallFrame*, JSValuePtr); + bool jsIsObjectType(JSValuePtr); + bool jsIsFunctionType(JSValuePtr); + // ECMA 11.9.3 inline bool JSValuePtr::equal(ExecState* 
exec, JSValuePtr v1, JSValuePtr v2) { @@ -129,7 +136,147 @@ namespace JSC { return v1 == v2; } - JSValuePtr throwOutOfMemoryError(ExecState*); + inline bool jsLess(CallFrame* callFrame, JSValuePtr v1, JSValuePtr v2) + { + if (JSValuePtr::areBothInt32Fast(v1, v2)) + return v1.getInt32Fast() < v2.getInt32Fast(); + + double n1; + double n2; + if (v1.getNumber(n1) && v2.getNumber(n2)) + return n1 < n2; + + JSGlobalData* globalData = &callFrame->globalData(); + if (isJSString(globalData, v1) && isJSString(globalData, v2)) + return asString(v1)->value() < asString(v2)->value(); + + JSValuePtr p1; + JSValuePtr p2; + bool wasNotString1 = v1.getPrimitiveNumber(callFrame, n1, p1); + bool wasNotString2 = v2.getPrimitiveNumber(callFrame, n2, p2); + + if (wasNotString1 | wasNotString2) + return n1 < n2; + + return asString(p1)->value() < asString(p2)->value(); + } + + inline bool jsLessEq(CallFrame* callFrame, JSValuePtr v1, JSValuePtr v2) + { + if (JSValuePtr::areBothInt32Fast(v1, v2)) + return v1.getInt32Fast() <= v2.getInt32Fast(); + + double n1; + double n2; + if (v1.getNumber(n1) && v2.getNumber(n2)) + return n1 <= n2; + + JSGlobalData* globalData = &callFrame->globalData(); + if (isJSString(globalData, v1) && isJSString(globalData, v2)) + return !(asString(v2)->value() < asString(v1)->value()); + + JSValuePtr p1; + JSValuePtr p2; + bool wasNotString1 = v1.getPrimitiveNumber(callFrame, n1, p1); + bool wasNotString2 = v2.getPrimitiveNumber(callFrame, n2, p2); + + if (wasNotString1 | wasNotString2) + return n1 <= n2; + + return !(asString(p2)->value() < asString(p1)->value()); + } + + // Fast-path choices here are based on frequency data from SunSpider: + // <times> Add case: <t1> <t2> + // --------------------------- + // 5626160 Add case: 3 3 (of these, 3637690 are for immediate values) + // 247412 Add case: 5 5 + // 20900 Add case: 5 6 + // 13962 Add case: 5 3 + // 4000 Add case: 3 5 + + ALWAYS_INLINE JSValuePtr jsAdd(CallFrame* callFrame, JSValuePtr v1, JSValuePtr v2) + 
{ + double left; + double right = 0.0; + + bool rightIsNumber = v2.getNumber(right); + if (rightIsNumber && v1.getNumber(left)) + return jsNumber(callFrame, left + right); + + bool leftIsString = v1.isString(); + if (leftIsString && v2.isString()) { + RefPtr<UString::Rep> value = concatenate(asString(v1)->value().rep(), asString(v2)->value().rep()); + if (!value) + return throwOutOfMemoryError(callFrame); + return jsString(callFrame, value.release()); + } + + if (rightIsNumber & leftIsString) { + RefPtr<UString::Rep> value = v2.isInt32Fast() ? + concatenate(asString(v1)->value().rep(), v2.getInt32Fast()) : + concatenate(asString(v1)->value().rep(), right); + + if (!value) + return throwOutOfMemoryError(callFrame); + return jsString(callFrame, value.release()); + } + + // All other cases are pretty uncommon + return jsAddSlowCase(callFrame, v1, v2); + } + + inline size_t countPrototypeChainEntriesAndCheckForProxies(CallFrame* callFrame, JSValuePtr baseValue, const PropertySlot& slot) + { + JSCell* cell = asCell(baseValue); + size_t count = 0; + + while (slot.slotBase() != cell) { + JSValuePtr v = cell->structure()->prototypeForLookup(callFrame); + + // If we didn't find slotBase in baseValue's prototype chain, then baseValue + // must be a proxy for another object. + + if (v.isNull()) + return 0; + + cell = asCell(v); + + // Since we're accessing a prototype in a loop, it's a good bet that it + // should not be treated as a dictionary. 
+ if (cell->structure()->isDictionary()) + asObject(cell)->setStructure(Structure::fromDictionaryTransition(cell->structure())); + + ++count; + } + + ASSERT(count); + return count; + } + + ALWAYS_INLINE JSValuePtr resolveBase(CallFrame* callFrame, Identifier& property, ScopeChainNode* scopeChain) + { + ScopeChainIterator iter = scopeChain->begin(); + ScopeChainIterator next = iter; + ++next; + ScopeChainIterator end = scopeChain->end(); + ASSERT(iter != end); + + PropertySlot slot; + JSObject* base; + while (true) { + base = *iter; + if (next == end || base->getPropertySlot(callFrame, property, slot)) + return base; + + iter = next; + ++next; + } + + ASSERT_NOT_REACHED(); + return noValue(); + } + +} // namespace JSC -} -#endif +#endif // Operations_h diff --git a/JavaScriptCore/runtime/RegExp.cpp b/JavaScriptCore/runtime/RegExp.cpp index b8251d2..5d05f11 100644 --- a/JavaScriptCore/runtime/RegExp.cpp +++ b/JavaScriptCore/runtime/RegExp.cpp @@ -123,6 +123,7 @@ int RegExp::match(const UString& s, int startOffset, OwnArrayPtr<int>* ovector) if (m_wrecFunction) { int offsetVectorSize = (m_numSubpatterns + 1) * 2; int* offsetVector = new int [offsetVectorSize]; + ASSERT(offsetVector); for (int j = 0; j < offsetVectorSize; ++j) offsetVector[j] = -1; diff --git a/JavaScriptCore/runtime/Structure.cpp b/JavaScriptCore/runtime/Structure.cpp index 86ee76c..f702221 100644 --- a/JavaScriptCore/runtime/Structure.cpp +++ b/JavaScriptCore/runtime/Structure.cpp @@ -65,15 +65,15 @@ static const unsigned newTableSize = 16; static WTF::RefCountedLeakCounter structureCounter("Structure"); #if ENABLE(JSC_MULTIPLE_THREADS) -static Mutex ignoreSetMutex; +static Mutex& ignoreSetMutex = *(new Mutex); #endif static bool shouldIgnoreLeaks; -static HashSet<Structure*> ignoreSet; +static HashSet<Structure*>& ignoreSet = *(new HashSet<Structure*>); #endif #if DUMP_STRUCTURE_ID_STATISTICS -static HashSet<Structure*> liveStructureSet; +static HashSet<Structure*>& liveStructureSet = *(new 
HashSet<Structure*>); #endif void Structure::dumpStatistics() diff --git a/JavaScriptCore/runtime/TimeoutChecker.cpp b/JavaScriptCore/runtime/TimeoutChecker.cpp new file mode 100644 index 0000000..30ba6e9 --- /dev/null +++ b/JavaScriptCore/runtime/TimeoutChecker.cpp @@ -0,0 +1,154 @@ +/* + * Copyright (C) 2008, 2009 Apple Inc. All rights reserved. + * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "TimeoutChecker.h" + +#include "CallFrame.h" +#include "JSGlobalObject.h" + +#if PLATFORM(DARWIN) +#include <mach/mach.h> +#endif + +#if HAVE(SYS_TIME_H) +#include <sys/time.h> +#endif + +#if PLATFORM(WIN_OS) +#include <windows.h> +#endif + +#if PLATFORM(QT) +#include <QDateTime> +#endif + +using namespace std; + +namespace JSC { + +// Number of ticks before the first timeout check is done. +static const int ticksUntilFirstCheck = 1024; + +// Number of milliseconds between each timeout check. +static const int intervalBetweenChecks = 1000; + +// Returns the time the current thread has spent executing, in milliseconds. +static inline unsigned getCPUTime() +{ +#if PLATFORM(DARWIN) + mach_msg_type_number_t infoCount = THREAD_BASIC_INFO_COUNT; + thread_basic_info_data_t info; + + // Get thread information + mach_port_t threadPort = mach_thread_self(); + thread_info(threadPort, THREAD_BASIC_INFO, reinterpret_cast<thread_info_t>(&info), &infoCount); + mach_port_deallocate(mach_task_self(), threadPort); + + unsigned time = info.user_time.seconds * 1000 + info.user_time.microseconds / 1000; + time += info.system_time.seconds * 1000 + info.system_time.microseconds / 1000; + + return time; +#elif HAVE(SYS_TIME_H) + // FIXME: This should probably use getrusage with the RUSAGE_THREAD flag. + struct timeval tv; + gettimeofday(&tv, 0); + return tv.tv_sec * 1000 + tv.tv_usec / 1000; +#elif PLATFORM(QT) + QDateTime t = QDateTime::currentDateTime(); + return t.toTime_t() * 1000 + t.time().msec(); +#elif PLATFORM(WIN_OS) + union { + FILETIME fileTime; + unsigned long long fileTimeAsLong; + } userTime, kernelTime; + + // GetThreadTimes won't accept NULL arguments so we pass these even though + // they're not used. 
+ FILETIME creationTime, exitTime; + + GetThreadTimes(GetCurrentThread(), &creationTime, &exitTime, &kernelTime.fileTime, &userTime.fileTime); + + return userTime.fileTimeAsLong / 10000 + kernelTime.fileTimeAsLong / 10000; +#else +#error Platform does not have getCurrentTime function +#endif +} + +TimeoutChecker::TimeoutChecker() + : m_timeoutInterval(0) + , m_startCount(0) +{ + reset(); +} + +void TimeoutChecker::reset() +{ + m_ticksUntilNextCheck = ticksUntilFirstCheck; + m_timeAtLastCheck = 0; + m_timeExecuting = 0; +} + +bool TimeoutChecker::didTimeOut(ExecState* exec) +{ + unsigned currentTime = getCPUTime(); + + if (!m_timeAtLastCheck) { + // Suspicious amount of looping in a script -- start timing it + m_timeAtLastCheck = currentTime; + return false; + } + + unsigned timeDiff = currentTime - m_timeAtLastCheck; + + if (timeDiff == 0) + timeDiff = 1; + + m_timeExecuting += timeDiff; + m_timeAtLastCheck = currentTime; + + // Adjust the tick threshold so we get the next checkTimeout call in the + // interval specified in intervalBetweenChecks. + m_ticksUntilNextCheck = static_cast<unsigned>((static_cast<float>(intervalBetweenChecks) / timeDiff) * m_ticksUntilNextCheck); + // If the new threshold is 0 reset it to the default threshold. This can happen if the timeDiff is higher than the + // preferred script check time interval. + if (m_ticksUntilNextCheck == 0) + m_ticksUntilNextCheck = ticksUntilFirstCheck; + + if (m_timeoutInterval && m_timeExecuting > m_timeoutInterval) { + if (exec->dynamicGlobalObject()->shouldInterruptScript()) + return true; + + reset(); + } + + return false; +} + +} // namespace JSC diff --git a/JavaScriptCore/runtime/TimeoutChecker.h b/JavaScriptCore/runtime/TimeoutChecker.h new file mode 100644 index 0000000..7bfa6d0 --- /dev/null +++ b/JavaScriptCore/runtime/TimeoutChecker.h @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef TimeoutChecker_h +#define TimeoutChecker_h + +#include <wtf/Assertions.h> + +namespace JSC { + + class ExecState; + + class TimeoutChecker { + public: + TimeoutChecker(); + + void setTimeoutInterval(unsigned timeoutInterval) { m_timeoutInterval = timeoutInterval; } + + unsigned ticksUntilNextCheck() { return m_ticksUntilNextCheck; } + + void start() + { + if (!m_startCount) + reset(); + ++m_startCount; + } + + void stop() + { + ASSERT(m_startCount); + --m_startCount; + } + + void reset(); + + bool didTimeOut(ExecState*); + + private: + unsigned m_timeoutInterval; + unsigned m_timeAtLastCheck; + unsigned m_timeExecuting; + unsigned m_startCount; + unsigned m_ticksUntilNextCheck; + }; + +} // namespace JSC + +#endif // TimeoutChecker_h diff --git a/JavaScriptCore/runtime/UString.cpp b/JavaScriptCore/runtime/UString.cpp index 8cb12cf..024d6a1 100644 --- a/JavaScriptCore/runtime/UString.cpp +++ b/JavaScriptCore/runtime/UString.cpp @@ -93,7 +93,7 @@ static inline void copyChars(UChar* destination, const UChar* source, unsigned n memcpy(destination, source, numCharacters * sizeof(UChar)); } -COMPILE_ASSERT(sizeof(UChar) == 2, uchar_is_2_bytes) +COMPILE_ASSERT(sizeof(UChar) == 2, uchar_is_2_bytes); CString::CString(const char* c) : m_length(strlen(c)) diff --git a/JavaScriptCore/wrec/WREC.cpp b/JavaScriptCore/wrec/WREC.cpp index 099931b..145a1ce 100644 --- a/JavaScriptCore/wrec/WREC.cpp +++ b/JavaScriptCore/wrec/WREC.cpp @@ -40,12 +40,9 @@ using namespace WTF; namespace JSC { namespace WREC { -// Patterns longer than this can hang the compiler. 
-static const int MaxPatternSize = (1 << 13); - CompiledRegExp Generator::compileRegExp(JSGlobalData* globalData, const UString& pattern, unsigned* numSubpatterns_ptr, const char** error_ptr, RefPtr<ExecutablePool>& pool, bool ignoreCase, bool multiline) { - if (pattern.size() > MaxPatternSize) { + if (pattern.size() > MAX_PATTERN_SIZE) { *error_ptr = "regular expression too large"; return 0; } @@ -80,7 +77,7 @@ CompiledRegExp Generator::compileRegExp(JSGlobalData* globalData, const UString& } *numSubpatterns_ptr = parser.numSubpatterns(); - pool = globalData->poolForSize(generator.size()); + pool = globalData->executableAllocator.poolForSize(generator.size()); return reinterpret_cast<CompiledRegExp>(generator.copyCode(pool.get())); } diff --git a/JavaScriptCore/wrec/WRECGenerator.cpp b/JavaScriptCore/wrec/WRECGenerator.cpp index 4b5f3d4..e2e8aba 100644 --- a/JavaScriptCore/wrec/WRECGenerator.cpp +++ b/JavaScriptCore/wrec/WRECGenerator.cpp @@ -40,16 +40,7 @@ namespace JSC { namespace WREC { void Generator::generateEnter() { -#if PLATFORM(X86_64) - // On x86-64 edi and esi are caller preserved, so nothing to do here. - // The four arguments have been passed in the registers %rdi, %rsi, - // %rdx, %rcx - shuffle these into the expected locations. - move(X86::edi, input); // (arg 1) edi -> eax - move(X86::ecx, output); // (arg 4) ecx -> edi - move(X86::edx, length); // (arg 3) edx -> ecx - move(X86::esi, index); // (arg 2) esi -> edx - -#else +#if PLATFORM(X86) // On x86 edi & esi are callee preserved registers. push(X86::edi); push(X86::esi); @@ -67,24 +58,20 @@ void Generator::generateEnter() peek(output, 3); #endif #endif - -#ifndef NDEBUG - // ASSERT that the output register is not null. - Jump outputNotNull = jnzPtr(output); - breakpoint(); - outputNotNull.link(this); -#endif } void Generator::generateReturnSuccess() { + ASSERT(returnRegister != index); + ASSERT(returnRegister != output); + // Set return value. 
- pop(X86::eax); // match begin - store32(X86::eax, output); + pop(returnRegister); // match begin + store32(returnRegister, output); store32(index, Address(output, 4)); // match end // Restore callee save registers. -#if !PLATFORM(X86_64) +#if PLATFORM(X86) pop(X86::esi); pop(X86::edi); #endif @@ -100,14 +87,14 @@ void Generator::generateIncrementIndex(Jump* failure) { peek(index); if (failure) - *failure = je32(length, index); + *failure = branch32(Equal, length, index); add32(Imm32(1), index); poke(index); } void Generator::generateLoadCharacter(JumpList& failures) { - failures.append(je32(length, index)); + failures.append(branch32(Equal, length, index)); load16(BaseIndex(input, index, TimesTwo), character); } @@ -115,14 +102,15 @@ void Generator::generateLoadCharacter(JumpList& failures) // were part of the input string. void Generator::generateJumpIfNotEndOfInput(Label target) { - jle32(index, length, target); + branch32(LessThanOrEqual, index, length, target); } void Generator::generateReturnFailure() { pop(); - move(Imm32(-1), X86::eax); -#if !PLATFORM(X86_64) + move(Imm32(-1), returnRegister); + +#if PLATFORM(X86) pop(X86::esi); pop(X86::edi); #endif @@ -145,7 +133,7 @@ void Generator::generateBackreferenceQuantifier(JumpList& failures, Quantifier:: GenerateBackreferenceFunctor functor(subpatternId); load32(Address(output, (2 * subpatternId) * sizeof(int)), character); - Jump skipIfEmpty = je32(Address(output, ((2 * subpatternId) + 1) * sizeof(int)), character); + Jump skipIfEmpty = branch32(Equal, Address(output, ((2 * subpatternId) + 1) * sizeof(int)), character); ASSERT(quantifierType == Quantifier::Greedy || quantifierType == Quantifier::NonGreedy); if (quantifierType == Quantifier::Greedy) @@ -175,7 +163,7 @@ void Generator::generateNonGreedyQuantifier(JumpList& failures, GenerateAtomFunc Label alternativeFailed(this); pop(index); if (max != Quantifier::Infinity) - je32(repeatCount, Imm32(max), quantifierFailed); + branch32(Equal, repeatCount, 
Imm32(max), quantifierFailed); // (1) Read an atom. if (min) @@ -187,7 +175,7 @@ void Generator::generateNonGreedyQuantifier(JumpList& failures, GenerateAtomFunc // (2) Keep reading if we're under the minimum. if (min > 1) - jl32(repeatCount, Imm32(min), readAtom); + branch32(LessThan, repeatCount, Imm32(min), readAtom); // (3) Test the rest of the alternative. if (!min) @@ -221,7 +209,7 @@ void Generator::generateGreedyQuantifier(JumpList& failures, GenerateAtomFunctor else if (max == 1) doneReadingAtomsList.append(jump()); else { - jne32(repeatCount, Imm32(max), readAtom); + branch32(NotEqual, repeatCount, Imm32(max), readAtom); doneReadingAtomsList.append(jump()); } @@ -238,7 +226,7 @@ void Generator::generateGreedyQuantifier(JumpList& failures, GenerateAtomFunctor // (2) Verify that we have enough atoms. doneReadingAtomsList.link(this); - jl32(repeatCount, Imm32(min), quantifierFailed); + branch32(LessThan, repeatCount, Imm32(min), quantifierFailed); // (3) Test the rest of the alternative. push(index); @@ -277,7 +265,7 @@ bool Generator::generatePatternCharacterPair(JumpList& failures, int ch1, int ch // Optimistically consume 2 characters. add32(Imm32(2), index); - failures.append(jg32(index, length)); + failures.append(branch32(GreaterThan, index, length)); // Load the characters we just consumed, offset -2 characters from index. 
load32(BaseIndex(input, index, TimesTwo, -2 * 2), character); @@ -306,7 +294,7 @@ bool Generator::generatePatternCharacterPair(JumpList& failures, int ch1, int ch } int pair = ch1 | (ch2 << 16); - failures.append(jne32(character, Imm32(pair))); + failures.append(branch32(NotEqual, character, Imm32(pair))); return true; } @@ -328,14 +316,14 @@ void Generator::generatePatternCharacter(JumpList& failures, int ch) ch |= 32; } else if (!isASCII(ch) && ((lower = Unicode::toLower(ch)) != (upper = Unicode::toUpper(ch)))) { // handle unicode case sentitive characters - branch to success on upper - isUpper = je32(character, Imm32(upper)); + isUpper = branch32(Equal, character, Imm32(upper)); hasUpper = true; ch = lower; } } // checks for ch, or lower case version of ch, if insensitive - failures.append(jne32(character, Imm32((unsigned short)ch))); + failures.append(branch32(NotEqual, character, Imm32((unsigned short)ch))); if (m_parser.ignoreCase() && hasUpper) { // for unicode case insensitive matches, branch here if upper matches. @@ -357,33 +345,33 @@ void Generator::generateCharacterClassInvertedRange(JumpList& failures, JumpList // check if there are any ranges or matches below lo. If not, just jl to failure - // if there is anything else to check, check that first, if it falls through jmp to failure. 
if ((*matchIndex < matchCount) && (matches[*matchIndex] < lo)) { - Jump loOrAbove = jge32(character, Imm32((unsigned short)lo)); + Jump loOrAbove = branch32(GreaterThanOrEqual, character, Imm32((unsigned short)lo)); // generate code for all ranges before this one if (which) generateCharacterClassInvertedRange(failures, matchDest, ranges, which, matchIndex, matches, matchCount); while ((*matchIndex < matchCount) && (matches[*matchIndex] < lo)) { - matchDest.append(je32(character, Imm32((unsigned short)matches[*matchIndex]))); + matchDest.append(branch32(Equal, character, Imm32((unsigned short)matches[*matchIndex]))); ++*matchIndex; } failures.append(jump()); loOrAbove.link(this); } else if (which) { - Jump loOrAbove = jge32(character, Imm32((unsigned short)lo)); + Jump loOrAbove = branch32(GreaterThanOrEqual, character, Imm32((unsigned short)lo)); generateCharacterClassInvertedRange(failures, matchDest, ranges, which, matchIndex, matches, matchCount); failures.append(jump()); loOrAbove.link(this); } else - failures.append(jl32(character, Imm32((unsigned short)lo))); + failures.append(branch32(LessThan, character, Imm32((unsigned short)lo))); while ((*matchIndex < matchCount) && (matches[*matchIndex] <= hi)) ++*matchIndex; - matchDest.append(jle32(character, Imm32((unsigned short)hi))); + matchDest.append(branch32(LessThanOrEqual, character, Imm32((unsigned short)hi))); // fall through to here, the value is above hi. // shuffle along & loop around if there are any more matches to handle. 
@@ -397,12 +385,12 @@ void Generator::generateCharacterClassInverted(JumpList& matchDest, const Charac { Jump unicodeFail; if (charClass.numMatchesUnicode || charClass.numRangesUnicode) { - Jump isAscii = jle32(character, Imm32(0x7f)); + Jump isAscii = branch32(LessThanOrEqual, character, Imm32(0x7f)); if (charClass.numMatchesUnicode) { for (unsigned i = 0; i < charClass.numMatchesUnicode; ++i) { UChar ch = charClass.matchesUnicode[i]; - matchDest.append(je32(character, Imm32(ch))); + matchDest.append(branch32(Equal, character, Imm32(ch))); } } @@ -411,8 +399,8 @@ void Generator::generateCharacterClassInverted(JumpList& matchDest, const Charac UChar lo = charClass.rangesUnicode[i].begin; UChar hi = charClass.rangesUnicode[i].end; - Jump below = jl32(character, Imm32(lo)); - matchDest.append(jle32(character, Imm32(hi))); + Jump below = branch32(LessThan, character, Imm32(lo)); + matchDest.append(branch32(LessThanOrEqual, character, Imm32(hi))); below.link(this); } } @@ -426,7 +414,7 @@ void Generator::generateCharacterClassInverted(JumpList& matchDest, const Charac JumpList failures; generateCharacterClassInvertedRange(failures, matchDest, charClass.ranges, charClass.numRanges, &matchIndex, charClass.matches, charClass.numMatches); while (matchIndex < charClass.numMatches) - matchDest.append(je32(character, Imm32((unsigned short)charClass.matches[matchIndex++]))); + matchDest.append(branch32(Equal, character, Imm32((unsigned short)charClass.matches[matchIndex++]))); failures.link(this); } else if (charClass.numMatches) { @@ -443,13 +431,13 @@ void Generator::generateCharacterClassInverted(JumpList& matchDest, const Charac if (isASCIIUpper(ch)) continue; } - matchDest.append(je32(character, Imm32((unsigned short)ch))); + matchDest.append(branch32(Equal, character, Imm32((unsigned short)ch))); } if (unsigned countAZaz = matchesAZaz.size()) { or32(Imm32(32), character); for (unsigned i = 0; i < countAZaz; ++i) - matchDest.append(je32(character, Imm32(matchesAZaz[i]))); 
+ matchDest.append(branch32(Equal, character, Imm32(matchesAZaz[i]))); } } @@ -533,7 +521,7 @@ void Generator::generateAssertionBOL(JumpList& failures) JumpList previousIsNewline; // begin of input == success - previousIsNewline.append(je32(index, Imm32(0))); + previousIsNewline.append(branch32(Equal, index, Imm32(0))); // now check prev char against newline characters. load16(BaseIndex(input, index, TimesTwo, -2), character); @@ -543,7 +531,7 @@ void Generator::generateAssertionBOL(JumpList& failures) previousIsNewline.link(this); } else - failures.append(jne32(index, Imm32(0))); + failures.append(branch32(NotEqual, index, Imm32(0))); } void Generator::generateAssertionEOL(JumpList& failures) @@ -556,7 +544,7 @@ void Generator::generateAssertionEOL(JumpList& failures) failures.append(jump()); nextIsNewline.link(this); } else { - failures.append(jne32(length, index)); + failures.append(branch32(NotEqual, length, index)); } } @@ -568,7 +556,7 @@ void Generator::generateAssertionWordBoundary(JumpList& failures, bool invert) // (1) Check if the previous value was a word char // (1.1) check for begin of input - Jump atBegin = je32(index, Imm32(0)); + Jump atBegin = branch32(Equal, index, Imm32(0)); // (1.2) load the last char, and chck if is word character load16(BaseIndex(input, index, TimesTwo, -2), character); JumpList previousIsWord; @@ -625,14 +613,14 @@ void Generator::generateBackreference(JumpList& failures, unsigned subpatternId) skipIncrement.link(this); // check if we're at the end of backref (if we are, success!) 
- Jump endOfBackRef = je32(Address(output, ((2 * subpatternId) + 1) * sizeof(int)), repeatCount); + Jump endOfBackRef = branch32(Equal, Address(output, ((2 * subpatternId) + 1) * sizeof(int)), repeatCount); load16(BaseIndex(input, repeatCount, MacroAssembler::TimesTwo), character); // check if we've run out of input (this would be a can o'fail) - Jump endOfInput = je32(length, index); + Jump endOfInput = branch32(Equal, length, index); - je16(character, BaseIndex(input, index, TimesTwo), topOfLoop); + branch16(Equal, BaseIndex(input, index, TimesTwo), character, topOfLoop); endOfInput.link(this); diff --git a/JavaScriptCore/wrec/WRECGenerator.h b/JavaScriptCore/wrec/WRECGenerator.h index af4101a..8562cac 100644 --- a/JavaScriptCore/wrec/WRECGenerator.h +++ b/JavaScriptCore/wrec/WRECGenerator.h @@ -62,13 +62,29 @@ namespace JSC { { } +#if PLATFORM(X86) static const RegisterID input = X86::eax; - static const RegisterID length = X86::ecx; static const RegisterID index = X86::edx; - static const RegisterID character = X86::esi; + static const RegisterID length = X86::ecx; static const RegisterID output = X86::edi; + + static const RegisterID character = X86::esi; static const RegisterID repeatCount = X86::ebx; // How many times the current atom repeats in the current match. - + + static const RegisterID returnRegister = X86::eax; +#endif +#if PLATFORM(X86_64) + static const RegisterID input = X86::edi; + static const RegisterID index = X86::esi; + static const RegisterID length = X86::edx; + static const RegisterID output = X86::ecx; + + static const RegisterID character = X86::eax; + static const RegisterID repeatCount = X86::ebx; // How many times the current atom repeats in the current match. 
+ + static const RegisterID returnRegister = X86::eax; +#endif + void generateEnter(); void generateSaveIndex(); void generateIncrementIndex(Jump* failure = 0); diff --git a/JavaScriptCore/wtf/Assertions.h b/JavaScriptCore/wtf/Assertions.h index c17e501..9643517 100644 --- a/JavaScriptCore/wtf/Assertions.h +++ b/JavaScriptCore/wtf/Assertions.h @@ -194,7 +194,7 @@ while (0) /* COMPILE_ASSERT */ #ifndef COMPILE_ASSERT -#define COMPILE_ASSERT(exp, name) typedef int dummy##name [(exp) ? 1 : -1]; +#define COMPILE_ASSERT(exp, name) typedef int dummy##name [(exp) ? 1 : -1] #endif /* FATAL */ diff --git a/JavaScriptCore/wtf/ByteArray.h b/JavaScriptCore/wtf/ByteArray.h index 865c30e..33f0877 100644 --- a/JavaScriptCore/wtf/ByteArray.h +++ b/JavaScriptCore/wtf/ByteArray.h @@ -69,8 +69,7 @@ namespace WTF { private: ByteArray(size_t size) - : RefCountedBase(1) - , m_size(size) + : m_size(size) { } size_t m_size; diff --git a/JavaScriptCore/wtf/CrossThreadRefCounted.h b/JavaScriptCore/wtf/CrossThreadRefCounted.h new file mode 100644 index 0000000..82f1ba1 --- /dev/null +++ b/JavaScriptCore/wtf/CrossThreadRefCounted.h @@ -0,0 +1,165 @@ +/* + * Copyright (C) 2009 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef CrossThreadRefCounted_h +#define CrossThreadRefCounted_h + +#include <wtf/Noncopyable.h> +#include <wtf/PassRefPtr.h> +#include <wtf/RefCounted.h> +#include <wtf/Threading.h> +#include <wtf/TypeTraits.h> + +namespace WTF { + + // Used to allowing sharing data across classes and threads (like ThreadedSafeShared). + // + // Why not just use ThreadSafeShared? + // ThreadSafeShared can have a significant perf impact when used in low level classes + // (like UString) that get ref/deref'ed a lot. This class has the benefit of doing fast ref + // counts like RefPtr whenever possible, but it has the downside that you need to copy it + // to use it on another thread. + // + // Is this class threadsafe? + // While each instance of the class is not threadsafe, the copied instance is threadsafe + // with respect to the original and any other copies. The underlying m_data is jointly + // owned by the original instance and all copies. 
+ template<class T> + class CrossThreadRefCounted : Noncopyable { + public: + static PassRefPtr<CrossThreadRefCounted<T> > create(T* data) + { + return adoptRef(new CrossThreadRefCounted<T>(data, 0)); + } + + // Used to make an instance that can be used on another thread. + PassRefPtr<CrossThreadRefCounted<T> > crossThreadCopy(); + + void ref(); + void deref(); + T* release(); + +#ifndef NDEBUG + bool mayBePassedToAnotherThread() const { ASSERT(!m_threadId); return m_refCounter.hasOneRef(); } +#endif + + private: + CrossThreadRefCounted(T* data, ThreadSafeSharedBase* threadedCounter) + : m_threadSafeRefCounter(threadedCounter) + , m_data(data) +#ifndef NDEBUG + , m_threadId(0) +#endif + { + } + + ~CrossThreadRefCounted() + { + if (!m_threadSafeRefCounter) + delete m_data; + } + + void threadSafeDeref(); + + RefCountedBase m_refCounter; + ThreadSafeSharedBase* m_threadSafeRefCounter; + T* m_data; +#ifndef NDEBUG + ThreadIdentifier m_threadId; +#endif + }; + + template<class T> + void CrossThreadRefCounted<T>::ref() + { + ASSERT(!m_threadId || m_threadId == currentThread()); + m_refCounter.ref(); +#ifndef NDEBUG + // Store the threadId as soon as the ref count gets to 2. + // The class gets created with a ref count of 1 and then passed + // to another thread where to ref count get increased. This + // is a heuristic but it seems to always work and has helped + // find some bugs. + if (!m_threadId && m_refCounter.refCount() == 2) + m_threadId = currentThread(); +#endif + } + + template<class T> + void CrossThreadRefCounted<T>::deref() + { + ASSERT(!m_threadId || m_threadId == currentThread()); + if (m_refCounter.derefBase()) { + threadSafeDeref(); + delete this; + } else { +#ifndef NDEBUG + // Clear the threadId when the ref goes to 1 because it + // is safe to be passed to another thread at this point. 
+ if (m_threadId && m_refCounter.refCount() == 1) + m_threadId = 0; +#endif + } + } + + template<class T> + T* CrossThreadRefCounted<T>::release() + { + ASSERT(!isShared()); + + T* data = m_data; + m_data = 0; + return data; + } + + template<class T> + PassRefPtr<CrossThreadRefCounted<T> > CrossThreadRefCounted<T>::crossThreadCopy() + { + if (m_threadSafeRefCounter) + m_threadSafeRefCounter->ref(); + else + m_threadSafeRefCounter = new ThreadSafeSharedBase(2); + return adoptRef(new CrossThreadRefCounted<T>(m_data, m_threadSafeRefCounter)); + } + + + template<class T> + void CrossThreadRefCounted<T>::threadSafeDeref() + { + if (m_threadSafeRefCounter && m_threadSafeRefCounter->derefBase()) { + delete m_threadSafeRefCounter; + m_threadSafeRefCounter = 0; + } + } +} // namespace WTF + +using WTF::CrossThreadRefCounted; + +#endif // CrossThreadRefCounted_h diff --git a/JavaScriptCore/wtf/CurrentTime.cpp b/JavaScriptCore/wtf/CurrentTime.cpp index d9ea448..74984c1 100644 --- a/JavaScriptCore/wtf/CurrentTime.cpp +++ b/JavaScriptCore/wtf/CurrentTime.cpp @@ -32,13 +32,9 @@ #include "config.h" #include "CurrentTime.h" -#if PLATFORM(MAC) -#include <CoreFoundation/CFDate.h> -#elif PLATFORM(GTK) -#include <glib.h> -#elif PLATFORM(WX) -#include <wx/datetime.h> -#elif PLATFORM(WIN_OS) +#if PLATFORM(WIN_OS) +// Windows is first since we want to use hires timers, despite PLATFORM(CF) +// being defined. // If defined, WIN32_LEAN_AND_MEAN disables timeBeginPeriod/timeEndPeriod. 
#undef WIN32_LEAN_AND_MEAN #include <windows.h> @@ -47,6 +43,12 @@ #include <sys/timeb.h> #include <sys/types.h> #include <time.h> +#elif PLATFORM(CF) +#include <CoreFoundation/CFDate.h> +#elif PLATFORM(GTK) +#include <glib.h> +#elif PLATFORM(WX) +#include <wx/datetime.h> #else // Posix systems relying on the gettimeofday() #include <sys/time.h> #endif @@ -55,35 +57,7 @@ namespace WTF { const double msPerSecond = 1000.0; -#if PLATFORM(MAC) - -double currentTime() -{ - return CFAbsoluteTimeGetCurrent() + kCFAbsoluteTimeIntervalSince1970; -} - -#elif PLATFORM(GTK) - -// Note: GTK on Windows will pick up the PLATFORM(WIN) implementation above which provides -// better accuracy compared with Windows implementation of g_get_current_time: -// (http://www.google.com/codesearch/p?hl=en#HHnNRjks1t0/glib-2.5.2/glib/gmain.c&q=g_get_current_time). -// Non-Windows GTK builds could use gettimeofday() directly but for the sake of consistency lets use GTK function. -double currentTime() -{ - GTimeVal now; - g_get_current_time(&now); - return static_cast<double>(now.tv_sec) + static_cast<double>(now.tv_usec / 1000000.0); -} - -#elif PLATFORM(WX) - -double currentTime() -{ - wxDateTime now = wxDateTime::UNow(); - return (double)now.GetTicks() + (double)(now.GetMillisecond() / 1000.0); -} - -#elif PLATFORM(WIN_OS) +#if PLATFORM(WIN_OS) static LARGE_INTEGER qpcFrequency; static bool syncedTime; @@ -210,6 +184,34 @@ double currentTime() return utc / 1000.0; } +#elif PLATFORM(CF) + +double currentTime() +{ + return CFAbsoluteTimeGetCurrent() + kCFAbsoluteTimeIntervalSince1970; +} + +#elif PLATFORM(GTK) + +// Note: GTK on Windows will pick up the PLATFORM(WIN) implementation above which provides +// better accuracy compared with Windows implementation of g_get_current_time: +// (http://www.google.com/codesearch/p?hl=en#HHnNRjks1t0/glib-2.5.2/glib/gmain.c&q=g_get_current_time). 
+// Non-Windows GTK builds could use gettimeofday() directly but for the sake of consistency lets use GTK function. +double currentTime() +{ + GTimeVal now; + g_get_current_time(&now); + return static_cast<double>(now.tv_sec) + static_cast<double>(now.tv_usec / 1000000.0); +} + +#elif PLATFORM(WX) + +double currentTime() +{ + wxDateTime now = wxDateTime::UNow(); + return (double)now.GetTicks() + (double)(now.GetMillisecond() / 1000.0); +} + #else // Other Posix systems rely on the gettimeofday(). double currentTime() diff --git a/JavaScriptCore/wtf/Deque.h b/JavaScriptCore/wtf/Deque.h index 70c546b..c371d38 100644 --- a/JavaScriptCore/wtf/Deque.h +++ b/JavaScriptCore/wtf/Deque.h @@ -1,5 +1,6 @@ /* * Copyright (C) 2007, 2008 Apple Inc. All rights reserved. + * Copyright (C) 2009 Google Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -75,9 +76,14 @@ namespace WTF { template<typename U> void append(const U&); template<typename U> void prepend(const U&); void removeFirst(); + void remove(iterator&); + void remove(const_iterator&); void clear(); + template<typename Predicate> + iterator findIf(Predicate&); + private: friend class DequeIteratorBase<T>; @@ -85,6 +91,7 @@ namespace WTF { typedef VectorTypeOperations<T> TypeOperations; typedef DequeIteratorBase<T> IteratorBase; + void remove(size_t position); void invalidateIterators(); void destroyAll(); void checkValidity() const; @@ -124,6 +131,7 @@ namespace WTF { private: void addToIteratorsList(); + void removeFromIteratorsList(); void checkValidity() const; void checkValidity(const Base&) const; @@ -348,7 +356,7 @@ namespace WTF { destroyAll(); } - template <typename T> + template<typename T> inline void Deque<T>::swap(Deque<T>& other) { checkValidity(); @@ -361,7 +369,7 @@ namespace WTF { other.checkValidity(); } - template <typename T> + template<typename T> inline void Deque<T>::clear() { 
checkValidity(); @@ -373,6 +381,18 @@ namespace WTF { } template<typename T> + template<typename Predicate> + inline DequeIterator<T> Deque<T>::findIf(Predicate& predicate) + { + iterator end_iterator = end(); + for (iterator it = begin(); it != end_iterator; ++it) { + if (predicate(*it)) + return it; + } + return end_iterator; + } + + template<typename T> inline void Deque<T>::expandCapacityIfNeeded() { if (m_start) { @@ -447,10 +467,48 @@ namespace WTF { checkValidity(); } + template<typename T> + inline void Deque<T>::remove(iterator& it) + { + it.checkValidity(); + remove(it.m_index); + } + + template<typename T> + inline void Deque<T>::remove(const_iterator& it) + { + it.checkValidity(); + remove(it.m_index); + } + + template<typename T> + inline void Deque<T>::remove(size_t position) + { + if (position == m_end) + return; + + checkValidity(); + invalidateIterators(); + + T* buffer = m_buffer.buffer(); + TypeOperations::destruct(&buffer[position], &buffer[position + 1]); + + // Find which segment of the circular buffer contained the remove element, and only move elements in that part. 
+ if (position >= m_start) { + TypeOperations::moveOverlapping(buffer + m_start, buffer + position, buffer + m_start + 1); + m_start = (m_start + 1) % m_buffer.capacity(); + } else { + TypeOperations::moveOverlapping(buffer + position + 1, buffer + m_end, buffer + position); + m_end = (m_end - 1 + m_buffer.capacity()) % m_buffer.capacity(); + } + checkValidity(); + } + #ifdef NDEBUG template<typename T> inline void DequeIteratorBase<T>::checkValidity() const { } template<typename T> inline void DequeIteratorBase<T>::checkValidity(const DequeIteratorBase<T>&) const { } template<typename T> inline void DequeIteratorBase<T>::addToIteratorsList() { } + template<typename T> inline void DequeIteratorBase<T>::removeFromIteratorsList() { } #else template<typename T> void DequeIteratorBase<T>::checkValidity() const @@ -480,6 +538,30 @@ namespace WTF { } m_previous = 0; } + + template<typename T> + void DequeIteratorBase<T>::removeFromIteratorsList() + { + if (!m_deque) { + ASSERT(!m_next); + ASSERT(!m_previous); + } else { + if (m_next) { + ASSERT(m_next->m_previous == this); + m_next->m_previous = m_previous; + } + if (m_previous) { + ASSERT(m_deque->m_iterators != this); + ASSERT(m_previous->m_next == this); + m_previous->m_next = m_next; + } else { + ASSERT(m_deque->m_iterators == this); + m_deque->m_iterators = m_next; + } + } + m_next = 0; + m_previous = 0; + } #endif template<typename T> @@ -507,30 +589,25 @@ namespace WTF { } template<typename T> + inline DequeIteratorBase<T>& DequeIteratorBase<T>::operator=(const Base& other) + { + checkValidity(); + other.checkValidity(); + removeFromIteratorsList(); + + m_deque = other.m_deque; + m_index = other.m_index; + addToIteratorsList(); + checkValidity(); + return *this; + } + + template<typename T> inline DequeIteratorBase<T>::~DequeIteratorBase() { #ifndef NDEBUG - // Delete iterator from doubly-linked list of iterators. 
- if (!m_deque) { - ASSERT(!m_next); - ASSERT(!m_previous); - } else { - if (m_next) { - ASSERT(m_next->m_previous == this); - m_next->m_previous = m_previous; - } - if (m_previous) { - ASSERT(m_deque->m_iterators != this); - ASSERT(m_previous->m_next == this); - m_previous->m_next = m_next; - } else { - ASSERT(m_deque->m_iterators == this); - m_deque->m_iterators = m_next; - } - } + removeFromIteratorsList(); m_deque = 0; - m_next = 0; - m_previous = 0; #endif } diff --git a/JavaScriptCore/wtf/FastMalloc.cpp b/JavaScriptCore/wtf/FastMalloc.cpp index 88c10ca..bcac242 100644 --- a/JavaScriptCore/wtf/FastMalloc.cpp +++ b/JavaScriptCore/wtf/FastMalloc.cpp @@ -94,7 +94,7 @@ #define FORCE_SYSTEM_MALLOC 1 #endif -#define TCMALLOC_TRACK_DECOMMITED_SPANS (HAVE(VIRTUALALLOC)) +#define TCMALLOC_TRACK_DECOMMITED_SPANS (HAVE(VIRTUALALLOC) || HAVE(MADV_FREE_REUSE)) #ifndef NDEBUG namespace WTF { @@ -321,9 +321,11 @@ namespace WTF { #define CHECK_CONDITION ASSERT #if PLATFORM(DARWIN) +class Span; +class TCMalloc_Central_FreeListPadded; class TCMalloc_PageHeap; class TCMalloc_ThreadCache; -class TCMalloc_Central_FreeListPadded; +template <typename T> class PageHeapAllocator; class FastMallocZone { public: @@ -339,7 +341,7 @@ public: static void statistics(malloc_zone_t*, malloc_statistics_t* stats) { memset(stats, 0, sizeof(malloc_statistics_t)); } private: - FastMallocZone(TCMalloc_PageHeap*, TCMalloc_ThreadCache**, TCMalloc_Central_FreeListPadded*); + FastMallocZone(TCMalloc_PageHeap*, TCMalloc_ThreadCache**, TCMalloc_Central_FreeListPadded*, PageHeapAllocator<Span>*, PageHeapAllocator<TCMalloc_ThreadCache>*); static size_t size(malloc_zone_t*, const void*); static void* zoneMalloc(malloc_zone_t*, size_t); static void* zoneCalloc(malloc_zone_t*, size_t numItems, size_t size); @@ -352,6 +354,8 @@ private: TCMalloc_PageHeap* m_pageHeap; TCMalloc_ThreadCache** m_threadHeaps; TCMalloc_Central_FreeListPadded* m_centralCaches; + PageHeapAllocator<Span>* m_spanAllocator; + 
PageHeapAllocator<TCMalloc_ThreadCache>* m_pageHeapAllocator; }; #endif @@ -820,6 +824,9 @@ class PageHeapAllocator { char* free_area_; size_t free_avail_; + // Linked list of all regions allocated by this allocator + void* allocated_regions_; + // Free list of already carved objects void* free_list_; @@ -830,6 +837,7 @@ class PageHeapAllocator { void Init() { ASSERT(kAlignedSize <= kAllocIncrement); inuse_ = 0; + allocated_regions_ = 0; free_area_ = NULL; free_avail_ = 0; free_list_ = NULL; @@ -844,9 +852,14 @@ class PageHeapAllocator { } else { if (free_avail_ < kAlignedSize) { // Need more room - free_area_ = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement)); - if (free_area_ == NULL) CRASH(); - free_avail_ = kAllocIncrement; + char* new_allocation = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement)); + if (!new_allocation) + CRASH(); + + *(void**)new_allocation = allocated_regions_; + allocated_regions_ = new_allocation; + free_area_ = new_allocation + kAlignedSize; + free_avail_ = kAllocIncrement - kAlignedSize; } result = free_area_; free_area_ += kAlignedSize; @@ -863,6 +876,18 @@ class PageHeapAllocator { } int inuse() const { return inuse_; } + +#if defined(WTF_CHANGES) && PLATFORM(DARWIN) + template <class Recorder> + void recordAdministrativeRegions(Recorder& recorder, const RemoteMemoryReader& reader) + { + vm_address_t adminAllocation = reinterpret_cast<vm_address_t>(allocated_regions_); + while (adminAllocation) { + recorder.recordRegion(adminAllocation, kAllocIncrement); + adminAllocation = *reader(reinterpret_cast<vm_address_t*>(adminAllocation)); + } + } +#endif }; // ------------------------------------------------------------------------- @@ -1378,8 +1403,14 @@ static ALWAYS_INLINE void mergeDecommittedStates(Span*, Span*) { } #else static ALWAYS_INLINE void mergeDecommittedStates(Span* destination, Span* other) { - if (other->decommitted) + if (destination->decommitted && !other->decommitted) { + 
TCMalloc_SystemRelease(reinterpret_cast<void*>(other->start << kPageShift), + static_cast<size_t>(other->length << kPageShift)); + } else if (other->decommitted && !destination->decommitted) { + TCMalloc_SystemRelease(reinterpret_cast<void*>(destination->start << kPageShift), + static_cast<size_t>(destination->length << kPageShift)); destination->decommitted = true; + } } #endif @@ -3571,7 +3602,7 @@ extern "C" struct mallinfo mallinfo(void) { #if defined(__GLIBC__) extern "C" { -# if defined(__GNUC__) && !defined(__MACH__) && defined(HAVE___ATTRIBUTE__) +#if COMPILER(GCC) && !defined(__MACH__) && defined(HAVE___ATTRIBUTE__) // Potentially faster variants that use the gcc alias extension. // Mach-O (Darwin) does not support weak aliases, hence the __MACH__ check. # define ALIAS(x) __attribute__ ((weak, alias (x))) @@ -3630,6 +3661,7 @@ public: void visit(void* ptr) { m_freeObjects.add(ptr); } bool isFreeObject(void* ptr) const { return m_freeObjects.contains(ptr); } + bool isFreeObject(vm_address_t ptr) const { return isFreeObject(reinterpret_cast<void*>(ptr)); } size_t freeObjectCount() const { return m_freeObjects.size(); } void findFreeObjects(TCMalloc_ThreadCache* threadCache) @@ -3680,7 +3712,9 @@ class PageMapMemoryUsageRecorder { vm_range_recorder_t* m_recorder; const RemoteMemoryReader& m_reader; const FreeObjectFinder& m_freeObjectFinder; - mutable HashSet<void*> m_seenPointers; + + HashSet<void*> m_seenPointers; + Vector<Span*> m_coalescedSpans; public: PageMapMemoryUsageRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader, const FreeObjectFinder& freeObjectFinder) @@ -3692,51 +3726,133 @@ public: , m_freeObjectFinder(freeObjectFinder) { } - int visit(void* ptr) const + ~PageMapMemoryUsageRecorder() + { + ASSERT(!m_coalescedSpans.size()); + } + + void recordPendingRegions() + { + Span* lastSpan = m_coalescedSpans[m_coalescedSpans.size() - 1]; + vm_range_t ptrRange = { 
m_coalescedSpans[0]->start << kPageShift, 0 }; + ptrRange.size = (lastSpan->start << kPageShift) - ptrRange.address + (lastSpan->length * kPageSize); + + // Mark the memory region the spans represent as a candidate for containing pointers + if (m_typeMask & MALLOC_PTR_REGION_RANGE_TYPE) + (*m_recorder)(m_task, m_context, MALLOC_PTR_REGION_RANGE_TYPE, &ptrRange, 1); + + if (!(m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE)) { + m_coalescedSpans.clear(); + return; + } + + Vector<vm_range_t, 1024> allocatedPointers; + for (size_t i = 0; i < m_coalescedSpans.size(); ++i) { + Span *theSpan = m_coalescedSpans[i]; + if (theSpan->free) + continue; + + vm_address_t spanStartAddress = theSpan->start << kPageShift; + vm_size_t spanSizeInBytes = theSpan->length * kPageSize; + + if (!theSpan->sizeclass) { + // If it's an allocated large object span, mark it as in use + if (!m_freeObjectFinder.isFreeObject(spanStartAddress)) + allocatedPointers.append((vm_range_t){spanStartAddress, spanSizeInBytes}); + } else { + const size_t objectSize = ByteSizeForClass(theSpan->sizeclass); + + // Mark each allocated small object within the span as in use + const vm_address_t endOfSpan = spanStartAddress + spanSizeInBytes; + for (vm_address_t object = spanStartAddress; object + objectSize <= endOfSpan; object += objectSize) { + if (!m_freeObjectFinder.isFreeObject(object)) + allocatedPointers.append((vm_range_t){object, objectSize}); + } + } + } + + (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, allocatedPointers.data(), allocatedPointers.size()); + + m_coalescedSpans.clear(); + } + + int visit(void* ptr) { if (!ptr) return 1; Span* span = m_reader(reinterpret_cast<Span*>(ptr)); + if (!span->start) + return 1; + if (m_seenPointers.contains(ptr)) return span->length; m_seenPointers.add(ptr); - // Mark the memory used for the Span itself as an administrative region - vm_range_t ptrRange = { reinterpret_cast<vm_address_t>(ptr), sizeof(Span) }; - if (m_typeMask & 
(MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) - (*m_recorder)(m_task, m_context, MALLOC_ADMIN_REGION_RANGE_TYPE, &ptrRange, 1); + if (!m_coalescedSpans.size()) { + m_coalescedSpans.append(span); + return span->length; + } - ptrRange.address = span->start << kPageShift; - ptrRange.size = span->length * kPageSize; + Span* previousSpan = m_coalescedSpans[m_coalescedSpans.size() - 1]; + vm_address_t previousSpanStartAddress = previousSpan->start << kPageShift; + vm_size_t previousSpanSizeInBytes = previousSpan->length * kPageSize; - // Mark the memory region the span represents as candidates for containing pointers - if (m_typeMask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE)) - (*m_recorder)(m_task, m_context, MALLOC_PTR_REGION_RANGE_TYPE, &ptrRange, 1); + // If the new span is adjacent to the previous span, do nothing for now. + vm_address_t spanStartAddress = span->start << kPageShift; + if (spanStartAddress == previousSpanStartAddress + previousSpanSizeInBytes) { + m_coalescedSpans.append(span); + return span->length; + } - if (!span->free && (m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE)) { - // If it's an allocated large object span, mark it as in use - if (span->sizeclass == 0 && !m_freeObjectFinder.isFreeObject(reinterpret_cast<void*>(ptrRange.address))) - (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, &ptrRange, 1); - else if (span->sizeclass) { - const size_t byteSize = ByteSizeForClass(span->sizeclass); - unsigned totalObjects = (span->length << kPageShift) / byteSize; - ASSERT(span->refcount <= totalObjects); - char* ptr = reinterpret_cast<char*>(span->start << kPageShift); + // New span is not adjacent to previous span, so record the spans coalesced so far. 
+ recordPendingRegions(); + m_coalescedSpans.append(span); - // Mark each allocated small object within the span as in use - for (unsigned i = 0; i < totalObjects; i++) { - char* thisObject = ptr + (i * byteSize); - if (m_freeObjectFinder.isFreeObject(thisObject)) - continue; + return span->length; + } +}; - vm_range_t objectRange = { reinterpret_cast<vm_address_t>(thisObject), byteSize }; - (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, &objectRange, 1); - } - } +class AdminRegionRecorder { + task_t m_task; + void* m_context; + unsigned m_typeMask; + vm_range_recorder_t* m_recorder; + const RemoteMemoryReader& m_reader; + + Vector<vm_range_t, 1024> m_pendingRegions; + +public: + AdminRegionRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader) + : m_task(task) + , m_context(context) + , m_typeMask(typeMask) + , m_recorder(recorder) + , m_reader(reader) + { } + + void recordRegion(vm_address_t ptr, size_t size) + { + if (m_typeMask & MALLOC_ADMIN_REGION_RANGE_TYPE) + m_pendingRegions.append((vm_range_t){ ptr, size }); + } + + void visit(void *ptr, size_t size) + { + recordRegion(reinterpret_cast<vm_address_t>(ptr), size); + } + + void recordPendingRegions() + { + if (m_pendingRegions.size()) { + (*m_recorder)(m_task, m_context, MALLOC_ADMIN_REGION_RANGE_TYPE, m_pendingRegions.data(), m_pendingRegions.size()); + m_pendingRegions.clear(); } + } - return span->length; + ~AdminRegionRecorder() + { + ASSERT(!m_pendingRegions.size()); } }; @@ -3759,10 +3875,22 @@ kern_return_t FastMallocZone::enumerate(task_t task, void* context, unsigned typ TCMalloc_PageHeap::PageMap* pageMap = &pageHeap->pagemap_; PageMapFreeObjectFinder pageMapFinder(memoryReader, finder); - pageMap->visit(pageMapFinder, memoryReader); + pageMap->visitValues(pageMapFinder, memoryReader); PageMapMemoryUsageRecorder usageRecorder(task, context, typeMask, recorder, memoryReader, finder); - pageMap->visit(usageRecorder, 
memoryReader); + pageMap->visitValues(usageRecorder, memoryReader); + usageRecorder.recordPendingRegions(); + + AdminRegionRecorder adminRegionRecorder(task, context, typeMask, recorder, memoryReader); + pageMap->visitAllocations(adminRegionRecorder, memoryReader); + + PageHeapAllocator<Span>* spanAllocator = memoryReader(mzone->m_spanAllocator); + PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator = memoryReader(mzone->m_pageHeapAllocator); + + spanAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader); + pageHeapAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader); + + adminRegionRecorder.recordPendingRegions(); return 0; } @@ -3803,15 +3931,24 @@ void* FastMallocZone::zoneRealloc(malloc_zone_t*, void*, size_t) extern "C" { malloc_introspection_t jscore_fastmalloc_introspection = { &FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print, - &FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics }; + &FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics + +#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) + , 0 // zone_locked will not be called on the zone unless it advertises itself as version five or higher. 
+#endif + + }; } -FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache** threadHeaps, TCMalloc_Central_FreeListPadded* centralCaches) +FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache** threadHeaps, TCMalloc_Central_FreeListPadded* centralCaches, PageHeapAllocator<Span>* spanAllocator, PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator) : m_pageHeap(pageHeap) , m_threadHeaps(threadHeaps) , m_centralCaches(centralCaches) + , m_spanAllocator(spanAllocator) + , m_pageHeapAllocator(pageHeapAllocator) { memset(&m_zone, 0, sizeof(m_zone)); + m_zone.version = 4; m_zone.zone_name = "JavaScriptCore FastMalloc"; m_zone.size = &FastMallocZone::size; m_zone.malloc = &FastMallocZone::zoneMalloc; @@ -3827,7 +3964,7 @@ FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache void FastMallocZone::init() { - static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache)); + static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache), &span_allocator, &threadheap_allocator); } #endif diff --git a/JavaScriptCore/wtf/HashTraits.h b/JavaScriptCore/wtf/HashTraits.h index b3c0b7a..c8d40f7 100644 --- a/JavaScriptCore/wtf/HashTraits.h +++ b/JavaScriptCore/wtf/HashTraits.h @@ -21,8 +21,8 @@ #ifndef WTF_HashTraits_h #define WTF_HashTraits_h -#include "Assertions.h" #include "HashFunctions.h" +#include "TypeTraits.h" #include <utility> #include <limits> @@ -31,47 +31,6 @@ namespace WTF { using std::pair; using std::make_pair; - template<typename T> struct IsInteger { static const bool value = false; }; - template<> struct IsInteger<bool> { static const bool value = true; }; - template<> struct IsInteger<char> { static const bool value = true; }; - template<> struct IsInteger<signed char> { static const bool value = true; }; - template<> struct IsInteger<unsigned char> { static const bool value = true; 
}; - template<> struct IsInteger<short> { static const bool value = true; }; - template<> struct IsInteger<unsigned short> { static const bool value = true; }; - template<> struct IsInteger<int> { static const bool value = true; }; - template<> struct IsInteger<unsigned int> { static const bool value = true; }; - template<> struct IsInteger<long> { static const bool value = true; }; - template<> struct IsInteger<unsigned long> { static const bool value = true; }; - template<> struct IsInteger<long long> { static const bool value = true; }; - template<> struct IsInteger<unsigned long long> { static const bool value = true; }; - -#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED) - template<> struct IsInteger<wchar_t> { static const bool value = true; }; -#endif - - COMPILE_ASSERT(IsInteger<bool>::value, WTF_IsInteger_bool_true); - COMPILE_ASSERT(IsInteger<char>::value, WTF_IsInteger_char_true); - COMPILE_ASSERT(IsInteger<signed char>::value, WTF_IsInteger_signed_char_true); - COMPILE_ASSERT(IsInteger<unsigned char>::value, WTF_IsInteger_unsigned_char_true); - COMPILE_ASSERT(IsInteger<short>::value, WTF_IsInteger_short_true); - COMPILE_ASSERT(IsInteger<unsigned short>::value, WTF_IsInteger_unsigned_short_true); - COMPILE_ASSERT(IsInteger<int>::value, WTF_IsInteger_int_true); - COMPILE_ASSERT(IsInteger<unsigned int>::value, WTF_IsInteger_unsigned_int_true); - COMPILE_ASSERT(IsInteger<long>::value, WTF_IsInteger_long_true); - COMPILE_ASSERT(IsInteger<unsigned long>::value, WTF_IsInteger_unsigned_long_true); - COMPILE_ASSERT(IsInteger<long long>::value, WTF_IsInteger_long_long_true); - COMPILE_ASSERT(IsInteger<unsigned long long>::value, WTF_IsInteger_unsigned_long_long_true); - -#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED) - COMPILE_ASSERT(IsInteger<wchar_t>::value, WTF_IsInteger_wchar_t_true); -#endif - - COMPILE_ASSERT(!IsInteger<char*>::value, WTF_IsInteger_char_pointer_false); - COMPILE_ASSERT(!IsInteger<const char* >::value, 
WTF_IsInteger_const_char_pointer_false); - COMPILE_ASSERT(!IsInteger<volatile char* >::value, WTF_IsInteger_volatile_char_pointer__false); - COMPILE_ASSERT(!IsInteger<double>::value, WTF_IsInteger_double_false); - COMPILE_ASSERT(!IsInteger<float>::value, WTF_IsInteger_float_false); - template<typename T> struct HashTraits; template<bool isInteger, typename T> struct GenericHashTraitsBase; diff --git a/JavaScriptCore/wtf/MainThread.cpp b/JavaScriptCore/wtf/MainThread.cpp index c7a6caa..3c19b7a 100644 --- a/JavaScriptCore/wtf/MainThread.cpp +++ b/JavaScriptCore/wtf/MainThread.cpp @@ -29,26 +29,25 @@ #include "config.h" #include "MainThread.h" +#include "CurrentTime.h" +#include "Deque.h" #include "StdLibExtras.h" #include "Threading.h" -#include "Vector.h" namespace WTF { struct FunctionWithContext { MainThreadFunction* function; void* context; - ThreadCondition* syncFlag; - FunctionWithContext(MainThreadFunction* function = 0, void* context = 0, ThreadCondition* syncFlag = 0) + FunctionWithContext(MainThreadFunction* function = 0, void* context = 0) : function(function) , context(context) - , syncFlag(syncFlag) { } }; -typedef Vector<FunctionWithContext> FunctionQueue; +typedef Deque<FunctionWithContext> FunctionQueue; static bool callbacksPaused; // This global variable is only accessed from main thread. @@ -64,12 +63,14 @@ static FunctionQueue& functionQueue() return staticFunctionQueue; } -#if !PLATFORM(WIN) void initializeMainThread() { mainThreadFunctionQueueMutex(); + initializeMainThreadPlatform(); } -#endif + +// 0.1 sec delays in UI is approximate threshold when they become noticeable. Have a limit that's half of that. 
+static const double maxRunLoopSuspensionTime = 0.05; void dispatchFunctionsFromMainThread() { @@ -78,52 +79,42 @@ void dispatchFunctionsFromMainThread() if (callbacksPaused) return; - FunctionQueue queueCopy; - { - MutexLocker locker(mainThreadFunctionQueueMutex()); - queueCopy.swap(functionQueue()); - } + double startTime = currentTime(); + + FunctionWithContext invocation; + while (true) { + { + MutexLocker locker(mainThreadFunctionQueueMutex()); + if (!functionQueue().size()) + break; + invocation = functionQueue().first(); + functionQueue().removeFirst(); + } - for (unsigned i = 0; i < queueCopy.size(); ++i) { - FunctionWithContext& invocation = queueCopy[i]; invocation.function(invocation.context); - if (invocation.syncFlag) - invocation.syncFlag->signal(); + + // If we are running accumulated functions for too long so UI may become unresponsive, we need to + // yield so the user input can be processed. Otherwise user may not be able to even close the window. + // This code has effect only in case the scheduleDispatchFunctionsOnMainThread() is implemented in a way that + // allows input events to be processed before we are back here. 
+ if (currentTime() - startTime > maxRunLoopSuspensionTime) { + scheduleDispatchFunctionsOnMainThread(); + break; + } } } void callOnMainThread(MainThreadFunction* function, void* context) { ASSERT(function); - + bool needToSchedule = false; { MutexLocker locker(mainThreadFunctionQueueMutex()); + needToSchedule = functionQueue().size() == 0; functionQueue().append(FunctionWithContext(function, context)); } - - scheduleDispatchFunctionsOnMainThread(); -} - -void callOnMainThreadAndWait(MainThreadFunction* function, void* context) -{ - ASSERT(function); - - if (isMainThread()) { - function(context); - return; - } - - ThreadCondition syncFlag; - Mutex conditionMutex; - - { - MutexLocker locker(mainThreadFunctionQueueMutex()); - functionQueue().append(FunctionWithContext(function, context, &syncFlag)); - conditionMutex.lock(); - } - - scheduleDispatchFunctionsOnMainThread(); - syncFlag.wait(conditionMutex); + if (needToSchedule) + scheduleDispatchFunctionsOnMainThread(); } void setMainThreadCallbacksPaused(bool paused) diff --git a/JavaScriptCore/wtf/MainThread.h b/JavaScriptCore/wtf/MainThread.h index 953b986..01ce804 100644 --- a/JavaScriptCore/wtf/MainThread.h +++ b/JavaScriptCore/wtf/MainThread.h @@ -37,7 +37,6 @@ class Mutex; typedef void MainThreadFunction(void*); void callOnMainThread(MainThreadFunction*, void* context); -void callOnMainThreadAndWait(MainThreadFunction*, void* context); void setMainThreadCallbacksPaused(bool paused); @@ -45,9 +44,10 @@ void setMainThreadCallbacksPaused(bool paused); void initializeMainThread(); // These functions are internal to the callOnMainThread implementation. 
-void dispatchFunctionsFromMainThread(); +void initializeMainThreadPlatform(); void scheduleDispatchFunctionsOnMainThread(); Mutex& mainThreadFunctionQueueMutex(); +void dispatchFunctionsFromMainThread(); } // namespace WTF diff --git a/JavaScriptCore/wtf/MessageQueue.h b/JavaScriptCore/wtf/MessageQueue.h index 19c5c10..9549f37 100644 --- a/JavaScriptCore/wtf/MessageQueue.h +++ b/JavaScriptCore/wtf/MessageQueue.h @@ -1,5 +1,6 @@ /* * Copyright (C) 2008 Apple Inc. All rights reserved. + * Copyright (C) 2009 Google Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,6 +30,7 @@ #ifndef MessageQueue_h #define MessageQueue_h +#include <limits> #include <wtf/Assertions.h> #include <wtf/Deque.h> #include <wtf/Noncopyable.h> @@ -50,7 +52,8 @@ namespace WTF { void append(const DataType&); void prepend(const DataType&); bool waitForMessage(DataType&); - MessageQueueWaitResult waitForMessageTimed(DataType&, double absoluteTime); + template<typename Predicate> + MessageQueueWaitResult waitForMessageFilteredWithTimeout(DataType&, Predicate&, double absoluteTime); void kill(); bool tryGetMessage(DataType&); @@ -59,7 +62,11 @@ namespace WTF { // The result of isEmpty() is only valid if no other thread is manipulating the queue at the same time. 
bool isEmpty(); + static double infiniteTime() { return std::numeric_limits<double>::max(); } + private: + static bool alwaysTruePredicate(DataType&) { return true; } + mutable Mutex m_mutex; ThreadCondition m_condition; Deque<DataType> m_queue; @@ -85,38 +92,33 @@ namespace WTF { template<typename DataType> inline bool MessageQueue<DataType>::waitForMessage(DataType& result) { - MutexLocker lock(m_mutex); - - while (!m_killed && m_queue.isEmpty()) - m_condition.wait(m_mutex); - - if (m_killed) - return false; - - ASSERT(!m_queue.isEmpty()); - result = m_queue.first(); - m_queue.removeFirst(); - return true; + MessageQueueWaitResult exitReason = waitForMessageFilteredWithTimeout(result, MessageQueue<DataType>::alwaysTruePredicate, infiniteTime()); + ASSERT(exitReason == MessageQueueTerminated || exitReason == MessageQueueMessageReceived); + return exitReason == MessageQueueMessageReceived; } template<typename DataType> - inline MessageQueueWaitResult MessageQueue<DataType>::waitForMessageTimed(DataType& result, double absoluteTime) + template<typename Predicate> + inline MessageQueueWaitResult MessageQueue<DataType>::waitForMessageFilteredWithTimeout(DataType& result, Predicate& predicate, double absoluteTime) { MutexLocker lock(m_mutex); bool timedOut = false; - while (!m_killed && !timedOut && m_queue.isEmpty()) + DequeConstIterator<DataType> found = m_queue.end(); + while (!m_killed && !timedOut && (found = m_queue.findIf(predicate)) == m_queue.end()) timedOut = !m_condition.timedWait(m_mutex, absoluteTime); + ASSERT(!timedOut || absoluteTime != infiniteTime()); + if (m_killed) return MessageQueueTerminated; if (timedOut) return MessageQueueTimeout; - ASSERT(!m_queue.isEmpty()); - result = m_queue.first(); - m_queue.removeFirst(); + ASSERT(found != m_queue.end()); + result = *found; + m_queue.remove(found); return MessageQueueMessageReceived; } @@ -157,7 +159,7 @@ namespace WTF { MutexLocker lock(m_mutex); return m_killed; } -} +} // namespace WTF using 
WTF::MessageQueue; // MessageQueueWaitResult enum and all its values. diff --git a/JavaScriptCore/wtf/OwnPtr.h b/JavaScriptCore/wtf/OwnPtr.h index 256b55c..af939e7 100644 --- a/JavaScriptCore/wtf/OwnPtr.h +++ b/JavaScriptCore/wtf/OwnPtr.h @@ -21,10 +21,11 @@ #ifndef WTF_OwnPtr_h #define WTF_OwnPtr_h +#include "Assertions.h" +#include "Noncopyable.h" +#include "TypeTraits.h" #include <algorithm> #include <memory> -#include <wtf/Assertions.h> -#include <wtf/Noncopyable.h> #if PLATFORM(WIN) @@ -41,10 +42,6 @@ namespace WTF { // Unlike most of our smart pointers, OwnPtr can take either the pointer type or the pointed-to type. - // FIXME: Share a single RemovePointer class template with RetainPtr. - template <typename T> struct OwnPtrRemovePointer { typedef T type; }; - template <typename T> struct OwnPtrRemovePointer<T*> { typedef T type; }; - template <typename T> inline void deleteOwnedPtr(T* ptr) { typedef char known[sizeof(T) ? 1 : -1]; @@ -63,7 +60,7 @@ namespace WTF { template <typename T> class OwnPtr : Noncopyable { public: - typedef typename OwnPtrRemovePointer<T>::type ValueType; + typedef typename RemovePointer<T>::Type ValueType; typedef ValueType* PtrType; explicit OwnPtr(PtrType ptr = 0) : m_ptr(ptr) { } diff --git a/JavaScriptCore/wtf/Platform.h b/JavaScriptCore/wtf/Platform.h index fea00c4..1813114 100644 --- a/JavaScriptCore/wtf/Platform.h +++ b/JavaScriptCore/wtf/Platform.h @@ -40,6 +40,12 @@ /* be used regardless of operating environment */ #ifdef __APPLE__ #define WTF_PLATFORM_DARWIN 1 +#include <AvailabilityMacros.h> +#if !defined(MAC_OS_X_VERSION_10_5) || MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_5 +#define BUILDING_ON_TIGER 1 +#elif !defined(MAC_OS_X_VERSION_10_6) || MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_6 +#define BUILDING_ON_LEOPARD 1 +#endif #endif /* PLATFORM(WIN_OS) */ @@ -238,6 +244,11 @@ #define WTF_PLATFORM_X86_64 1 #endif +/* PLATFORM(SH4) */ +#if defined(__SH4__) +#define WTF_PLATFORM_SH4 1 +#endif + /* 
PLATFORM(SPARC64) */ #if defined(__sparc64__) #define WTF_PLATFORM_SPARC64 1 @@ -268,8 +279,14 @@ #endif #endif +/* COMPILER(RVCT) */ +#if defined(__CC_ARM) || defined(__ARMCC__) +#define WTF_COMPILER_RVCT 1 +#endif + /* COMPILER(GCC) */ -#if defined(__GNUC__) +/* --gnu option of the RVCT compiler also defines __GNUC__ */ +#if defined(__GNUC__) && !COMPILER(RVCT) #define WTF_COMPILER_GCC 1 #endif @@ -290,11 +307,6 @@ #define WTF_COMPILER_CYGWIN 1 #endif -/* COMPILER(RVCT) */ -#if defined(__CC_ARM) || defined(__ARMCC__) -#define WTF_COMPILER_RVCT 1 -#endif - /* COMPILER(WINSCW) */ #if defined(__WINSCW__) #define WTF_COMPILER_WINSCW 1 @@ -325,9 +337,7 @@ #define ENABLE_DASHBOARD_SUPPORT 1 #endif #define HAVE_READLINE 1 -#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) -#define HAVE_DTRACE 1 -#endif +#define HAVE_RUNLOOP_TIMER 1 #endif #if PLATFORM(CHROMIUM) && PLATFORM(DARWIN) @@ -363,6 +373,7 @@ #if PLATFORM(DARWIN) #define HAVE_ERRNO_H 1 +#define HAVE_LANGINFO_H 1 #define HAVE_MMAP 1 #define HAVE_MERGESORT 1 #define HAVE_SBRK 1 @@ -371,6 +382,10 @@ #define HAVE_SYS_TIME_H 1 #define HAVE_SYS_TIMEB_H 1 +#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) +#define HAVE_MADV_FREE_REUSE 1 +#endif + #elif PLATFORM(WIN_OS) #define HAVE_FLOAT_H 1 @@ -394,11 +409,22 @@ #define HAVE_SYS_PARAM_H 1 #endif +#elif PLATFORM(ANDROID) + +#define HAVE_ERRNO_H 1 +#define HAVE_LANGINFO_H 0 +#define HAVE_MMAP 1 +#define HAVE_SBRK 1 +#define HAVE_STRINGS_H 1 +#define HAVE_SYS_PARAM_H 1 +#define HAVE_SYS_TIME_H 1 + #else /* FIXME: is this actually used or do other platforms generate their own config.h? 
*/ #define HAVE_ERRNO_H 1 +#define HAVE_LANGINFO_H 1 #define HAVE_MMAP 1 #define HAVE_SBRK 1 #define HAVE_STRINGS_H 1 @@ -466,14 +492,22 @@ #define ENABLE_ARCHIVE 1 #endif +#if !defined(ENABLE_ON_FIRST_TEXTAREA_FOCUS_SELECT_ALL) +#define ENABLE_ON_FIRST_TEXTAREA_FOCUS_SELECT_ALL 0 +#endif + #if !defined(WTF_USE_ALTERNATE_JSIMMEDIATE) && PLATFORM(X86_64) && PLATFORM(MAC) #define WTF_USE_ALTERNATE_JSIMMEDIATE 1 #endif +#if !defined(ENABLE_REPAINT_THROTTLING) +#define ENABLE_REPAINT_THROTTLING 0 +#endif + #if !defined(ENABLE_JIT) -/* x86-64 support is under development. */ +/* The JIT is tested & working on x86_64 Mac */ #if PLATFORM(X86_64) && PLATFORM(MAC) - #define ENABLE_JIT 0 + #define ENABLE_JIT 1 #define WTF_USE_JIT_STUB_ARGUMENT_REGISTER 1 /* The JIT is tested & working on x86 Mac */ #elif PLATFORM(X86) && PLATFORM(MAC) diff --git a/JavaScriptCore/wtf/RefCounted.h b/JavaScriptCore/wtf/RefCounted.h index ac8e167..93ee0da 100644 --- a/JavaScriptCore/wtf/RefCounted.h +++ b/JavaScriptCore/wtf/RefCounted.h @@ -49,8 +49,8 @@ public: } protected: - RefCountedBase(int initialRefCount) - : m_refCount(initialRefCount) + RefCountedBase() + : m_refCount(1) #ifndef NDEBUG , m_deletionHasBegun(false) #endif @@ -76,6 +76,9 @@ protected: } protected: + template<class T> + friend class CrossThreadRefCounted; + int m_refCount; #ifndef NDEBUG bool m_deletionHasBegun; @@ -85,11 +88,6 @@ protected: template<class T> class RefCounted : public RefCountedBase { public: - RefCounted(int initialRefCount = 1) - : RefCountedBase(initialRefCount) - { - } - void deref() { if (derefBase()) diff --git a/JavaScriptCore/wtf/RetainPtr.h b/JavaScriptCore/wtf/RetainPtr.h index a66a127..77f25e0 100644 --- a/JavaScriptCore/wtf/RetainPtr.h +++ b/JavaScriptCore/wtf/RetainPtr.h @@ -21,6 +21,7 @@ #ifndef RetainPtr_h #define RetainPtr_h +#include "TypeTraits.h" #include <algorithm> #include <CoreFoundation/CoreFoundation.h> @@ -30,14 +31,6 @@ namespace WTF { - template <typename T> struct RemovePointer 
{ - typedef T type; - }; - - template <typename T> struct RemovePointer<T*> { - typedef T type; - }; - // Unlike most most of our smart pointers, RetainPtr can take either the pointer type or the pointed-to type, // so both RetainPtr<NSDictionary> and RetainPtr<CFDictionaryRef> will work. @@ -56,7 +49,7 @@ namespace WTF { template <typename T> class RetainPtr { public: - typedef typename RemovePointer<T>::type ValueType; + typedef typename RemovePointer<T>::Type ValueType; typedef ValueType* PtrType; RetainPtr() : m_ptr(0) {} diff --git a/JavaScriptCore/wtf/TCPageMap.h b/JavaScriptCore/wtf/TCPageMap.h index 3e6b80e..9ffd77b 100644 --- a/JavaScriptCore/wtf/TCPageMap.h +++ b/JavaScriptCore/wtf/TCPageMap.h @@ -54,7 +54,6 @@ #endif #include <string.h> - #include "Assertions.h" // Single-level array @@ -164,7 +163,7 @@ class TCMalloc_PageMap2 { #ifdef WTF_CHANGES template<class Visitor, class MemoryReader> - void visit(const Visitor& visitor, const MemoryReader& reader) + void visitValues(Visitor& visitor, const MemoryReader& reader) { for (int i = 0; i < ROOT_LENGTH; i++) { if (!root_[i]) @@ -175,6 +174,14 @@ class TCMalloc_PageMap2 { ; } } + + template<class Visitor, class MemoryReader> + void visitAllocations(Visitor& visitor, const MemoryReader&) { + for (int i = 0; i < ROOT_LENGTH; i++) { + if (root_[i]) + visitor.visit(root_[i], sizeof(Leaf)); + } + } #endif }; @@ -266,7 +273,7 @@ class TCMalloc_PageMap3 { #ifdef WTF_CHANGES template<class Visitor, class MemoryReader> - void visit(const Visitor& visitor, const MemoryReader& reader) { + void visitValues(Visitor& visitor, const MemoryReader& reader) { Node* root = reader(root_); for (int i = 0; i < INTERIOR_LENGTH; i++) { if (!root->ptrs[i]) @@ -283,6 +290,26 @@ class TCMalloc_PageMap3 { } } } + + template<class Visitor, class MemoryReader> + void visitAllocations(Visitor& visitor, const MemoryReader& reader) { + visitor.visit(root_, sizeof(Node)); + + Node* root = reader(root_); + for (int i = 0; i < 
INTERIOR_LENGTH; i++) { + if (!root->ptrs[i]) + continue; + + visitor.visit(root->ptrs[i], sizeof(Node)); + Node* n = reader(root->ptrs[i]); + for (int j = 0; j < INTERIOR_LENGTH; j++) { + if (!n->ptrs[j]) + continue; + + visitor.visit(n->ptrs[j], sizeof(Leaf)); + } + } + } #endif }; diff --git a/JavaScriptCore/wtf/TCSystemAlloc.cpp b/JavaScriptCore/wtf/TCSystemAlloc.cpp index 3a8908d..bf2dcb1 100644 --- a/JavaScriptCore/wtf/TCSystemAlloc.cpp +++ b/JavaScriptCore/wtf/TCSystemAlloc.cpp @@ -381,9 +381,17 @@ void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size, size_t alignment) { return NULL; } +#if HAVE(MADV_FREE_REUSE) + +void TCMalloc_SystemRelease(void* start, size_t length) +{ + while (madvise(start, length, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { } +} + +#elif HAVE(MADV_DONTNEED) + void TCMalloc_SystemRelease(void* start, size_t length) { -#if HAVE(MADV_DONTNEED) if (FLAGS_malloc_devmem_start) { // It's not safe to use MADV_DONTNEED if we've been mapping // /dev/mem for heap memory @@ -414,25 +422,41 @@ void TCMalloc_SystemRelease(void* start, size_t length) errno == EAGAIN) { // NOP } - return; } -#endif +} -#if HAVE(MMAP) +#elif HAVE(MMAP) + +void TCMalloc_SystemRelease(void* start, size_t length) +{ void* newAddress = mmap(start, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0); // If the mmap failed then that's ok, we just won't return the memory to the system. 
ASSERT_UNUSED(newAddress, newAddress == start || newAddress == reinterpret_cast<void*>(MAP_FAILED)); - return; -#endif +} + +#else + +// Platforms that don't support returning memory use an empty inline version of TCMalloc_SystemRelease +// declared in TCSystemAlloc.h -#if !HAVE(MADV_DONTNEED) && !HAVE(MMAP) - UNUSED_PARAM(start); - UNUSED_PARAM(length); #endif + +#if HAVE(MADV_FREE_REUSE) + +void TCMalloc_SystemCommit(void* start, size_t length) +{ + while (madvise(start, length, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { } } -#if HAVE(VIRTUALALLOC) +#elif HAVE(VIRTUALALLOC) + void TCMalloc_SystemCommit(void*, size_t) { } + +#else + +// Platforms that don't need to explicitly commit memory use an empty inline version of TCMalloc_SystemCommit +// declared in TCSystemAlloc.h + #endif diff --git a/JavaScriptCore/wtf/TCSystemAlloc.h b/JavaScriptCore/wtf/TCSystemAlloc.h index d82e860..f2c915e 100644 --- a/JavaScriptCore/wtf/TCSystemAlloc.h +++ b/JavaScriptCore/wtf/TCSystemAlloc.h @@ -62,9 +62,13 @@ extern void* TCMalloc_SystemAlloc(size_t bytes, size_t *actual_bytes, // be released, partial pages will not.) extern void TCMalloc_SystemRelease(void* start, size_t length); -#if HAVE(VIRTUALALLOC) extern void TCMalloc_SystemCommit(void* start, size_t length); -#else + +#if !HAVE(MADV_FREE_REUSE) && !HAVE(MADV_DONTNEED) && !HAVE(MMAP) +inline void TCMalloc_SystemRelease(void*, size_t) { } +#endif + +#if !HAVE(VIRTUALALLOC) && !HAVE(MADV_FREE_REUSE) inline void TCMalloc_SystemCommit(void*, size_t) { } #endif diff --git a/JavaScriptCore/wtf/ThreadSpecific.h b/JavaScriptCore/wtf/ThreadSpecific.h index 7603802..8aaaf5f 100644 --- a/JavaScriptCore/wtf/ThreadSpecific.h +++ b/JavaScriptCore/wtf/ThreadSpecific.h @@ -129,8 +129,8 @@ inline void ThreadSpecific<T>::set(T* ptr) // 2) We do not need to hold many instances of ThreadSpecific<> data. This fixed number should be far enough. 
const int kMaxTlsKeySize = 256; -extern long g_tls_key_count; -extern DWORD g_tls_keys[kMaxTlsKeySize]; +long& tlsKeyCount(); +DWORD* tlsKeys(); template<typename T> inline ThreadSpecific<T>::ThreadSpecific() @@ -140,23 +140,23 @@ inline ThreadSpecific<T>::ThreadSpecific() if (tls_key == TLS_OUT_OF_INDEXES) CRASH(); - m_index = InterlockedIncrement(&g_tls_key_count) - 1; + m_index = InterlockedIncrement(&tlsKeyCount()) - 1; if (m_index >= kMaxTlsKeySize) CRASH(); - g_tls_keys[m_index] = tls_key; + tlsKeys()[m_index] = tls_key; } template<typename T> inline ThreadSpecific<T>::~ThreadSpecific() { // Does not invoke destructor functions. They will be called from ThreadSpecificThreadExit when the thread is detached. - TlsFree(g_tls_keys[m_index]); + TlsFree(tlsKeys()[m_index]); } template<typename T> inline T* ThreadSpecific<T>::get() { - Data* data = static_cast<Data*>(TlsGetValue(g_tls_keys[m_index])); + Data* data = static_cast<Data*>(TlsGetValue(tlsKeys()[m_index])); return data ? data->value : 0; } @@ -166,7 +166,7 @@ inline void ThreadSpecific<T>::set(T* ptr) ASSERT(!get()); Data* data = new Data(ptr, this); data->destructor = &ThreadSpecific<T>::destroy; - TlsSetValue(g_tls_keys[m_index], data); + TlsSetValue(tlsKeys()[m_index], data); } #else @@ -190,7 +190,7 @@ inline void ThreadSpecific<T>::destroy(void* ptr) #if USE(PTHREADS) pthread_setspecific(data->owner->m_key, 0); #elif PLATFORM(WIN_OS) - TlsSetValue(g_tls_keys[data->owner->m_index], 0); + TlsSetValue(tlsKeys()[data->owner->m_index], 0); #else #error ThreadSpecific is not implemented for this platform. 
#endif diff --git a/JavaScriptCore/wtf/ThreadSpecificWin.cpp b/JavaScriptCore/wtf/ThreadSpecificWin.cpp index 1a3febb..f2c0cad 100644 --- a/JavaScriptCore/wtf/ThreadSpecificWin.cpp +++ b/JavaScriptCore/wtf/ThreadSpecificWin.cpp @@ -29,14 +29,23 @@ namespace WTF { -long g_tls_key_count = 0; -DWORD g_tls_keys[kMaxTlsKeySize]; +long& tlsKeyCount() +{ + static long count; + return count; +} + +DWORD* tlsKeys() +{ + static DWORD keys[kMaxTlsKeySize]; + return keys; +} void ThreadSpecificThreadExit() { - for (long i = 0; i < g_tls_key_count; i++) { + for (long i = 0; i < tlsKeyCount(); i++) { // The layout of ThreadSpecific<T>::Data does not depend on T. So we are safe to do the static cast to ThreadSpecific<int> in order to access its data member. - ThreadSpecific<int>::Data* data = static_cast<ThreadSpecific<int>::Data*>(TlsGetValue(g_tls_keys[i])); + ThreadSpecific<int>::Data* data = static_cast<ThreadSpecific<int>::Data*>(TlsGetValue(tlsKeys()[i])); if (data) data->destructor(data); } diff --git a/JavaScriptCore/wtf/Threading.cpp b/JavaScriptCore/wtf/Threading.cpp index 41c9135..bd25ee7 100644 --- a/JavaScriptCore/wtf/Threading.cpp +++ b/JavaScriptCore/wtf/Threading.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2009 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,16 +26,21 @@ #include "config.h" #include "Threading.h" +#include <string.h> + namespace WTF { struct NewThreadContext { - NewThreadContext(ThreadFunction entryPoint, void* data) + NewThreadContext(ThreadFunction entryPoint, void* data, const char* name) : entryPoint(entryPoint) , data(data) - { } + , name(name) + { + } ThreadFunction entryPoint; void* data; + const char* name; Mutex creationMutex; }; @@ -44,6 +49,8 @@ static void* threadEntryPoint(void* contextData) { NewThreadContext* context = reinterpret_cast<NewThreadContext*>(contextData); + setThreadNameInternal(context->name); + // Block until our creating thread has completed any extra setup work { MutexLocker locker(context->creationMutex); @@ -59,7 +66,14 @@ static void* threadEntryPoint(void* contextData) ThreadIdentifier createThread(ThreadFunction entryPoint, void* data, const char* name) { - NewThreadContext* context = new NewThreadContext(entryPoint, data); + // Visual Studio has a 31-character limit on thread names. Longer names will + // be truncated silently, but we'd like callers to know about the limit. +#if !LOG_DISABLED + if (strlen(name) > 31) + LOG_ERROR("Thread name \"%s\" is longer than 31 characters and will be truncated by Visual Studio", name); +#endif + + NewThreadContext* context = new NewThreadContext(entryPoint, data, name); // Prevent the thread body from executing until we've established the thread identifier MutexLocker locker(context->creationMutex); diff --git a/JavaScriptCore/wtf/Threading.h b/JavaScriptCore/wtf/Threading.h index 1c0dab1..e562f35 100644 --- a/JavaScriptCore/wtf/Threading.h +++ b/JavaScriptCore/wtf/Threading.h @@ -110,10 +110,17 @@ namespace WTF { typedef uint32_t ThreadIdentifier; typedef void* (*ThreadFunction)(void* argument); -// Returns 0 if thread creation failed +// Returns 0 if thread creation failed. 
+// The thread name must be a literal since on some platforms it's passed in to the thread. ThreadIdentifier createThread(ThreadFunction, void*, const char* threadName); + +// Internal platform-specific createThread implementation. ThreadIdentifier createThreadInternal(ThreadFunction, void*, const char* threadName); +// Called in the thread during initialization. +// Helpful for platforms where the thread name must be set from within the thread. +void setThreadNameInternal(const char* threadName); + ThreadIdentifier currentThread(); bool isMainThread(); int waitForThreadCompletion(ThreadIdentifier, void**); @@ -212,9 +219,9 @@ inline int atomicDecrement(int volatile* addend) { return __gnu_cxx::__exchange_ #endif -template<class T> class ThreadSafeShared : Noncopyable { +class ThreadSafeSharedBase : Noncopyable { public: - ThreadSafeShared(int initialRefCount = 1) + ThreadSafeSharedBase(int initialRefCount = 1) : m_refCount(initialRefCount) { } @@ -229,20 +236,6 @@ public: #endif } - void deref() - { -#if USE(LOCKFREE_THREADSAFESHARED) - if (atomicDecrement(&m_refCount) <= 0) -#else - { - MutexLocker locker(m_mutex); - --m_refCount; - } - if (m_refCount <= 0) -#endif - delete static_cast<T*>(this); - } - bool hasOneRef() { return refCount() == 1; @@ -256,13 +249,50 @@ public: return static_cast<int const volatile &>(m_refCount); } +protected: + // Returns whether the pointer should be freed or not. 
+ bool derefBase() + { +#if USE(LOCKFREE_THREADSAFESHARED) + if (atomicDecrement(&m_refCount) <= 0) + return true; +#else + int refCount; + { + MutexLocker locker(m_mutex); + --m_refCount; + refCount = m_refCount; + } + if (refCount <= 0) + return true; +#endif + return false; + } + private: + template<class T> + friend class CrossThreadRefCounted; + int m_refCount; #if !USE(LOCKFREE_THREADSAFESHARED) mutable Mutex m_mutex; #endif }; +template<class T> class ThreadSafeShared : public ThreadSafeSharedBase { +public: + ThreadSafeShared(int initialRefCount = 1) + : ThreadSafeSharedBase(initialRefCount) + { + } + + void deref() + { + if (derefBase()) + delete static_cast<T*>(this); + } +}; + // This function must be called from the main thread. It is safe to call it repeatedly. // Darwin is an exception to this rule: it is OK to call it from any thread, the only requirement is that the calls are not reentrant. void initializeThreading(); diff --git a/JavaScriptCore/wtf/ThreadingGtk.cpp b/JavaScriptCore/wtf/ThreadingGtk.cpp index 24c34ca..b4f4de1 100644 --- a/JavaScriptCore/wtf/ThreadingGtk.cpp +++ b/JavaScriptCore/wtf/ThreadingGtk.cpp @@ -138,6 +138,10 @@ ThreadIdentifier createThreadInternal(ThreadFunction entryPoint, void* data, con return threadID; } +void setThreadNameInternal(const char*) +{ +} + int waitForThreadCompletion(ThreadIdentifier threadID, void** result) { ASSERT(threadID); diff --git a/JavaScriptCore/wtf/ThreadingNone.cpp b/JavaScriptCore/wtf/ThreadingNone.cpp index 0be2a4b..24431fc 100644 --- a/JavaScriptCore/wtf/ThreadingNone.cpp +++ b/JavaScriptCore/wtf/ThreadingNone.cpp @@ -34,6 +34,7 @@ namespace WTF { void initializeThreading() { } ThreadIdentifier createThreadInternal(ThreadFunction, void*, const char*) { return 0; } +void setThreadNameInternal(const char*) { } int waitForThreadCompletion(ThreadIdentifier, void**) { return 0; } void detachThread(ThreadIdentifier) { } ThreadIdentifier currentThread() { return 0; } diff --git 
a/JavaScriptCore/wtf/ThreadingPthreads.cpp b/JavaScriptCore/wtf/ThreadingPthreads.cpp index 105e42a..42133bc 100644 --- a/JavaScriptCore/wtf/ThreadingPthreads.cpp +++ b/JavaScriptCore/wtf/ThreadingPthreads.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2007 Apple Inc. All rights reserved. + * Copyright (C) 2007, 2009 Apple Inc. All rights reserved. * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com) * * Redistribution and use in source and binary forms, with or without @@ -26,18 +26,18 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ + #include "config.h" #include "Threading.h" -#include "StdLibExtras.h" - #if USE(PTHREADS) #include "CurrentTime.h" #include "HashMap.h" #include "MainThread.h" #include "RandomNumberSeed.h" - +#include "StdLibExtras.h" +#include "UnusedParam.h" #include <errno.h> #include <limits.h> #include <sys/time.h> @@ -48,7 +48,7 @@ typedef HashMap<ThreadIdentifier, pthread_t> ThreadMap; static Mutex* atomicallyInitializedStaticMutex; -#if !PLATFORM(DARWIN) +#if !PLATFORM(DARWIN) || PLATFORM(CHROMIUM) static ThreadIdentifier mainThreadIdentifier; // The thread that was the first to call initializeThreading(), which must be the main thread. 
#endif @@ -64,7 +64,7 @@ void initializeThreading() atomicallyInitializedStaticMutex = new Mutex; threadMapMutex(); initializeRandomNumberGenerator(); -#if !PLATFORM(DARWIN) +#if !PLATFORM(DARWIN) || PLATFORM(CHROMIUM) mainThreadIdentifier = currentThread(); #endif initializeMainThread(); @@ -133,7 +133,7 @@ static void clearPthreadHandleForIdentifier(ThreadIdentifier id) ThreadIdentifier createThreadInternal(ThreadFunction entryPoint, void* data, const char*) { pthread_t threadHandle; - if (pthread_create(&threadHandle, NULL, entryPoint, data)) { + if (pthread_create(&threadHandle, 0, entryPoint, data)) { LOG_ERROR("Failed to create pthread at entry point %p with data %p", entryPoint, data); return 0; } @@ -141,6 +141,15 @@ ThreadIdentifier createThreadInternal(ThreadFunction entryPoint, void* data, con return establishIdentifierForPthreadHandle(threadHandle); } +void setThreadNameInternal(const char* threadName) +{ +#if PLATFORM(DARWIN) && !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) + pthread_setname_np(threadName); +#else + UNUSED_PARAM(threadName); +#endif +} + int waitForThreadCompletion(ThreadIdentifier threadID, void** result) { ASSERT(threadID); @@ -176,7 +185,7 @@ ThreadIdentifier currentThread() bool isMainThread() { -#if PLATFORM(DARWIN) +#if PLATFORM(DARWIN) && !PLATFORM(CHROMIUM) return pthread_main_np(); #else return currentThread() == mainThreadIdentifier; @@ -195,8 +204,8 @@ Mutex::~Mutex() void Mutex::lock() { - if (pthread_mutex_lock(&m_mutex) != 0) - ASSERT(false); + int result = pthread_mutex_lock(&m_mutex); + ASSERT_UNUSED(result, !result); } bool Mutex::tryLock() @@ -205,17 +214,17 @@ bool Mutex::tryLock() if (result == 0) return true; - else if (result == EBUSY) + if (result == EBUSY) return false; - ASSERT(false); + ASSERT_NOT_REACHED(); return false; } void Mutex::unlock() { - if (pthread_mutex_unlock(&m_mutex) != 0) - ASSERT(false); + int result = pthread_mutex_unlock(&m_mutex); + ASSERT_UNUSED(result, !result); } 
ThreadCondition::ThreadCondition() @@ -230,8 +239,8 @@ ThreadCondition::~ThreadCondition() void ThreadCondition::wait(Mutex& mutex) { - if (pthread_cond_wait(&m_condition, &mutex.impl()) != 0) - ASSERT(false); + int result = pthread_cond_wait(&m_condition, &mutex.impl()); + ASSERT_UNUSED(result, !result); } bool ThreadCondition::timedWait(Mutex& mutex, double absoluteTime) @@ -256,14 +265,14 @@ bool ThreadCondition::timedWait(Mutex& mutex, double absoluteTime) void ThreadCondition::signal() { - if (pthread_cond_signal(&m_condition) != 0) - ASSERT(false); + int result = pthread_cond_signal(&m_condition); + ASSERT_UNUSED(result, !result); } void ThreadCondition::broadcast() { - if (pthread_cond_broadcast(&m_condition) != 0) - ASSERT(false); + int result = pthread_cond_broadcast(&m_condition); + ASSERT_UNUSED(result, !result); } } // namespace WTF diff --git a/JavaScriptCore/wtf/ThreadingQt.cpp b/JavaScriptCore/wtf/ThreadingQt.cpp index 55a479b..1fdd2bb 100644 --- a/JavaScriptCore/wtf/ThreadingQt.cpp +++ b/JavaScriptCore/wtf/ThreadingQt.cpp @@ -162,6 +162,10 @@ ThreadIdentifier createThreadInternal(ThreadFunction entryPoint, void* data, con return establishIdentifierForThread(threadRef); } +void setThreadNameInternal(const char*) +{ +} + int waitForThreadCompletion(ThreadIdentifier threadID, void** result) { ASSERT(threadID); @@ -191,7 +195,7 @@ ThreadIdentifier currentThread() bool isMainThread() { - return currentThread() == mainThreadIdentifier; + return QThread::currentThread() == QCoreApplication::instance()->thread(); } Mutex::Mutex() @@ -242,11 +246,13 @@ bool ThreadCondition::timedWait(Mutex& mutex, double absoluteTime) if (absoluteTime < currentTime) return false; - double intervalMilliseconds = (absoluteTime - currentTime) * 1000.0; - // Qt defines wait for up to ULONG_MAX milliseconds. - if (intervalMilliseconds >= ULONG_MAX) - intervalMilliseconds = ULONG_MAX; + // Time is too far in the future (and would overflow unsigned long) - wait forever. 
+ if (absoluteTime - currentTime > static_cast<double>(INT_MAX) / 1000.0) { + wait(mutex); + return true; + } + double intervalMilliseconds = (absoluteTime - currentTime) * 1000.0; return m_condition->wait(mutex.impl(), static_cast<unsigned long>(intervalMilliseconds)); } diff --git a/JavaScriptCore/wtf/ThreadingWin.cpp b/JavaScriptCore/wtf/ThreadingWin.cpp index 399fb38..415ba53 100644 --- a/JavaScriptCore/wtf/ThreadingWin.cpp +++ b/JavaScriptCore/wtf/ThreadingWin.cpp @@ -98,7 +98,7 @@ namespace WTF { -// MS_VC_EXCEPTION, THREADNAME_INFO, and setThreadName all come from <http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx>. +// MS_VC_EXCEPTION, THREADNAME_INFO, and setThreadNameInternal all come from <http://msdn.microsoft.com/en-us/library/xcb2z8hs.aspx>. static const DWORD MS_VC_EXCEPTION = 0x406D1388; #pragma pack(push, 8) @@ -110,16 +110,12 @@ typedef struct tagTHREADNAME_INFO { } THREADNAME_INFO; #pragma pack(pop) -static void setThreadName(DWORD dwThreadID, LPCSTR szThreadName) +void setThreadNameInternal(const char* szThreadName) { - // Visual Studio has a 31-character limit on thread names. Longer names will - // be truncated silently, but we'd like callers to know about the limit. 
- ASSERT_ARG(szThreadName, strlen(szThreadName) <= 31); - THREADNAME_INFO info; info.dwType = 0x1000; info.szName = szThreadName; - info.dwThreadID = dwThreadID; + info.dwThreadID = GetCurrentThreadId(); info.dwFlags = 0; __try { @@ -157,7 +153,7 @@ void initializeThreading() initializeRandomNumberGenerator(); initializeMainThread(); mainThreadIdentifier = currentThread(); - setThreadName(mainThreadIdentifier, "Main Thread"); + setThreadNameInternal("Main Thread"); } } @@ -220,9 +216,6 @@ ThreadIdentifier createThreadInternal(ThreadFunction entryPoint, void* data, con return 0; } - if (threadName) - setThreadName(threadIdentifier, threadName); - threadID = static_cast<ThreadIdentifier>(threadIdentifier); storeThreadHandleByIdentifier(threadIdentifier, threadHandle); @@ -457,10 +450,13 @@ bool ThreadCondition::timedWait(Mutex& mutex, double absoluteTime) if (absoluteTime < currentTime) return false; - double intervalMilliseconds = (absoluteTime - currentTime) * 1000.0; - if (intervalMilliseconds >= INT_MAX) - intervalMilliseconds = INT_MAX; + // Time is too far in the future (and would overflow unsigned long) - wait forever. + if (absoluteTime - currentTime > static_cast<double>(INT_MAX) / 1000.0) { + wait(mutex); + return true; + } + double intervalMilliseconds = (absoluteTime - currentTime) * 1000.0; return m_condition.timedWait(mutex.impl(), static_cast<unsigned long>(intervalMilliseconds)); } diff --git a/JavaScriptCore/wtf/TypeTraits.cpp b/JavaScriptCore/wtf/TypeTraits.cpp new file mode 100644 index 0000000..36fc6c6 --- /dev/null +++ b/JavaScriptCore/wtf/TypeTraits.cpp @@ -0,0 +1,120 @@ + /* + * Copyright (C) 2006, 2007, 2008 Apple Inc. All rights reserved. + * Copyright (C) 2009 Google Inc. All rights reserved. 
+ * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#include "config.h" +#include "TypeTraits.h" + +#include "Assertions.h" + +namespace WTF { + +COMPILE_ASSERT(IsInteger<bool>::value, WTF_IsInteger_bool_true); +COMPILE_ASSERT(IsInteger<char>::value, WTF_IsInteger_char_true); +COMPILE_ASSERT(IsInteger<signed char>::value, WTF_IsInteger_signed_char_true); +COMPILE_ASSERT(IsInteger<unsigned char>::value, WTF_IsInteger_unsigned_char_true); +COMPILE_ASSERT(IsInteger<short>::value, WTF_IsInteger_short_true); +COMPILE_ASSERT(IsInteger<unsigned short>::value, WTF_IsInteger_unsigned_short_true); +COMPILE_ASSERT(IsInteger<int>::value, WTF_IsInteger_int_true); +COMPILE_ASSERT(IsInteger<unsigned int>::value, WTF_IsInteger_unsigned_int_true); +COMPILE_ASSERT(IsInteger<long>::value, WTF_IsInteger_long_true); +COMPILE_ASSERT(IsInteger<unsigned long>::value, WTF_IsInteger_unsigned_long_true); +COMPILE_ASSERT(IsInteger<long long>::value, WTF_IsInteger_long_long_true); +COMPILE_ASSERT(IsInteger<unsigned long long>::value, WTF_IsInteger_unsigned_long_long_true); +#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED) +COMPILE_ASSERT(IsInteger<wchar_t>::value, WTF_IsInteger_wchar_t_true); +#endif +COMPILE_ASSERT(!IsInteger<char*>::value, 
WTF_IsInteger_char_pointer_false); +COMPILE_ASSERT(!IsInteger<const char*>::value, WTF_IsInteger_const_char_pointer_false); +COMPILE_ASSERT(!IsInteger<volatile char*>::value, WTF_IsInteger_volatile_char_pointer_false); +COMPILE_ASSERT(!IsInteger<double>::value, WTF_IsInteger_double_false); +COMPILE_ASSERT(!IsInteger<float>::value, WTF_IsInteger_float_false); + +COMPILE_ASSERT(IsPod<bool>::value, WTF_IsPod_bool_true); +COMPILE_ASSERT(IsPod<char>::value, WTF_IsPod_char_true); +COMPILE_ASSERT(IsPod<signed char>::value, WTF_IsPod_signed_char_true); +COMPILE_ASSERT(IsPod<unsigned char>::value, WTF_IsPod_unsigned_char_true); +COMPILE_ASSERT(IsPod<short>::value, WTF_IsPod_short_true); +COMPILE_ASSERT(IsPod<unsigned short>::value, WTF_IsPod_unsigned_short_true); +COMPILE_ASSERT(IsPod<int>::value, WTF_IsPod_int_true); +COMPILE_ASSERT(IsPod<unsigned int>::value, WTF_IsPod_unsigned_int_true); +COMPILE_ASSERT(IsPod<long>::value, WTF_IsPod_long_true); +COMPILE_ASSERT(IsPod<unsigned long>::value, WTF_IsPod_unsigned_long_true); +COMPILE_ASSERT(IsPod<long long>::value, WTF_IsPod_long_long_true); +COMPILE_ASSERT(IsPod<unsigned long long>::value, WTF_IsPod_unsigned_long_long_true); +#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED) +COMPILE_ASSERT(IsPod<wchar_t>::value, WTF_IsPod_wchar_t_true); +#endif +COMPILE_ASSERT(IsPod<char*>::value, WTF_IsPod_char_pointer_true); +COMPILE_ASSERT(IsPod<const char*>::value, WTF_IsPod_const_char_pointer_true); +COMPILE_ASSERT(IsPod<volatile char*>::value, WTF_IsPod_volatile_char_pointer_true); +COMPILE_ASSERT(IsPod<double>::value, WTF_IsPod_double_true); +COMPILE_ASSERT(IsPod<long double>::value, WTF_IsPod_long_double_true); +COMPILE_ASSERT(IsPod<float>::value, WTF_IsPod_float_true); +COMPILE_ASSERT(!IsPod<IsPod<bool> >::value, WTF_IsPod_struct_false); + +enum IsConvertibleToIntegerCheck { }; +COMPILE_ASSERT(IsConvertibleToInteger<IsConvertibleToIntegerCheck>::value, WTF_IsConvertibleToInteger_enum_true); 
+COMPILE_ASSERT(IsConvertibleToInteger<bool>::value, WTF_IsConvertibleToInteger_bool_true); +COMPILE_ASSERT(IsConvertibleToInteger<char>::value, WTF_IsConvertibleToInteger_char_true); +COMPILE_ASSERT(IsConvertibleToInteger<signed char>::value, WTF_IsConvertibleToInteger_signed_char_true); +COMPILE_ASSERT(IsConvertibleToInteger<unsigned char>::value, WTF_IsConvertibleToInteger_unsigned_char_true); +COMPILE_ASSERT(IsConvertibleToInteger<short>::value, WTF_IsConvertibleToInteger_short_true); +COMPILE_ASSERT(IsConvertibleToInteger<unsigned short>::value, WTF_IsConvertibleToInteger_unsigned_short_true); +COMPILE_ASSERT(IsConvertibleToInteger<int>::value, WTF_IsConvertibleToInteger_int_true); +COMPILE_ASSERT(IsConvertibleToInteger<unsigned int>::value, WTF_IsConvertibleToInteger_unsigned_int_true); +COMPILE_ASSERT(IsConvertibleToInteger<long>::value, WTF_IsConvertibleToInteger_long_true); +COMPILE_ASSERT(IsConvertibleToInteger<unsigned long>::value, WTF_IsConvertibleToInteger_unsigned_long_true); +COMPILE_ASSERT(IsConvertibleToInteger<long long>::value, WTF_IsConvertibleToInteger_long_long_true); +COMPILE_ASSERT(IsConvertibleToInteger<unsigned long long>::value, WTF_IsConvertibleToInteger_unsigned_long_long_true); +#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED) +COMPILE_ASSERT(IsConvertibleToInteger<wchar_t>::value, WTF_IsConvertibleToInteger_wchar_t_true); +#endif +COMPILE_ASSERT(IsConvertibleToInteger<double>::value, WTF_IsConvertibleToInteger_double_true); +COMPILE_ASSERT(IsConvertibleToInteger<long double>::value, WTF_IsConvertibleToInteger_long_double_true); +COMPILE_ASSERT(IsConvertibleToInteger<float>::value, WTF_IsConvertibleToInteger_float_true); +COMPILE_ASSERT(!IsConvertibleToInteger<char*>::value, WTF_IsConvertibleToInteger_char_pointer_false); +COMPILE_ASSERT(!IsConvertibleToInteger<const char*>::value, WTF_IsConvertibleToInteger_const_char_pointer_false); +COMPILE_ASSERT(!IsConvertibleToInteger<volatile char*>::value, 
WTF_IsConvertibleToInteger_volatile_char_pointer_false); +COMPILE_ASSERT(!IsConvertibleToInteger<IsConvertibleToInteger<bool> >::value, WTF_IsConvertibleToInteger_struct_false); + +COMPILE_ASSERT((IsSameType<bool, bool>::value), WTF_IsSameType_bool_true); +COMPILE_ASSERT((IsSameType<int*, int*>::value), WTF_IsSameType_int_pointer_true); +COMPILE_ASSERT((!IsSameType<int, int*>::value), WTF_IsSameType_int_int_pointer_false); +COMPILE_ASSERT((!IsSameType<bool, const bool>::value), WTF_IsSameType_const_change_false); +COMPILE_ASSERT((!IsSameType<bool, volatile bool>::value), WTF_IsSameType_volatile_change_false); + +COMPILE_ASSERT((IsSameType<bool, RemoveConst<const bool>::Type>::value), WTF_test_RemoveConst_const_bool); +COMPILE_ASSERT((!IsSameType<bool, RemoveConst<volatile bool>::Type>::value), WTF_test_RemoveConst_volatile_bool); + +COMPILE_ASSERT((IsSameType<bool, RemoveVolatile<bool>::Type>::value), WTF_test_RemoveVolatile_bool); +COMPILE_ASSERT((!IsSameType<bool, RemoveVolatile<const bool>::Type>::value), WTF_test_RemoveVolatile_const_bool); +COMPILE_ASSERT((IsSameType<bool, RemoveVolatile<volatile bool>::Type>::value), WTF_test_RemoveVolatile_volatile_bool); + +COMPILE_ASSERT((IsSameType<bool, RemoveConstVolatile<bool>::Type>::value), WTF_test_RemoveConstVolatile_bool); +COMPILE_ASSERT((IsSameType<bool, RemoveConstVolatile<const bool>::Type>::value), WTF_test_RemoveConstVolatile_const_bool); +COMPILE_ASSERT((IsSameType<bool, RemoveConstVolatile<volatile bool>::Type>::value), WTF_test_RemoveConstVolatile_volatile_bool); +COMPILE_ASSERT((IsSameType<bool, RemoveConstVolatile<const volatile bool>::Type>::value), WTF_test_RemoveConstVolatile_const_volatile_bool); + +COMPILE_ASSERT((IsSameType<int, RemovePointer<int>::Type>::value), WTF_Test_RemovePointer_int); +COMPILE_ASSERT((IsSameType<int, RemovePointer<int*>::Type>::value), WTF_Test_RemovePointer_int_pointer); +COMPILE_ASSERT((!IsSameType<int, RemovePointer<int**>::Type>::value), 
WTF_Test_RemovePointer_int_pointer_pointer); + +} // namespace WTF diff --git a/JavaScriptCore/wtf/TypeTraits.h b/JavaScriptCore/wtf/TypeTraits.h new file mode 100644 index 0000000..2aeabcf --- /dev/null +++ b/JavaScriptCore/wtf/TypeTraits.h @@ -0,0 +1,133 @@ + /* + * Copyright (C) 2006, 2007, 2008 Apple Inc. All rights reserved. + * Copyright (C) 2009 Google Inc. All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#ifndef TypeTraits_h +#define TypeTraits_h + +#include "Platform.h" + +namespace WTF { + + // The following are provided in this file: + // + // IsInteger<T>::value + // IsPod<T>::value, see the definition for a note about its limitations + // IsConvertibleToInteger<T>::value + // + // IsSameType<T, U>::value + // + // RemovePointer<T>::Type + // RemoveConst<T>::Type + // RemoveVolatile<T>::Type + // RemoveConstVolatile<T>::Type + // + // COMPILE_ASSERT's in TypeTraits.cpp illustrate their usage and what they do. 
+ + template<typename T> struct IsInteger { static const bool value = false; }; + template<> struct IsInteger<bool> { static const bool value = true; }; + template<> struct IsInteger<char> { static const bool value = true; }; + template<> struct IsInteger<signed char> { static const bool value = true; }; + template<> struct IsInteger<unsigned char> { static const bool value = true; }; + template<> struct IsInteger<short> { static const bool value = true; }; + template<> struct IsInteger<unsigned short> { static const bool value = true; }; + template<> struct IsInteger<int> { static const bool value = true; }; + template<> struct IsInteger<unsigned int> { static const bool value = true; }; + template<> struct IsInteger<long> { static const bool value = true; }; + template<> struct IsInteger<unsigned long> { static const bool value = true; }; + template<> struct IsInteger<long long> { static const bool value = true; }; + template<> struct IsInteger<unsigned long long> { static const bool value = true; }; +#if !COMPILER(MSVC) || defined(_NATIVE_WCHAR_T_DEFINED) + template<> struct IsInteger<wchar_t> { static const bool value = true; }; +#endif + + // IsPod is misnamed as it doesn't cover all plain old data (pod) types. + // Specifically, it doesn't allow for enums or for structs. + template <typename T> struct IsPod { static const bool value = IsInteger<T>::value; }; + template <> struct IsPod<float> { static const bool value = true; }; + template <> struct IsPod<double> { static const bool value = true; }; + template <> struct IsPod<long double> { static const bool value = true; }; + template <typename P> struct IsPod<P*> { static const bool value = true; }; + + template<typename T> class IsConvertibleToInteger { + // Avoid "possible loss of data" warning when using Microsoft's C++ compiler + // by not converting int's to doubles. 
+ template<bool performCheck, typename U> class IsConvertibleToDouble; + template<typename U> class IsConvertibleToDouble<false, U> { + public: + static const bool value = false; + }; + + template<typename U> class IsConvertibleToDouble<true, U> { + typedef char YesType; + struct NoType { + char padding[8]; + }; + + static YesType floatCheck(long double); + static NoType floatCheck(...); + static T& t; + public: + static const bool value = sizeof(floatCheck(t)) == sizeof(YesType); + }; + + public: + static const bool value = IsInteger<T>::value || IsConvertibleToDouble<!IsInteger<T>::value, T>::value; + }; + + template <typename T, typename U> struct IsSameType { + static const bool value = false; + }; + + template <typename T> struct IsSameType<T, T> { + static const bool value = true; + }; + + template <typename T> struct RemoveConst { + typedef T Type; + }; + + template <typename T> struct RemoveConst<const T> { + typedef T Type; + }; + + template <typename T> struct RemoveVolatile { + typedef T Type; + }; + + template <typename T> struct RemoveVolatile<volatile T> { + typedef T Type; + }; + + template <typename T> struct RemoveConstVolatile { + typedef typename RemoveVolatile<typename RemoveConst<T>::Type>::Type Type; + }; + + template <typename T> struct RemovePointer { + typedef T Type; + }; + + template <typename T> struct RemovePointer<T*> { + typedef T Type; + }; + +} // namespace WTF + +#endif // TypeTraits_h diff --git a/JavaScriptCore/wtf/Vector.h b/JavaScriptCore/wtf/Vector.h index 880b45d..190226d 100644 --- a/JavaScriptCore/wtf/Vector.h +++ b/JavaScriptCore/wtf/Vector.h @@ -377,7 +377,8 @@ namespace WTF { VectorBuffer(size_t capacity) : Base(inlineBuffer(), inlineCapacity) { - allocateBuffer(capacity); + if (capacity > inlineCapacity) + Base::allocateBuffer(capacity); } ~VectorBuffer() @@ -389,6 +390,10 @@ namespace WTF { { if (newCapacity > inlineCapacity) Base::allocateBuffer(newCapacity); + else { + m_buffer = inlineBuffer(); + m_capacity = 
inlineCapacity; + } } void deallocateBuffer(T* bufferToDeallocate) @@ -503,6 +508,7 @@ namespace WTF { void grow(size_t size); void resize(size_t size); void reserveCapacity(size_t newCapacity); + void reserveInitialCapacity(size_t initialCapacity); void shrinkCapacity(size_t newCapacity); void shrinkToFit() { shrinkCapacity(size()); } @@ -733,6 +739,15 @@ namespace WTF { } template<typename T, size_t inlineCapacity> + inline void Vector<T, inlineCapacity>::reserveInitialCapacity(size_t initialCapacity) + { + ASSERT(!m_size); + ASSERT(capacity() == inlineCapacity); + if (initialCapacity > inlineCapacity) + m_buffer.allocateBuffer(initialCapacity); + } + + template<typename T, size_t inlineCapacity> void Vector<T, inlineCapacity>::shrinkCapacity(size_t newCapacity) { if (newCapacity >= capacity()) diff --git a/JavaScriptCore/wtf/VectorTraits.h b/JavaScriptCore/wtf/VectorTraits.h index 6efe36c..7974b9a 100644 --- a/JavaScriptCore/wtf/VectorTraits.h +++ b/JavaScriptCore/wtf/VectorTraits.h @@ -22,6 +22,7 @@ #define WTF_VectorTraits_h #include "RefPtr.h" +#include "TypeTraits.h" #include <utility> #include <memory> @@ -29,24 +30,6 @@ using std::pair; namespace WTF { - template <typename T> struct IsPod { static const bool value = false; }; - template <> struct IsPod<bool> { static const bool value = true; }; - template <> struct IsPod<char> { static const bool value = true; }; - template <> struct IsPod<signed char> { static const bool value = true; }; - template <> struct IsPod<unsigned char> { static const bool value = true; }; - template <> struct IsPod<short> { static const bool value = true; }; - template <> struct IsPod<unsigned short> { static const bool value = true; }; - template <> struct IsPod<int> { static const bool value = true; }; - template <> struct IsPod<unsigned int> { static const bool value = true; }; - template <> struct IsPod<long> { static const bool value = true; }; - template <> struct IsPod<unsigned long> { static const bool value = true; }; - 
template <> struct IsPod<long long> { static const bool value = true; }; - template <> struct IsPod<unsigned long long> { static const bool value = true; }; - template <> struct IsPod<float> { static const bool value = true; }; - template <> struct IsPod<double> { static const bool value = true; }; - template <> struct IsPod<long double> { static const bool value = true; }; - template <typename P> struct IsPod<P *> { static const bool value = true; }; - template<bool isPod, typename T> class VectorTraitsBase; diff --git a/JavaScriptCore/wtf/android/MainThreadAndroid.cpp b/JavaScriptCore/wtf/android/MainThreadAndroid.cpp index d00c0ab..ab0d3bf 100644 --- a/JavaScriptCore/wtf/android/MainThreadAndroid.cpp +++ b/JavaScriptCore/wtf/android/MainThreadAndroid.cpp @@ -38,6 +38,10 @@ static void timeoutFired(void* ) dispatchFunctionsFromMainThread(); } +void initializeMainThreadPlatform() +{ +} + void scheduleDispatchFunctionsOnMainThread() { JavaSharedClient::EnqueueFunctionPtr(timeoutFired, 0); diff --git a/JavaScriptCore/wtf/chromium/ChromiumThreading.h b/JavaScriptCore/wtf/chromium/ChromiumThreading.h new file mode 100644 index 0000000..e9b1f39 --- /dev/null +++ b/JavaScriptCore/wtf/chromium/ChromiumThreading.h @@ -0,0 +1,45 @@ +/* +* Copyright (C) 2009 Google Inc. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following disclaimer +* in the documentation and/or other materials provided with the +* distribution. +* * Neither the name of Google Inc. 
nor the names of its +* contributors may be used to endorse or promote products derived from +* this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef ChromiumThreading_h +#define ChromiumThreading_h + +namespace WTF { + + // An interface to the embedding layer, which provides threading support. + class ChromiumThreading { + public: + static void initializeMainThread(); + static void scheduleDispatchFunctionsOnMainThread(); + }; + +} // namespace WTF + +#endif // ChromiumThreading_h diff --git a/JavaScriptCore/wtf/chromium/MainThreadChromium.cpp b/JavaScriptCore/wtf/chromium/MainThreadChromium.cpp new file mode 100644 index 0000000..394370f --- /dev/null +++ b/JavaScriptCore/wtf/chromium/MainThreadChromium.cpp @@ -0,0 +1,49 @@ +/* +* Copyright (C) 2009 Google Inc. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. 
+* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following disclaimer +* in the documentation and/or other materials provided with the +* distribution. +* * Neither the name of Google Inc. nor the names of its +* contributors may be used to endorse or promote products derived from +* this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include "config.h" +#include "MainThread.h" + +#include "ChromiumThreading.h" + +namespace WTF { + +void initializeMainThreadPlatform() +{ + ChromiumThreading::initializeMainThread(); +} + +void scheduleDispatchFunctionsOnMainThread() +{ + ChromiumThreading::scheduleDispatchFunctionsOnMainThread(); +} + +} // namespace WTF + diff --git a/JavaScriptCore/wtf/dtoa.cpp b/JavaScriptCore/wtf/dtoa.cpp index c9e8d30..c104dad 100644 --- a/JavaScriptCore/wtf/dtoa.cpp +++ b/JavaScriptCore/wtf/dtoa.cpp @@ -824,16 +824,16 @@ static double b2d(Bigint* a, int* e) *e = 32 - k; #ifdef Pack_32 if (k < Ebits) { - d0 = Exp_1 | y >> Ebits - k; + d0 = Exp_1 | (y >> (Ebits - k)); w = xa > xa0 ? 
*--xa : 0; - d1 = y << (32 - Ebits) + k | w >> Ebits - k; + d1 = (y << (32 - Ebits + k)) | (w >> (Ebits - k)); goto ret_d; } z = xa > xa0 ? *--xa : 0; if (k -= Ebits) { - d0 = Exp_1 | y << k | z >> 32 - k; + d0 = Exp_1 | (y << k) | (z >> (32 - k)); y = xa > xa0 ? *--xa : 0; - d1 = z << k | y >> 32 - k; + d1 = (z << k) | (y >> (32 - k)); } else { d0 = Exp_1 | y; d1 = z; @@ -889,7 +889,7 @@ static Bigint* d2b(double d, int* e, int* bits) #ifdef Pack_32 if ((y = d1)) { if ((k = lo0bits(&y))) { - x[0] = y | z << 32 - k; + x[0] = y | (z << (32 - k)); z >>= k; } else x[0] = y; @@ -1349,7 +1349,7 @@ ovfl: if (j >= 53) word0(rv) = (P + 2) * Exp_msk1; else - word0(rv) &= 0xffffffff << j - 32; + word0(rv) &= 0xffffffff << (j - 32); } else word1(rv) &= 0xffffffff << j; } @@ -2011,8 +2011,8 @@ char* dtoa(double d, int ndigits, int* decpt, int* sign, char** rve) /* d is denormalized */ i = bbits + be + (Bias + (P - 1) - 1); - x = i > 32 ? word0(d) << 64 - i | word1(d) >> i - 32 - : word1(d) << 32 - i; + x = (i > 32) ? 
(word0(d) << (64 - i)) | (word1(d) >> (i - 32)) + : word1(d) << (32 - i); dval(d2) = x; word0(d2) -= 31 * Exp_msk1; /* adjust exponent */ i -= (Bias + (P - 1) - 1) + 1; @@ -2193,7 +2193,7 @@ fast_failed: } if (i == ilim) { dval(d) += dval(d); - if (dval(d) > ds || dval(d) == ds && L & 1) { + if (dval(d) > ds || (dval(d) == ds && (L & 1))) { bump_up: while (*--s == '9') if (s == s0) { @@ -2334,7 +2334,7 @@ bump_up: *s++ = dig; goto ret; } - if (j < 0 || j == 0 && !(word1(d) & 1)) { + if (j < 0 || (j == 0 && !(word1(d) & 1))) { if (!b->x[0] && b->wds <= 1) { #ifdef SET_INEXACT inexact = 0; @@ -2344,7 +2344,7 @@ bump_up: if (j1 > 0) { b = lshift(b, 1); j1 = cmp(b, S); - if ((j1 > 0 || j1 == 0 && dig & 1) && dig++ == '9') + if ((j1 > 0 || (j1 == 0 && (dig & 1))) && dig++ == '9') goto round_9_up; } accept_dig: @@ -2389,7 +2389,7 @@ round_9_up: b = lshift(b, 1); j = cmp(b, S); - if (j > 0 || j == 0 && dig & 1) { + if (j > 0 || (j == 0 && (dig & 1))) { roundoff: while (*--s == '9') if (s == s0) { diff --git a/JavaScriptCore/wtf/gtk/MainThreadGtk.cpp b/JavaScriptCore/wtf/gtk/MainThreadGtk.cpp index a6e061f..7624247 100644 --- a/JavaScriptCore/wtf/gtk/MainThreadGtk.cpp +++ b/JavaScriptCore/wtf/gtk/MainThreadGtk.cpp @@ -34,6 +34,10 @@ namespace WTF { +void initializeMainThreadPlatform() +{ +} + static gboolean timeoutFired(gpointer) { dispatchFunctionsFromMainThread(); @@ -45,5 +49,4 @@ void scheduleDispatchFunctionsOnMainThread() g_timeout_add(0, timeoutFired, 0); } - -} +} // namespace WTF diff --git a/JavaScriptCore/wtf/mac/MainThreadMac.mm b/JavaScriptCore/wtf/mac/MainThreadMac.mm index b04ef0e..c79acc1 100644 --- a/JavaScriptCore/wtf/mac/MainThreadMac.mm +++ b/JavaScriptCore/wtf/mac/MainThreadMac.mm @@ -30,6 +30,7 @@ #import "MainThread.h" #import <Foundation/NSThread.h> +#import <wtf/Assertions.h> @interface WTFMainThreadCaller : NSObject { } @@ -47,11 +48,18 @@ namespace WTF { +static WTFMainThreadCaller* staticMainThreadCaller = nil; + +void 
initializeMainThreadPlatform() +{ + ASSERT(!staticMainThreadCaller); + staticMainThreadCaller = [[WTFMainThreadCaller alloc] init]; +} + void scheduleDispatchFunctionsOnMainThread() { - WTFMainThreadCaller *caller = [[WTFMainThreadCaller alloc] init]; - [caller performSelectorOnMainThread:@selector(call) withObject:nil waitUntilDone:NO]; - [caller release]; + ASSERT(staticMainThreadCaller); + [staticMainThreadCaller performSelectorOnMainThread:@selector(call) withObject:nil waitUntilDone:NO]; } } // namespace WTF diff --git a/JavaScriptCore/wtf/qt/MainThreadQt.cpp b/JavaScriptCore/wtf/qt/MainThreadQt.cpp index 1914600..7b2d0f2 100644 --- a/JavaScriptCore/wtf/qt/MainThreadQt.cpp +++ b/JavaScriptCore/wtf/qt/MainThreadQt.cpp @@ -58,12 +58,15 @@ void MainThreadInvoker::dispatch() Q_GLOBAL_STATIC(MainThreadInvoker, webkit_main_thread_invoker) +void initializeMainThreadPlatform() +{ +} void scheduleDispatchFunctionsOnMainThread() { QMetaObject::invokeMethod(webkit_main_thread_invoker(), "dispatch", Qt::QueuedConnection); } -} +} // namespace WTF #include "MainThreadQt.moc" diff --git a/JavaScriptCore/wtf/unicode/icu/UnicodeIcu.h b/JavaScriptCore/wtf/unicode/icu/UnicodeIcu.h index 608aea6..de5e082 100644 --- a/JavaScriptCore/wtf/unicode/icu/UnicodeIcu.h +++ b/JavaScriptCore/wtf/unicode/icu/UnicodeIcu.h @@ -179,6 +179,11 @@ inline bool isPunct(UChar32 c) return !!u_ispunct(c); } +inline bool hasLineBreakingPropertyComplexContext(UChar32 c) +{ + return u_getIntPropertyValue(c, UCHAR_LINE_BREAK) == U_LB_COMPLEX_CONTEXT; +} + inline UChar32 mirroredChar(UChar32 c) { return u_charMirror(c); diff --git a/JavaScriptCore/wtf/unicode/qt4/UnicodeQt4.h b/JavaScriptCore/wtf/unicode/qt4/UnicodeQt4.h index d7d78ce..f65e292 100644 --- a/JavaScriptCore/wtf/unicode/qt4/UnicodeQt4.h +++ b/JavaScriptCore/wtf/unicode/qt4/UnicodeQt4.h @@ -352,6 +352,12 @@ inline bool isLower(UChar32 c) return QChar::category(c) == QChar::Letter_Lowercase; } +inline bool 
hasLineBreakingPropertyComplexContext(UChar32) +{ + // FIXME: Implement this to return whether the character has line breaking property SA (Complex Context). + return false; +} + inline UChar32 mirroredChar(UChar32 c) { return QChar::mirroredChar(c); diff --git a/JavaScriptCore/wtf/win/MainThreadWin.cpp b/JavaScriptCore/wtf/win/MainThreadWin.cpp index 5f0163c..b828b7d 100644 --- a/JavaScriptCore/wtf/win/MainThreadWin.cpp +++ b/JavaScriptCore/wtf/win/MainThreadWin.cpp @@ -50,13 +50,11 @@ LRESULT CALLBACK ThreadingWindowWndProc(HWND hWnd, UINT message, WPARAM wParam, return 0; } -void initializeMainThread() +void initializeMainThreadPlatform() { if (threadingWindowHandle) return; - mainThreadFunctionQueueMutex(); - WNDCLASSEX wcex; memset(&wcex, 0, sizeof(WNDCLASSEX)); wcex.cbSize = sizeof(WNDCLASSEX); @@ -75,4 +73,4 @@ void scheduleDispatchFunctionsOnMainThread() PostMessage(threadingWindowHandle, threadingFiredMessage, 0, 0); } -} // namespace WebCore +} // namespace WTF diff --git a/JavaScriptCore/wtf/wx/MainThreadWx.cpp b/JavaScriptCore/wtf/wx/MainThreadWx.cpp index 3166331..bcd5f05 100644 --- a/JavaScriptCore/wtf/wx/MainThreadWx.cpp +++ b/JavaScriptCore/wtf/wx/MainThreadWx.cpp @@ -31,8 +31,12 @@ namespace WTF { -void scheduleDispatchFunctionsOnMainThread() +void initializeMainThreadPlatform() { } +void scheduleDispatchFunctionsOnMainThread() +{ } + +} // namespace WTF |