author     Steve Block <steveblock@google.com>   2010-04-27 16:23:55 +0100
committer  Steve Block <steveblock@google.com>   2010-04-27 17:07:03 +0100
commit     692e5dbf12901edacf14812a6fae25462920af42 (patch)
tree       d62802373a429e0a9dc093b6046c166b2c514285 /JavaScriptCore
parent     e24bea4efef1c414137d36a9778aa4e142e10c7d (diff)
Merge webkit.org at r55033 : Initial merge by git
Change-Id: I98a4af828067cc243ec3dc5e5826154dd88074b5
Diffstat (limited to 'JavaScriptCore')
-rw-r--r--  JavaScriptCore/API/APIShims.h | 2
-rw-r--r--  JavaScriptCore/API/JSClassRef.cpp | 12
-rw-r--r--  JavaScriptCore/API/JSContextRef.cpp | 8
-rw-r--r--  JavaScriptCore/ChangeLog | 622
-rw-r--r--  JavaScriptCore/DerivedSources.pro | 4
-rw-r--r--  JavaScriptCore/GNUmakefile.am | 1
-rw-r--r--  JavaScriptCore/JavaScriptCore.exp | 5
-rw-r--r--  JavaScriptCore/JavaScriptCore.gypi | 1
-rw-r--r--  JavaScriptCore/JavaScriptCore.pri | 1
-rw-r--r--  JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def | 6
-rw-r--r--  JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj | 4
-rw-r--r--  JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj | 6
-rw-r--r--  JavaScriptCore/bytecode/CodeBlock.cpp | 40
-rw-r--r--  JavaScriptCore/bytecode/CodeBlock.h | 25
-rw-r--r--  JavaScriptCore/bytecode/EvalCodeCache.h | 2
-rw-r--r--  JavaScriptCore/bytecode/Opcode.h | 5
-rw-r--r--  JavaScriptCore/bytecompiler/BytecodeGenerator.cpp | 2
-rw-r--r--  JavaScriptCore/bytecompiler/NodesCodegen.cpp | 4
-rw-r--r--  JavaScriptCore/create_jit_stubs (renamed from JavaScriptCore/create_rvct_stubs) | 31
-rw-r--r--  JavaScriptCore/interpreter/Interpreter.cpp | 168
-rw-r--r--  JavaScriptCore/jit/JIT.cpp | 7
-rw-r--r--  JavaScriptCore/jit/JITOpcodes.cpp | 4
-rw-r--r--  JavaScriptCore/jit/JITPropertyAccess.cpp | 980
-rw-r--r--  JavaScriptCore/jit/JITPropertyAccess32_64.cpp | 1026
-rw-r--r--  JavaScriptCore/jit/JITStubs.cpp | 21
-rw-r--r--  JavaScriptCore/qt/api/qscriptvalue_p.h | 27
-rw-r--r--  JavaScriptCore/qt/tests/qscriptvalue/tst_qscriptvalue.h | 28
-rw-r--r--  JavaScriptCore/qt/tests/qscriptvalue/tst_qscriptvalue_generated.cpp | 508
-rw-r--r--  JavaScriptCore/runtime/ArrayPrototype.cpp | 2
-rw-r--r--  JavaScriptCore/runtime/Identifier.cpp | 12
-rw-r--r--  JavaScriptCore/runtime/Identifier.h | 2
-rw-r--r--  JavaScriptCore/runtime/JSGlobalObjectFunctions.cpp | 2
-rw-r--r--  JavaScriptCore/runtime/JSONObject.cpp | 4
-rw-r--r--  JavaScriptCore/runtime/JSObject.cpp | 9
-rw-r--r--  JavaScriptCore/runtime/JSString.cpp | 83
-rw-r--r--  JavaScriptCore/runtime/JSString.h | 241
-rw-r--r--  JavaScriptCore/runtime/NumberPrototype.cpp | 6
-rw-r--r--  JavaScriptCore/runtime/Operations.h | 149
-rw-r--r--  JavaScriptCore/runtime/PropertySlot.cpp | 4
-rw-r--r--  JavaScriptCore/runtime/PropertySlot.h | 32
-rw-r--r--  JavaScriptCore/runtime/RegExp.cpp | 10
-rw-r--r--  JavaScriptCore/runtime/SmallStrings.cpp | 2
-rw-r--r--  JavaScriptCore/runtime/StringPrototype.cpp | 42
-rw-r--r--  JavaScriptCore/runtime/Structure.cpp | 114
-rw-r--r--  JavaScriptCore/runtime/Structure.h | 74
-rw-r--r--  JavaScriptCore/runtime/StructureTransitionTable.h | 94
-rw-r--r--  JavaScriptCore/runtime/UString.cpp | 114
-rw-r--r--  JavaScriptCore/runtime/UString.h | 99
-rw-r--r--  JavaScriptCore/runtime/UStringImpl.cpp | 51
-rw-r--r--  JavaScriptCore/runtime/UStringImpl.h | 268
-rw-r--r--  JavaScriptCore/wtf/AlwaysInline.h | 4
-rw-r--r--  JavaScriptCore/wtf/FastMalloc.cpp | 35
-rw-r--r--  JavaScriptCore/wtf/PtrAndFlags.h | 79
-rw-r--r--  JavaScriptCore/wtf/gtk/GOwnPtr.cpp | 7
-rw-r--r--  JavaScriptCore/wtf/gtk/GOwnPtr.h | 2
55 files changed, 3292 insertions, 1799 deletions
diff --git a/JavaScriptCore/API/APIShims.h b/JavaScriptCore/API/APIShims.h
index d7276ec..9a6cacb 100644
--- a/JavaScriptCore/API/APIShims.h
+++ b/JavaScriptCore/API/APIShims.h
@@ -80,12 +80,10 @@ public:
, m_globalData(&exec->globalData())
{
resetCurrentIdentifierTable();
- m_globalData->timeoutChecker.start();
}
~APICallbackShim()
{
- m_globalData->timeoutChecker.stop();
setCurrentIdentifierTable(m_globalData->identifierTable);
}
diff --git a/JavaScriptCore/API/JSClassRef.cpp b/JavaScriptCore/API/JSClassRef.cpp
index 747aa16..717488f 100644
--- a/JavaScriptCore/API/JSClassRef.cpp
+++ b/JavaScriptCore/API/JSClassRef.cpp
@@ -82,7 +82,9 @@ OpaqueJSClass::OpaqueJSClass(const JSClassDefinition* definition, OpaqueJSClass*
if (!valueName.isNull()) {
// Use a local variable here to sidestep an RVCT compiler bug.
StaticValueEntry* entry = new StaticValueEntry(staticValue->getProperty, staticValue->setProperty, staticValue->attributes);
- m_staticValues->add(valueName.rep()->ref(), entry);
+ UStringImpl* impl = valueName.rep();
+ impl->ref();
+ m_staticValues->add(impl, entry);
}
++staticValue;
}
@@ -95,7 +97,9 @@ OpaqueJSClass::OpaqueJSClass(const JSClassDefinition* definition, OpaqueJSClass*
if (!functionName.isNull()) {
// Use a local variable here to sidestep an RVCT compiler bug.
StaticFunctionEntry* entry = new StaticFunctionEntry(staticFunction->callAsFunction, staticFunction->attributes);
- m_staticFunctions->add(functionName.rep()->ref(), entry);
+ UStringImpl* impl = functionName.rep();
+ impl->ref();
+ m_staticFunctions->add(impl, entry);
}
++staticFunction;
}
@@ -167,7 +171,7 @@ OpaqueJSClassContextData::OpaqueJSClassContextData(OpaqueJSClass* jsClass)
ASSERT(!it->first->isIdentifier());
// Use a local variable here to sidestep an RVCT compiler bug.
StaticValueEntry* entry = new StaticValueEntry(it->second->getProperty, it->second->setProperty, it->second->attributes);
- staticValues->add(UString::Rep::create(it->first->data(), it->first->size()), entry);
+ staticValues->add(UString::Rep::create(it->first->data(), it->first->length()), entry);
}
} else
staticValues = 0;
@@ -179,7 +183,7 @@ OpaqueJSClassContextData::OpaqueJSClassContextData(OpaqueJSClass* jsClass)
ASSERT(!it->first->isIdentifier());
// Use a local variable here to sidestep an RVCT compiler bug.
StaticFunctionEntry* entry = new StaticFunctionEntry(it->second->callAsFunction, it->second->attributes);
- staticFunctions->add(UString::Rep::create(it->first->data(), it->first->size()), entry);
+ staticFunctions->add(UString::Rep::create(it->first->data(), it->first->length()), entry);
}
} else
diff --git a/JavaScriptCore/API/JSContextRef.cpp b/JavaScriptCore/API/JSContextRef.cpp
index 6bdc3c8..2c76338 100644
--- a/JavaScriptCore/API/JSContextRef.cpp
+++ b/JavaScriptCore/API/JSContextRef.cpp
@@ -120,11 +120,13 @@ JSGlobalContextRef JSGlobalContextRetain(JSGlobalContextRef ctx)
void JSGlobalContextRelease(JSGlobalContextRef ctx)
{
ExecState* exec = toJS(ctx);
- APIEntryShim entryShim(exec, false);
+ JSLock lock(exec);
+
+ JSGlobalData& globalData = exec->globalData();
+ IdentifierTable* savedIdentifierTable = setCurrentIdentifierTable(globalData.identifierTable);
gcUnprotect(exec->dynamicGlobalObject());
- JSGlobalData& globalData = exec->globalData();
if (globalData.refCount() == 2) { // One reference is held by JSGlobalObject, another added by JSGlobalContextRetain().
// The last reference was released, this is our last chance to collect.
globalData.heap.destroy();
@@ -132,6 +134,8 @@ void JSGlobalContextRelease(JSGlobalContextRef ctx)
globalData.heap.collectAllGarbage();
globalData.deref();
+
+ setCurrentIdentifierTable(savedIdentifierTable);
}
JSObjectRef JSContextGetGlobalObject(JSContextRef ctx)
diff --git a/JavaScriptCore/ChangeLog b/JavaScriptCore/ChangeLog
index 4257344..3d1a925 100644
--- a/JavaScriptCore/ChangeLog
+++ b/JavaScriptCore/ChangeLog
@@ -1,3 +1,625 @@
+2010-02-19 Oliver Hunt <oliver@apple.com>
+
+ RS = Gavin Barraclough.
+
+ Split the 32/64 version of JITPropertyAccess into a separate file.
+
+ * GNUmakefile.am:
+ * JavaScriptCore.gypi:
+ * JavaScriptCore.pri:
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj:
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * jit/JITPropertyAccess.cpp:
+ * jit/JITPropertyAccess32_64.cpp: Added.
+ (JSC::JIT::emit_op_put_by_index):
+ (JSC::JIT::emit_op_put_getter):
+ (JSC::JIT::emit_op_put_setter):
+ (JSC::JIT::emit_op_del_by_id):
+ (JSC::JIT::emit_op_method_check):
+ (JSC::JIT::emitSlow_op_method_check):
+ (JSC::JIT::emit_op_get_by_val):
+ (JSC::JIT::emitSlow_op_get_by_val):
+ (JSC::JIT::emit_op_put_by_val):
+ (JSC::JIT::emitSlow_op_put_by_val):
+ (JSC::JIT::emit_op_get_by_id):
+ (JSC::JIT::emitSlow_op_get_by_id):
+ (JSC::JIT::emit_op_put_by_id):
+ (JSC::JIT::emitSlow_op_put_by_id):
+ (JSC::JIT::compileGetByIdHotPath):
+ (JSC::JIT::compileGetByIdSlowCase):
+ (JSC::JIT::compilePutDirectOffset):
+ (JSC::JIT::compileGetDirectOffset):
+ (JSC::JIT::testPrototype):
+ (JSC::JIT::privateCompilePutByIdTransition):
+ (JSC::JIT::patchGetByIdSelf):
+ (JSC::JIT::patchMethodCallProto):
+ (JSC::JIT::patchPutByIdReplace):
+ (JSC::JIT::privateCompilePatchGetArrayLength):
+ (JSC::JIT::privateCompileGetByIdProto):
+ (JSC::JIT::privateCompileGetByIdSelfList):
+ (JSC::JIT::privateCompileGetByIdProtoList):
+ (JSC::JIT::privateCompileGetByIdChainList):
+ (JSC::JIT::privateCompileGetByIdChain):
+ (JSC::JIT::emit_op_get_by_pname):
+ (JSC::JIT::emitSlow_op_get_by_pname):
+
+2010-02-19 Patrick Gansterer <paroga@paroga.com>
+
+ Reviewed by Laszlo Gombos.
+
+ Added additional parameter to create_rvct_stubs
+ for setting the regular expression prefix.
+ Renamed it because it now works for other platforms too.
+ https://bugs.webkit.org/show_bug.cgi?id=34951
+
+ * DerivedSources.pro:
+ * create_jit_stubs: Copied from JavaScriptCore/create_rvct_stubs.
+ * create_rvct_stubs: Removed.
+
+2010-02-18 Oliver Hunt <oliver@apple.com>
+
+ Reviewed by Gavin Barraclough.
+
+ Improve interpreter getter performance
+ https://bugs.webkit.org/show_bug.cgi?id=35138
+
+ Improve the performance of getter dispatch by making it possible
+ for the interpreter to cache the GetterSetter object lookup.
+
+ To do this we simply need to make PropertySlot aware of getters
+ as a potentially cacheable property, and record the base and this
+ objects for a getter access. This allows us to use more-or-less
+ identical code to that used by the normal get_by_id caching, with
+ the dispatch being the only actual difference.
+
+ I'm holding off on implementing this in the JIT until I do some
+ cleanup to try to make coding in the JIT not as horrible
+ as it is currently.
+
+ * bytecode/CodeBlock.cpp:
+ (JSC::CodeBlock::dump):
+ (JSC::CodeBlock::derefStructures):
+ (JSC::CodeBlock::refStructures):
+ * bytecode/Opcode.h:
+ * interpreter/Interpreter.cpp:
+ (JSC::Interpreter::resolveGlobal):
+ (JSC::Interpreter::tryCacheGetByID):
+ (JSC::Interpreter::privateExecute):
+ * jit/JIT.cpp:
+ (JSC::JIT::privateCompileMainPass):
+ * jit/JITStubs.cpp:
+ (JSC::JITThunks::tryCacheGetByID):
+ (JSC::DEFINE_STUB_FUNCTION):
+ * runtime/JSObject.cpp:
+ (JSC::JSObject::fillGetterPropertySlot):
+ * runtime/PropertySlot.cpp:
+ (JSC::PropertySlot::functionGetter):
+ * runtime/PropertySlot.h:
+ (JSC::PropertySlot::isGetter):
+ (JSC::PropertySlot::isCacheable):
+ (JSC::PropertySlot::isCacheableValue):
+ (JSC::PropertySlot::setValueSlot):
+ (JSC::PropertySlot::setGetterSlot):
+ (JSC::PropertySlot::setCacheableGetterSlot):
+ (JSC::PropertySlot::clearOffset):
+ (JSC::PropertySlot::thisValue):
+
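As a rough illustration of the caching idea described in the entry above (the type and member names below are invented for this sketch and are not JSC's real PropertySlot API), a slot only needs to remember whether the cached property is a plain value or a getter, plus the base object, receiver and offset; the structure-check fast path then stays identical and only the final dispatch differs:

    #include <cstddef>

    // Hypothetical slot type; names are illustrative, not JSC's real PropertySlot.
    struct CachedPropertySlot {
        enum Kind { ValueSlot, GetterSlot };

        Kind kind;
        void* slotBase;           // object the property was actually found on
        void* thisObject;         // receiver to use when invoking a getter
        std::size_t cachedOffset; // storage offset inside slotBase

        bool isGetter() const { return kind == GetterSlot; }
        // Both plain values and getters are cacheable; only the dispatch differs.
        bool isCacheableValue() const { return kind == ValueSlot; }

        void setCacheableGetterSlot(void* base, void* thisObj, std::size_t offset)
        {
            kind = GetterSlot;
            slotBase = base;
            thisObject = thisObj;
            cachedOffset = offset;
        }
    };

The op_get_by_id_getter_* opcodes added by this merge are exactly that final dispatch step: the same structure check and offset load as the value path, followed by a call to the cached getter.
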
+2010-02-17 Geoffrey Garen <ggaren@apple.com>
+
+ Reviewed by Oliver Hunt.
+
+ Fixed a portion of:
+ <rdar://problem/7165917> | https://bugs.webkit.org/show_bug.cgi?id=28676
+ Safari 4 does not release memory back to the operating system fast enough (28676)
+
+ This patch fixes a surprisingly common edge case in which the page heap
+ would have only one free span, but that span would be larger than the
+ minimum free size, so we would decide not to free it, even though it
+ could be as large as 100MB or more!
+
+ SunSpider reports no change on Mac or Windows.
+
+ * wtf/FastMalloc.cpp:
+ (WTF::TCMalloc_PageHeap::scavenge): Call shouldContinueScavenging() instead
+ of doing the math ourselves. Don't keep a local value for pagesDecommitted
+ because that lets free_committed_pages_ be wrong temporarily. Instead,
+ update free_committed_pages_ as we go. ASSERT that we aren't releasing
+ a span that has already been released, because we think this is impossible.
+ Finally, don't be afraid to release all free memory in the page heap when
+ scavenging. We only scavenge after 5 seconds of the application's working
+ set not growing, and we keep both thread caches and a central cache on
+ top of the page heap, so the extra free pages in the page heap were just
+ overkill.
+
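A minimal sketch of the scavenging pattern described above, with invented types and a placeholder heuristic (this is not TCMalloc's real code): the free-page counter is updated span by span, so the single shouldContinueScavenging() predicate always sees an accurate value, and a span is never released twice.

    #include <cstddef>

    // Illustrative only; not TCMalloc's real data structures or heuristics.
    struct Span { std::size_t pageCount; bool released; Span* next; };

    static std::size_t free_committed_pages_;

    static bool shouldContinueScavenging()
    {
        // The real heuristic lives in one place; here we simply keep going
        // while any committed free pages remain.
        return free_committed_pages_ > 0;
    }

    static void scavenge(Span* freeList)
    {
        for (Span* span = freeList; span && shouldContinueScavenging(); span = span->next) {
            if (span->released)
                continue; // the real code ASSERTs a span is never released twice
            span->released = true;
            // Update the global counter as we go, so shouldContinueScavenging()
            // never sees a temporarily-wrong value (no local pagesDecommitted copy).
            free_committed_pages_ -= span->pageCount;
        }
    }
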
+2010-02-17 Gavin Barraclough <barraclough@apple.com>
+
+ Reviewed by Oliver Hunt.
+
+ https://bugs.webkit.org/show_bug.cgi?id=35070
+ Addition of 2 strings of length 2^31 may result in a string of length 0.
+
+ Check for overflow when creating a new JSString as a result of an addition
+ or concatenation, and throw an out-of-memory exception on overflow.
+
+ * runtime/JSString.h:
+ (JSC::):
+ * runtime/Operations.h:
+ (JSC::jsString):
+
+2010-02-17 Xan Lopez <xlopez@igalia.com>
+
+ Reviewed by Gustavo Noronha.
+
+ [Linux] Webkit incompatible with Java plugins
+ https://bugs.webkit.org/show_bug.cgi?id=24912
+
+ Add support for GFile to GOwnPtr.
+
+ Based on original work by Gustavo Noronha.
+
+ * wtf/gtk/GOwnPtr.cpp:
+ (WTF::GFile):
+ * wtf/gtk/GOwnPtr.h:
+
+2010-02-16 Gavin Barraclough <barraclough@apple.com>
+
+ Reviewed by Mark Rowe.
+
+ Fix a handful of other leaks seen on the buildbot.
+
+ * runtime/UStringImpl.h:
+ (JSC::UStringOrRopeImpl::deref): Delegate through to the subclass version of deref to ensure that
+ the correct cleanup takes place. This function previously featured some code that attempted to
+ skip deletion of static UStringImpl's. Closer inspection revealed that it was in fact equivalent
+ to "if (false)", meaning that UStringImpl's which had their final deref performed via this function
+ were leaked.
+
+2010-02-16 Mark Rowe <mrowe@apple.com>
+
+ Reviewed by Gavin Barraclough.
+
+ Fix a handful of leaks seen on the buildbot.
+
+ * runtime/UStringImpl.h:
+ (JSC::UStringOrRopeImpl::deref): Call URopeImpl::destructNonRecursive rather than delete
+ to ensure that the rope's fibers are also destroyed.
+
+2010-02-16 Gavin Barraclough <barraclough@apple.com>
+
+ Reviewed by Oliver Hunt.
+
+ https://bugs.webkit.org/show_bug.cgi?id=34964
+ Leaks tool reports false memory leaks due to Rope implementation.
+
+ A rope is a recursive data structure where each node in the rope holds a set of
+ pointers, each of which may reference either a string (in UStringImpl form) or
+ another rope node. A low bit in each pointer is used to distinguish between
+ rope & string elements, in a fashion similar to the recently-removed
+ PtrAndFlags class (see https://bugs.webkit.org/show_bug.cgi?id=33731 ). Again,
+ this causes a problem for Leaks – refactor to remove the magic pointer
+ mangling.
+
+ Move Rope out from JSString.h and rename to URopeImpl, to match UStringImpl.
+ Give UStringImpl and URopeImpl a common parent class, UStringOrRopeImpl.
+ Repurpose an otherwise invalid permutation of flags (static & should report
+ memory cost) to identify ropes.
+
+ This allows us to change the rope's fibers to interrogate the object rather
+ than storing a bool within the low bits of the pointer (or in some cases the
+ use of a common parent class removes the need to determine the type at all -
+ there is a common interface to ref or get the length of either ropes or strings).
+
+ * API/JSClassRef.cpp:
+ (OpaqueJSClass::OpaqueJSClass):
+ (OpaqueJSClassContextData::OpaqueJSClassContextData):
+ * bytecompiler/BytecodeGenerator.cpp:
+ (JSC::keyForCharacterSwitch):
+ * interpreter/Interpreter.cpp:
+ (JSC::Interpreter::privateExecute):
+ * jit/JITStubs.cpp:
+ (JSC::DEFINE_STUB_FUNCTION):
+ * runtime/ArrayPrototype.cpp:
+ (JSC::arrayProtoFuncToString):
+ * runtime/Identifier.cpp:
+ (JSC::Identifier::equal):
+ (JSC::Identifier::addSlowCase):
+ * runtime/JSString.cpp:
+ (JSC::JSString::resolveRope):
+ * runtime/JSString.h:
+ (JSC::):
+ (JSC::RopeBuilder::JSString):
+ (JSC::RopeBuilder::~JSString):
+ (JSC::RopeBuilder::appendStringInConstruct):
+ (JSC::RopeBuilder::appendValueInConstructAndIncrementLength):
+ (JSC::RopeBuilder::JSStringFinalizerStruct::JSStringFinalizerStruct):
+ (JSC::RopeBuilder::JSStringFinalizerStruct::):
+ * runtime/UString.cpp:
+ (JSC::UString::toStrictUInt32):
+ (JSC::equal):
+ * runtime/UString.h:
+ (JSC::UString::isEmpty):
+ (JSC::UString::size):
+ * runtime/UStringImpl.cpp:
+ (JSC::URopeImpl::derefFibersNonRecursive):
+ (JSC::URopeImpl::destructNonRecursive):
+ * runtime/UStringImpl.h:
+ (JSC::UStringOrRopeImpl::isRope):
+ (JSC::UStringOrRopeImpl::length):
+ (JSC::UStringOrRopeImpl::ref):
+ (JSC::UStringOrRopeImpl::):
+ (JSC::UStringOrRopeImpl::operator new):
+ (JSC::UStringOrRopeImpl::UStringOrRopeImpl):
+ (JSC::UStringImpl::adopt):
+ (JSC::UStringImpl::createUninitialized):
+ (JSC::UStringImpl::tryCreateUninitialized):
+ (JSC::UStringImpl::data):
+ (JSC::UStringImpl::cost):
+ (JSC::UStringImpl::deref):
+ (JSC::UStringImpl::UStringImpl):
+ (JSC::UStringImpl::):
+ (JSC::URopeImpl::tryCreateUninitialized):
+ (JSC::URopeImpl::initializeFiber):
+ (JSC::URopeImpl::fiberCount):
+ (JSC::URopeImpl::fibers):
+ (JSC::URopeImpl::deref):
+ (JSC::URopeImpl::URopeImpl):
+ (JSC::URopeImpl::hasOneRef):
+ (JSC::UStringOrRopeImpl::deref):
+
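The shared-base-class idea above can be sketched roughly as follows (invented names, not the real UStringImpl/URopeImpl): a fiber is an ordinary pointer to a common base type that can itself say whether it is a rope, so no bits need to be hidden in the pointer and the Leaks tool only ever sees well-formed addresses.

    // Illustrative sketch of the shared-base-class approach; these are not
    // JSC's real UStringImpl/URopeImpl types.
    struct UStringOrRopeSketch {
        unsigned refCount;
        unsigned length;
        bool rope; // JSC encodes this in an otherwise-invalid flag combination

        bool isRope() const { return rope; }
        void ref() { ++refCount; }
    };

    struct StringSketch : UStringOrRopeSketch {
        const unsigned short* characters; // UTF-16 data
    };

    struct RopeSketch : UStringOrRopeSketch {
        unsigned fiberCount;
        UStringOrRopeSketch* fibers[3]; // fixed size here; variable-length in practice

        // A fiber can simply be asked what it is; no pointer bit has to be
        // masked off before use.
        static bool fiberIsRope(UStringOrRopeSketch* fiber) { return fiber->isRope(); }
    };
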
+2010-02-15 Gabor Loki <loki@webkit.org>
+
+ Reviewed by Gavin Barraclough.
+
+ Fix the SP at ctiOpThrowNotCaught on Thumb2 (JSVALUE32)
+ https://bugs.webkit.org/show_bug.cgi?id=34939
+
+ * jit/JITStubs.cpp:
+
+2010-02-15 Gavin Barraclough <barraclough@apple.com>
+
+ Reviewed by NOBODY (Build Fix!).
+
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def:
+
+2010-02-15 Gavin Barraclough <barraclough@apple.com>
+
+ Reviewed by Oliver Hunt.
+
+ Some general Rope related refactoring.
+
+ Rename Rope::m_ropeLength to m_fiberCount, to be more descriptive.
+ Rename Rope::m_stringLength to simply m_length (since this is the
+ more conventional name for the length of a string). Move append
+ behaviour out into a new RopeBuilder class, so that Rope no longer
+ needs any knowledge of the JSString or UString implementation.
+
+ Make Rope no longer be nested within JSString.
+ (Rope no longer needs to reside within JSString.h, but moving it
+ out to a different header is left as a separate change
+ from these renames).
+
+ * JavaScriptCore.exp:
+ * jit/JITOpcodes.cpp:
+ (JSC::JIT::privateCompileCTIMachineTrampolines):
+ * runtime/JSString.cpp:
+ (JSC::Rope::destructNonRecursive):
+ (JSC::Rope::~Rope):
+ (JSC::JSString::resolveRope):
+ (JSC::JSString::toBoolean):
+ (JSC::JSString::getStringPropertyDescriptor):
+ * runtime/JSString.h:
+ (JSC::Rope::Fiber::Fiber):
+ (JSC::Rope::Fiber::deref):
+ (JSC::Rope::Fiber::ref):
+ (JSC::Rope::Fiber::refAndGetLength):
+ (JSC::Rope::Fiber::isRope):
+ (JSC::Rope::Fiber::rope):
+ (JSC::Rope::Fiber::isString):
+ (JSC::Rope::Fiber::string):
+ (JSC::Rope::Fiber::nonFiber):
+ (JSC::Rope::tryCreateUninitialized):
+ (JSC::Rope::append):
+ (JSC::Rope::fiberCount):
+ (JSC::Rope::length):
+ (JSC::Rope::fibers):
+ (JSC::Rope::Rope):
+ (JSC::Rope::operator new):
+ (JSC::):
+ (JSC::RopeBuilder::JSString):
+ (JSC::RopeBuilder::~JSString):
+ (JSC::RopeBuilder::length):
+ (JSC::RopeBuilder::canGetIndex):
+ (JSC::RopeBuilder::appendStringInConstruct):
+ (JSC::RopeBuilder::appendValueInConstructAndIncrementLength):
+ (JSC::RopeBuilder::isRope):
+ (JSC::RopeBuilder::fiberCount):
+ (JSC::JSString::getStringPropertySlot):
+ * runtime/Operations.h:
+ (JSC::jsString):
+
+2010-02-15 Gavin Barraclough <barraclough@apple.com>
+
+ Reviewed by NOBODY (Build fix).
+
+ Add missing cast for !YARR (PPC) builds.
+
+ * runtime/RegExp.cpp:
+ (JSC::RegExp::match):
+
+2010-02-14 Gavin Barraclough <barraclough@apple.com>
+
+ Reviewed by Darin Adler.
+
+ https://bugs.webkit.org/show_bug.cgi?id=33731
+ Many false leaks in release builds due to PtrAndFlags
+
+ StructureTransitionTable was effectively a smart pointer type,
+ one machine word in size and wholly contained as a member of
+ Structure. It either pointed to an actual table, or could
+ be used to describe a single transition entry without use of a
+ table.
+
+ This, however, worked by using a PtrAndFlags, which is not
+ compatible with the leaks tool. Since there is no clear way to
+ obtain another bit for 'free' here, and since there are bits
+ available up in Structure, merge this functionality back up into
+ Structure. Having this in a separate class was quite clean
+ from an encapsulation perspective, but this solution doesn't
+ seem too bad - all table access is now intermediated through the
+ Structure::structureTransitionTableFoo methods, keeping the
+ optimization fairly well contained.
+
+ This was the last use of PtrAndFlags, so removing the file too.
+
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * bytecode/CodeBlock.h:
+ * runtime/Structure.cpp:
+ (JSC::Structure::Structure):
+ (JSC::Structure::~Structure):
+ (JSC::Structure::addPropertyTransitionToExistingStructure):
+ (JSC::Structure::addPropertyTransition):
+ (JSC::Structure::hasTransition):
+ * runtime/Structure.h:
+ (JSC::Structure::):
+ (JSC::Structure::structureTransitionTableContains):
+ (JSC::Structure::structureTransitionTableGet):
+ (JSC::Structure::structureTransitionTableHasTransition):
+ (JSC::Structure::structureTransitionTableRemove):
+ (JSC::Structure::structureTransitionTableAdd):
+ (JSC::Structure::structureTransitionTable):
+ (JSC::Structure::setStructureTransitionTable):
+ (JSC::Structure::singleTransition):
+ (JSC::Structure::setSingleTransition):
+ * runtime/StructureTransitionTable.h:
+ * wtf/PtrAndFlags.h: Removed.
+
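A rough sketch of the single-transition optimization after it is folded into Structure (all names invented; the real code keeps the discriminating bit among Structure's existing flags): one word either holds the lone transition or a pointer to a full table, and a plain bool, rather than a bit stolen from the pointer, records which.

    #include <map>
    #include <string>

    // Illustrative sketch; names and layout are invented, not the real JSC Structure.
    class StructureSketch {
    public:
        explicit StructureSketch(const std::string& name = std::string())
            : m_name(name), m_usingSingleTransitionSlot(true)
        {
            m_transitions.single = nullptr;
        }

        StructureSketch* transitionTableGet(const std::string& name) const
        {
            if (m_usingSingleTransitionSlot) {
                StructureSketch* s = m_transitions.single;
                return (s && s->m_name == name) ? s : nullptr;
            }
            std::map<std::string, StructureSketch*>::const_iterator it = m_transitions.table->find(name);
            return it == m_transitions.table->end() ? nullptr : it->second;
        }

        void transitionTableAdd(const std::string& name, StructureSketch* next)
        {
            if (m_usingSingleTransitionSlot && !m_transitions.single) {
                m_transitions.single = next; // common case: one transition, no table
                return;
            }
            if (m_usingSingleTransitionSlot) { // second transition: promote to a table
                StructureSketch* existing = m_transitions.single;
                m_transitions.table = new std::map<std::string, StructureSketch*>; // teardown omitted in this sketch
                (*m_transitions.table)[existing->m_name] = existing;
                m_usingSingleTransitionSlot = false;
            }
            (*m_transitions.table)[name] = next;
        }

    private:
        std::string m_name;               // property name that created this structure
        bool m_usingSingleTransitionSlot; // lives alongside Structure's existing flag bits
        union {
            StructureSketch* single;
            std::map<std::string, StructureSketch*>* table;
        } m_transitions;
    };
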
+2010-02-15 Gavin Barraclough <barraclough@apple.com>
+
+ Rubber Stamped by Geoff Garen.
+
+ Bug 34948 - tryMakeString should fail on error in length calculation
+
+ Ooops! - "bool overflow" argument should have been "bool& overflow".
+
+ * runtime/UString.h:
+ (JSC::sumWithOverflow):
+ (JSC::tryMakeString):
+
+2010-02-15 Gavin Barraclough <barraclough@apple.com>
+
+ Reviewed by NOBODY (Build Fix (pt 2!)).
+
+ Some symbol names have changed, remove, will readd if required.
+
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def:
+
+2010-02-15 Gavin Barraclough <barraclough@apple.com>
+
+ Reviewed by NOBODY (Build Fix (pt 1?)).
+
+ Some symbol names have changed, remove, will readd if required.
+
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def:
+
+2010-02-15 Geoffrey Garen <ggaren@apple.com>
+
+ Reviewed by Oliver Hunt.
+
+ Removed some mistaken code added in http://trac.webkit.org/changeset/53860.
+
+ * API/APIShims.h:
+ (JSC::APICallbackShim::APICallbackShim):
+ (JSC::APICallbackShim::~APICallbackShim): No need to start/stop the
+ timeout checker when calling out from the API to the client; we want to
+ monitor the VM for timeouts, not the client. This mistake was harmless /
+ undetectable, since it's totally redundant with the APIEntryShim, which
+ also starts / stops the timeout checker.
+
+2010-02-15 Gavin Barraclough <barraclough@apple.com>
+
+ Reviewed by Geoff Garen.
+
+ Bug 34952 - String lengths in UString should be unsigned.
+ This matches WebCore::StringImpl, and better unifies behaviour throughout JSC.
+
+ * JavaScriptCore.exp:
+ * bytecode/EvalCodeCache.h:
+ * runtime/Identifier.cpp:
+ (JSC::Identifier::equal):
+ * runtime/Identifier.h:
+ * runtime/JSGlobalObjectFunctions.cpp:
+ (JSC::globalFuncEscape):
+ * runtime/JSONObject.cpp:
+ (JSC::gap):
+ (JSC::Stringifier::indent):
+ * runtime/NumberPrototype.cpp:
+ (JSC::numberProtoFuncToFixed):
+ (JSC::numberProtoFuncToPrecision):
+ * runtime/RegExp.cpp:
+ (JSC::RegExp::match):
+ * runtime/StringPrototype.cpp:
+ (JSC::substituteBackreferencesSlow):
+ (JSC::stringProtoFuncReplace):
+ (JSC::stringProtoFuncSplit):
+ (JSC::trimString):
+ * runtime/UString.cpp:
+ (JSC::UString::UString):
+ (JSC::UString::from):
+ (JSC::UString::getCString):
+ (JSC::UString::ascii):
+ (JSC::UString::operator[]):
+ (JSC::UString::toStrictUInt32):
+ (JSC::UString::find):
+ (JSC::UString::rfind):
+ (JSC::UString::substr):
+ (JSC::operator<):
+ (JSC::operator>):
+ (JSC::compare):
+ (JSC::equal):
+ (JSC::UString::UTF8String):
+ * runtime/UString.h:
+ (JSC::UString::size):
+ (JSC::operator==):
+ * runtime/UStringImpl.cpp:
+ (JSC::UStringImpl::create):
+ * runtime/UStringImpl.h:
+ (JSC::UStringImpl::create):
+ (JSC::UStringImpl::size):
+ (JSC::UStringImpl::computeHash):
+ (JSC::UStringImpl::UStringImpl):
+
+2010-02-15 Gavin Barraclough <barraclough@apple.com>
+
+ Reviewed by Geoff Garen.
+
+ Bug 34948 - tryMakeString should fail on error in length calculation
+
+ The sum of the length of substrings could overflow.
+
+ * runtime/UString.h:
+ (JSC::sumWithOverflow):
+ (JSC::tryMakeString):
+
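A self-contained sketch of the overflow guard (simplified to plain lengths; the real helpers operate on UString pieces), showing the by-reference overflow flag that the earlier "Ooops!" entry corrects:

    #include <cstdio>

    // Sketch of an overflow-checked length sum; it mirrors the idea, not JSC's exact code.
    static unsigned sumWithOverflow(unsigned a, unsigned b, bool& overflow)
    {
        unsigned result = a + b;
        if (result < a) // unsigned wrap-around: the true sum did not fit
            overflow = true;
        return result;
    }

    int main()
    {
        bool overflow = false;
        unsigned total = sumWithOverflow(0x80000000u, 0x80000000u, overflow);
        if (overflow)
            std::printf("length overflow: refuse to make the string\n");
        else
            std::printf("combined length: %u\n", total);
        return 0;
    }
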
+2010-02-15 Geoffrey Garen <ggaren@apple.com>
+
+ Reviewed by Oliver Hunt.
+
+ Fixed <rdar://problem/7628524> Crash beneath JSGlobalContextRelease when
+ typing in Google search field with GuardMalloc/full page heap enabled
+
+ * API/JSContextRef.cpp: Don't use APIEntryShim, since that requires
+ a JSGlobalData, which this function destroys. Do use setCurrentIdentifierTable
+ and JSLock instead, since those are the two features of APIEntryShim we
+ require.
+
+2010-02-15 Patrick Gansterer <paroga@paroga.com>
+
+ Reviewed by Laszlo Gombos.
+
+ Added additional parameter to create_rvct_stubs
+ for setting the offset of thunkReturnAddress.
+ https://bugs.webkit.org/show_bug.cgi?id=34657
+
+ * create_rvct_stubs:
+ * jit/JITStubs.cpp:
+
+2010-02-15 Jedrzej Nowacki <jedrzej.nowacki@nokia.com>
+
+ Reviewed by Simon Hausmann.
+
+ Fix QScriptValue::toIntXX methods.
+
+ More ECMAScript compliance.
+
+ [Qt] QScriptValue::toIntXX returns incorrect values
+ https://bugs.webkit.org/show_bug.cgi?id=34847
+
+ * qt/api/qscriptvalue_p.h:
+ (QScriptValuePrivate::toInteger):
+ (QScriptValuePrivate::toInt32):
+ (QScriptValuePrivate::toUInt32):
+ (QScriptValuePrivate::toUInt16):
+ * qt/tests/qscriptvalue/tst_qscriptvalue.h:
+ * qt/tests/qscriptvalue/tst_qscriptvalue_generated.cpp:
+ (tst_QScriptValue::toInteger_initData):
+ (tst_QScriptValue::toInteger_makeData):
+ (tst_QScriptValue::toInteger_test):
+ (tst_QScriptValue::toInt32_initData):
+ (tst_QScriptValue::toInt32_makeData):
+ (tst_QScriptValue::toInt32_test):
+ (tst_QScriptValue::toUInt32_initData):
+ (tst_QScriptValue::toUInt32_makeData):
+ (tst_QScriptValue::toUInt32_test):
+ (tst_QScriptValue::toUInt16_initData):
+ (tst_QScriptValue::toUInt16_makeData):
+ (tst_QScriptValue::toUInt16_test):
+
+2010-02-14 Laszlo Gombos <laszlo.1.gombos@nokia.com>
+
+ Reviewed by Adam Barth.
+
+ Implement NEVER_INLINE and NO_RETURN for RVCT
+ https://bugs.webkit.org/show_bug.cgi?id=34740
+
+ * wtf/AlwaysInline.h:
+
+2010-02-12 Gavin Barraclough <barraclough@apple.com>
+
+ Reviewed by Oliver Hunt.
+
+ https://bugs.webkit.org/show_bug.cgi?id=33731
+ Remove uses of PtrAndFlags from JIT data structures.
+
+ These break the OS X Leaks tool. Free up a bit in CallLinkInfo, and use an invalid
+ permutation of pointer states in MethodCallLinkInfo to represent the removed bits.
+
+ * bytecode/CodeBlock.h:
+ (JSC::CallLinkInfo::seenOnce):
+ (JSC::CallLinkInfo::setSeen):
+ (JSC::MethodCallLinkInfo::MethodCallLinkInfo):
+ (JSC::MethodCallLinkInfo::seenOnce):
+ (JSC::MethodCallLinkInfo::setSeen):
+ * jit/JIT.cpp:
+ (JSC::JIT::unlinkCall):
+ * jit/JITPropertyAccess.cpp:
+ (JSC::JIT::patchMethodCallProto):
+ * runtime/UString.h:
+
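Both replacement tricks can be sketched as follows (invented names, not the real CodeBlock.h declarations): CallLinkInfo gives one bit of a 31-bit position field to the flag, while MethodCallLinkInfo treats a non-null prototype-structure pointer with a still-null structure pointer as the "seen once" marker.

    struct StructureStub; // stand-in for JSC::Structure in this sketch

    // Sketch of CallLinkInfo's replacement: an explicit bitfield instead of a
    // flag hidden in the low bits of a pointer (which confused the Leaks tool).
    struct CallLinkInfoSketch {
        void* ownerCodeBlock; // now an ordinary, untagged pointer
        unsigned position : 31;
        unsigned hasSeenShouldRepatch : 1;

        bool seenOnce() const { return hasSeenShouldRepatch; }
        void setSeen() { hasSeenShouldRepatch = 1; }
    };

    // Sketch of MethodCallLinkInfo's replacement: an otherwise-impossible pointer
    // state (non-null prototype structure while the structure is still null)
    // stands in for the removed flag bit.
    struct MethodCallLinkInfoSketch {
        StructureStub* cachedStructure;
        StructureStub* cachedPrototypeStructure;

        MethodCallLinkInfoSketch() : cachedStructure(0), cachedPrototypeStructure(0) {}

        bool seenOnce() const { return !cachedStructure && cachedPrototypeStructure; }
        void setSeen() { cachedPrototypeStructure = reinterpret_cast<StructureStub*>(1); }
    };
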
+2010-02-12 Gavin Barraclough <barraclough@apple.com>
+
+ Reviewed by Darin Adler.
+
+ https://bugs.webkit.org/show_bug.cgi?id=33731
+ Many false leaks in release builds due to PtrAndFlags
+
+ Remove UntypedPtrAndBitfield (similar to PtrAndFlags) in UStringImpl,
+ and steal bits from the refCount instead.
+
+ * runtime/UStringImpl.cpp:
+ (JSC::UStringImpl::baseSharedBuffer):
+ (JSC::UStringImpl::~UStringImpl):
+ * runtime/UStringImpl.h:
+ (JSC::UStringImpl::cost):
+ (JSC::UStringImpl::isIdentifier):
+ (JSC::UStringImpl::setIsIdentifier):
+ (JSC::UStringImpl::ref):
+ (JSC::UStringImpl::deref):
+ (JSC::UStringImpl::UStringImpl):
+ (JSC::UStringImpl::bufferOwnerString):
+ (JSC::UStringImpl::bufferOwnership):
+ (JSC::UStringImpl::isStatic):
+ (JSC::UStringImpl::):
+
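The refCount trick can be sketched like this (field widths and flag names are invented for illustration): the count lives above the low bits, ref() and deref() step by the shifted unit, and the low bits hold the flags, so no pointer ever carries hidden bits.

    // Illustrative sketch: the reference count is kept shifted left so the low
    // bits are free for flags, and ref()/deref() step by the shifted unit.
    class RefCountWithFlagsSketch {
    public:
        RefCountWithFlagsSketch() : m_refCountAndFlags(s_refCountIncrement) {}

        void ref() { m_refCountAndFlags += s_refCountIncrement; }
        bool derefAndTestShouldDelete()
        {
            m_refCountAndFlags -= s_refCountIncrement;
            return !(m_refCountAndFlags & s_refCountMask);
        }

        bool isIdentifier() const { return (m_refCountAndFlags & s_flagIsIdentifier) != 0; }
        void setIsIdentifier() { m_refCountAndFlags |= s_flagIsIdentifier; }

    private:
        static const unsigned s_flagIsIdentifier  = 1u << 0;
        static const unsigned s_flagIsStatic      = 1u << 1;
        static const unsigned s_refCountIncrement = 1u << 2; // count lives above the flag bits
        static const unsigned s_refCountMask      = ~(s_refCountIncrement - 1);

        unsigned m_refCountAndFlags;
    };
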
+2010-02-12 Geoffrey Garen <ggaren@apple.com>
+
+ Reviewed by Darin Adler.
+
+ Removed an unnecessary data dependency from my last patch.
+
+ * runtime/SmallStrings.cpp:
+ (JSC::SmallStrings::markChildren): Since isAnyStringMarked being false
+ is a condition of entering the loop, we can just use '=' instead of '|='.
+
2010-02-12 Janne Koskinen <janne.p.koskinen@digia.com>
Reviewed by Tor Arne Vestbø.
diff --git a/JavaScriptCore/DerivedSources.pro b/JavaScriptCore/DerivedSources.pro
index bd9f6ab..28a229d 100644
--- a/JavaScriptCore/DerivedSources.pro
+++ b/JavaScriptCore/DerivedSources.pro
@@ -77,8 +77,8 @@ addExtraCompiler(jscbison)
# GENERATOR 3: JIT Stub functions for RVCT
rvctstubs.output = $${JSC_GENERATED_SOURCES_DIR}$${QMAKE_DIR_SEP}Generated${QMAKE_FILE_BASE}_RVCT.h
-rvctstubs.wkScript = $$PWD/create_rvct_stubs
-rvctstubs.commands = perl $$rvctstubs.wkScript ${QMAKE_FILE_NAME} -i > ${QMAKE_FILE_OUT}
+rvctstubs.wkScript = $$PWD/create_jit_stubs
+rvctstubs.commands = perl $$rvctstubs.wkScript --prefix RVCT ${QMAKE_FILE_NAME} -i > ${QMAKE_FILE_OUT}
rvctstubs.depends = ${QMAKE_FILE_NAME}
rvctstubs.input = RVCT_STUB_FILES
rvctstubs.CONFIG += no_link
diff --git a/JavaScriptCore/GNUmakefile.am b/JavaScriptCore/GNUmakefile.am
index 4ac89d9..53e3c65 100644
--- a/JavaScriptCore/GNUmakefile.am
+++ b/JavaScriptCore/GNUmakefile.am
@@ -84,6 +84,7 @@ javascriptcore_sources += \
JavaScriptCore/jit/JITCall.cpp \
JavaScriptCore/jit/JITCode.h \
JavaScriptCore/jit/JITPropertyAccess.cpp \
+ JavaScriptCore/jit/JITPropertyAccess32_64.cpp \
JavaScriptCore/jit/JITArithmetic.cpp \
JavaScriptCore/jit/ExecutableAllocator.cpp \
JavaScriptCore/jit/JIT.h \
diff --git a/JavaScriptCore/JavaScriptCore.exp b/JavaScriptCore/JavaScriptCore.exp
index f562b52..9355321 100644
--- a/JavaScriptCore/JavaScriptCore.exp
+++ b/JavaScriptCore/JavaScriptCore.exp
@@ -240,7 +240,7 @@ __ZN3JSC7UString4fromEj
__ZN3JSC7UString4fromEl
__ZN3JSC7UString9s_nullRepE
__ZN3JSC7UStringC1EPKc
-__ZN3JSC7UStringC1EPKti
+__ZN3JSC7UStringC1EPKtj
__ZN3JSC8Debugger23recompileAllJSFunctionsEPNS_12JSGlobalDataE
__ZN3JSC8Debugger6attachEPNS_14JSGlobalObjectE
__ZN3JSC8Debugger6detachEPNS_14JSGlobalObjectE
@@ -270,7 +270,6 @@ __ZN3JSC8JSObject23allocatePropertyStorageEmm
__ZN3JSC8JSObject24getOwnPropertyDescriptorEPNS_9ExecStateERKNS_10IdentifierERNS_18PropertyDescriptorE
__ZN3JSC8JSObject3putEPNS_9ExecStateERKNS_10IdentifierENS_7JSValueERNS_15PutPropertySlotE
__ZN3JSC8JSObject3putEPNS_9ExecStateEjNS_7JSValueE
-__ZN3JSC8JSString4RopeD1Ev
__ZN3JSC8Profiler13stopProfilingEPNS_9ExecStateERKNS_7UStringE
__ZN3JSC8Profiler14startProfilingEPNS_9ExecStateERKNS_7UStringE
__ZN3JSC8Profiler8profilerEv
@@ -393,7 +392,7 @@ __ZNK3JSC7UString10UTF8StringEb
__ZNK3JSC7UString14toStrictUInt32EPb
__ZNK3JSC7UString5asciiEv
__ZNK3JSC7UString6is8BitEv
-__ZNK3JSC7UString6substrEii
+__ZNK3JSC7UString6substrEjj
__ZNK3JSC7UString8toUInt32EPb
__ZNK3JSC7UString8toUInt32EPbb
__ZNK3JSC8JSObject11hasPropertyEPNS_9ExecStateERKNS_10IdentifierE
diff --git a/JavaScriptCore/JavaScriptCore.gypi b/JavaScriptCore/JavaScriptCore.gypi
index c67b6a8..c0eb086 100644
--- a/JavaScriptCore/JavaScriptCore.gypi
+++ b/JavaScriptCore/JavaScriptCore.gypi
@@ -120,6 +120,7 @@
'jit/JITInlineMethods.h',
'jit/JITOpcodes.cpp',
'jit/JITPropertyAccess.cpp',
+ 'jit/JITPropertyAccess32_64.cpp',
'jit/JITStubCall.h',
'jit/JITStubs.cpp',
'jit/JITStubs.h',
diff --git a/JavaScriptCore/JavaScriptCore.pri b/JavaScriptCore/JavaScriptCore.pri
index 75737ae..d4cfe10 100644
--- a/JavaScriptCore/JavaScriptCore.pri
+++ b/JavaScriptCore/JavaScriptCore.pri
@@ -107,6 +107,7 @@ SOURCES += \
jit/JIT.cpp \
jit/JITOpcodes.cpp \
jit/JITPropertyAccess.cpp \
+ jit/JITPropertyAccess32_64.cpp \
jit/JITStubs.cpp \
parser/Lexer.cpp \
parser/Nodes.cpp \
diff --git a/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def b/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def
index 6afef77..9e1e74a 100644
--- a/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def
+++ b/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def
@@ -17,7 +17,7 @@ EXPORTS
??0Structure@JSC@@AAE@VJSValue@1@ABVTypeInfo@1@I@Z
??0ThreadCondition@WTF@@QAE@XZ
??0UString@JSC@@QAE@PBD@Z
- ??0UString@JSC@@QAE@PB_WH@Z
+ ??0UString@JSC@@QAE@PB_WI@Z
??1CString@JSC@@QAE@XZ
??1ClientData@JSGlobalData@JSC@@UAE@XZ
??1Collator@WTF@@QAE@XZ
@@ -27,7 +27,6 @@ EXPORTS
??1JSGlobalObject@JSC@@UAE@XZ
??1Mutex@WTF@@QAE@XZ
??1RefCountedLeakCounter@WTF@@QAE@XZ
- ??1Rope@JSString@JSC@@QAE@XZ
??1Structure@JSC@@QAE@XZ
??1ThreadCondition@WTF@@QAE@XZ
??1UStringImpl@JSC@@AAE@XZ
@@ -61,7 +60,6 @@ EXPORTS
?className@JSObject@JSC@@UBE?AVUString@2@XZ
?collate@Collator@WTF@@QBE?AW4Result@12@PB_WI0I@Z
?collectAllGarbage@Heap@JSC@@QAEXXZ
- ?computeHash@UStringImpl@JSC@@SAIPB_WH@Z
?configurable@PropertyDescriptor@JSC@@QBE_NXZ
?construct@JSC@@YAPAVJSObject@1@PAVExecState@1@VJSValue@1@W4ConstructType@1@ABTConstructData@1@ABVArgList@1@@Z
?constructArray@JSC@@YAPAVJSArray@1@PAVExecState@1@ABVArgList@1@@Z
@@ -257,7 +255,7 @@ EXPORTS
?stopProfiling@Profiler@JSC@@QAE?AV?$PassRefPtr@VProfile@JSC@@@WTF@@PAVExecState@2@ABVUString@2@@Z
?stopSampling@JSGlobalData@JSC@@QAEXXZ
?strtod@WTF@@YANPBDPAPAD@Z
- ?substr@UString@JSC@@QBE?AV12@HH@Z
+ ?substr@UString@JSC@@QBE?AV12@II@Z
?symbolTableGet@JSVariableObject@JSC@@IAE_NABVIdentifier@2@AAVPropertyDescriptor@2@@Z
?synthesizePrototype@JSValue@JSC@@ABEPAVJSObject@2@PAVExecState@2@@Z
?thisObject@DebuggerCallFrame@JSC@@QBEPAVJSObject@2@XZ
diff --git a/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj b/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj
index 9c20af9..60022dd 100644
--- a/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj
+++ b/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj
@@ -1721,6 +1721,10 @@
>
</File>
<File
+ RelativePath="..\..\jit\JITPropertyAccess32_64.cpp"
+ >
+ </File>
+ <File
RelativePath="..\..\jit\JITStubCall.h"
>
</File>
diff --git a/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj b/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
index 5f904ce..2410466 100644
--- a/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
+++ b/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
@@ -39,7 +39,6 @@
088FA5BB0EF76D4300578E6F /* RandomNumber.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 088FA5B90EF76D4300578E6F /* RandomNumber.cpp */; };
088FA5BC0EF76D4300578E6F /* RandomNumber.h in Headers */ = {isa = PBXBuildFile; fileRef = 088FA5BA0EF76D4300578E6F /* RandomNumber.h */; settings = {ATTRIBUTES = (Private, ); }; };
08E279E90EF83B10007DB523 /* RandomNumberSeed.h in Headers */ = {isa = PBXBuildFile; fileRef = 08E279E80EF83B10007DB523 /* RandomNumberSeed.h */; };
- 0B1F921D0F1753500036468E /* PtrAndFlags.h in Headers */ = {isa = PBXBuildFile; fileRef = 0B1F921B0F17502D0036468E /* PtrAndFlags.h */; settings = {ATTRIBUTES = (Private, ); }; };
0B330C270F38C62300692DE3 /* TypeTraits.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0B330C260F38C62300692DE3 /* TypeTraits.cpp */; };
0B4D7E630F319AC800AD7E58 /* TypeTraits.h in Headers */ = {isa = PBXBuildFile; fileRef = 0B4D7E620F319AC800AD7E58 /* TypeTraits.h */; settings = {ATTRIBUTES = (Private, ); }; };
0BDFFAE00FC6192900D69EF4 /* CrossThreadRefCounted.h in Headers */ = {isa = PBXBuildFile; fileRef = 0BDFFAD40FC6171000D69EF4 /* CrossThreadRefCounted.h */; settings = {ATTRIBUTES = (Private, ); }; };
@@ -295,6 +294,7 @@
A7A1F7AC0F252B3C00E184E2 /* ByteArray.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A7A1F7AA0F252B3C00E184E2 /* ByteArray.cpp */; };
A7A1F7AD0F252B3C00E184E2 /* ByteArray.h in Headers */ = {isa = PBXBuildFile; fileRef = A7A1F7AB0F252B3C00E184E2 /* ByteArray.h */; settings = {ATTRIBUTES = (Private, ); }; };
A7B48F490EE8936F00DCBDB6 /* ExecutableAllocator.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A7B48DB60EE74CFC00DCBDB6 /* ExecutableAllocator.cpp */; };
+ A7C1E8E4112E72EF00A37F98 /* JITPropertyAccess32_64.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A7C1E8C8112E701C00A37F98 /* JITPropertyAccess32_64.cpp */; };
A7C2217810C7479400F97913 /* JSZombie.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A7C2216B10C7469C00F97913 /* JSZombie.cpp */; };
A7C530E4102A3813005BC741 /* MarkStackPosix.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A7C530E3102A3813005BC741 /* MarkStackPosix.cpp */; };
A7D649AA1015224E009B2E1B /* PossiblyNull.h in Headers */ = {isa = PBXBuildFile; fileRef = A7D649A91015224E009B2E1B /* PossiblyNull.h */; settings = {ATTRIBUTES = (Private, ); }; };
@@ -861,6 +861,7 @@
A7A1F7AB0F252B3C00E184E2 /* ByteArray.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ByteArray.h; sourceTree = "<group>"; };
A7B48DB50EE74CFC00DCBDB6 /* ExecutableAllocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ExecutableAllocator.h; sourceTree = "<group>"; };
A7B48DB60EE74CFC00DCBDB6 /* ExecutableAllocator.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ExecutableAllocator.cpp; sourceTree = "<group>"; };
+ A7C1E8C8112E701C00A37F98 /* JITPropertyAccess32_64.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JITPropertyAccess32_64.cpp; sourceTree = "<group>"; };
A7C2216810C745E000F97913 /* JSZombie.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSZombie.h; sourceTree = "<group>"; };
A7C2216B10C7469C00F97913 /* JSZombie.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JSZombie.cpp; sourceTree = "<group>"; };
A7C530E3102A3813005BC741 /* MarkStackPosix.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MarkStackPosix.cpp; sourceTree = "<group>"; };
@@ -1192,6 +1193,7 @@
960626950FB8EC02009798AB /* JITStubCall.h */,
14A23D6C0F4E19CE0023CDAD /* JITStubs.cpp */,
14A6581A0F4E36F4000150FD /* JITStubs.h */,
+ A7C1E8C8112E701C00A37F98 /* JITPropertyAccess32_64.cpp */,
);
path = jit;
sourceTree = "<group>";
@@ -1981,7 +1983,6 @@
BC18C4550E16F5CD00B34460 /* PropertySlot.h in Headers */,
BC18C4560E16F5CD00B34460 /* Protect.h in Headers */,
BC257DF40E1F53740016B6C9 /* PrototypeFunction.h in Headers */,
- 0B1F921D0F1753500036468E /* PtrAndFlags.h in Headers */,
147B84630E6DE6B1004775A4 /* PutPropertySlot.h in Headers */,
1429DA4A0ED245EC00B89619 /* Quantifier.h in Headers */,
088FA5BC0EF76D4300578E6F /* RandomNumber.h in Headers */,
@@ -2504,6 +2505,7 @@
1429DA820ED2482900B89619 /* WRECFunctors.cpp in Sources */,
1429DAE10ED2645B00B89619 /* WRECGenerator.cpp in Sources */,
1429DAC00ED263E700B89619 /* WRECParser.cpp in Sources */,
+ A7C1E8E4112E72EF00A37F98 /* JITPropertyAccess32_64.cpp in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
diff --git a/JavaScriptCore/bytecode/CodeBlock.cpp b/JavaScriptCore/bytecode/CodeBlock.cpp
index fd56ece..68debb2 100644
--- a/JavaScriptCore/bytecode/CodeBlock.cpp
+++ b/JavaScriptCore/bytecode/CodeBlock.cpp
@@ -49,8 +49,8 @@ namespace JSC {
static UString escapeQuotes(const UString& str)
{
UString result = str;
- int pos = 0;
- while ((pos = result.find('\"', pos)) >= 0) {
+ unsigned pos = 0;
+ while ((pos = result.find('\"', pos)) != UString::NotFound) {
result = makeString(result.substr(0, pos), "\"\\\"\"", result.substr(pos + 1));
pos += 4;
}
@@ -765,6 +765,26 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
printGetByIdOp(exec, location, it, "get_by_id_chain");
break;
}
+ case op_get_by_id_getter_self: {
+ printGetByIdOp(exec, location, it, "get_by_id_getter_self");
+ break;
+ }
+ case op_get_by_id_getter_self_list: {
+ printGetByIdOp(exec, location, it, "get_by_id_getter_self_list");
+ break;
+ }
+ case op_get_by_id_getter_proto: {
+ printGetByIdOp(exec, location, it, "get_by_id_getter_proto");
+ break;
+ }
+ case op_get_by_id_getter_proto_list: {
+ printGetByIdOp(exec, location, it, "get_by_id_getter_proto_list");
+ break;
+ }
+ case op_get_by_id_getter_chain: {
+ printGetByIdOp(exec, location, it, "get_by_id_getter_chain");
+ break;
+ }
case op_get_by_id_generic: {
printGetByIdOp(exec, location, it, "get_by_id_generic");
break;
@@ -1355,16 +1375,16 @@ void CodeBlock::derefStructures(Instruction* vPC) const
{
Interpreter* interpreter = m_globalData->interpreter;
- if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_self)) {
+ if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_self) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_getter_self)) {
vPC[4].u.structure->deref();
return;
}
- if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_proto)) {
+ if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_proto) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_getter_proto)) {
vPC[4].u.structure->deref();
vPC[5].u.structure->deref();
return;
}
- if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_chain)) {
+ if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_chain) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_getter_chain)) {
vPC[4].u.structure->deref();
vPC[5].u.structureChain->deref();
return;
@@ -1385,7 +1405,9 @@ void CodeBlock::derefStructures(Instruction* vPC) const
return;
}
if ((vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_proto_list))
- || (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_self_list))) {
+ || (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_self_list))
+ || (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_getter_proto_list))
+ || (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_getter_self_list))) {
PolymorphicAccessStructureList* polymorphicStructures = vPC[4].u.polymorphicStructures;
polymorphicStructures->derefStructures(vPC[5].u.operand);
delete polymorphicStructures;
@@ -1400,16 +1422,16 @@ void CodeBlock::refStructures(Instruction* vPC) const
{
Interpreter* interpreter = m_globalData->interpreter;
- if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_self)) {
+ if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_self) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_getter_self)) {
vPC[4].u.structure->ref();
return;
}
- if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_proto)) {
+ if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_proto) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_getter_proto)) {
vPC[4].u.structure->ref();
vPC[5].u.structure->ref();
return;
}
- if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_chain)) {
+ if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_chain) || vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_getter_chain)) {
vPC[4].u.structure->ref();
vPC[5].u.structureChain->ref();
return;
diff --git a/JavaScriptCore/bytecode/CodeBlock.h b/JavaScriptCore/bytecode/CodeBlock.h
index eb874cc..d92dc9d 100644
--- a/JavaScriptCore/bytecode/CodeBlock.h
+++ b/JavaScriptCore/bytecode/CodeBlock.h
@@ -36,7 +36,6 @@
#include "JSGlobalObject.h"
#include "JumpTable.h"
#include "Nodes.h"
-#include "PtrAndFlags.h"
#include "RegExp.h"
#include "UString.h"
#include <wtf/FastAllocBase.h>
@@ -110,44 +109,54 @@ namespace JSC {
CodeLocationNearCall callReturnLocation;
CodeLocationDataLabelPtr hotPathBegin;
CodeLocationNearCall hotPathOther;
- PtrAndFlags<CodeBlock, HasSeenShouldRepatch> ownerCodeBlock;
+ CodeBlock* ownerCodeBlock;
CodeBlock* callee;
- unsigned position;
+ unsigned position : 31;
+ unsigned hasSeenShouldRepatch : 1;
void setUnlinked() { callee = 0; }
bool isLinked() { return callee; }
bool seenOnce()
{
- return ownerCodeBlock.isFlagSet(hasSeenShouldRepatch);
+ return hasSeenShouldRepatch;
}
void setSeen()
{
- ownerCodeBlock.setFlag(hasSeenShouldRepatch);
+ hasSeenShouldRepatch = true;
}
};
struct MethodCallLinkInfo {
MethodCallLinkInfo()
: cachedStructure(0)
+ , cachedPrototypeStructure(0)
{
}
bool seenOnce()
{
- return cachedPrototypeStructure.isFlagSet(hasSeenShouldRepatch);
+ ASSERT(!cachedStructure);
+ return cachedPrototypeStructure;
}
void setSeen()
{
- cachedPrototypeStructure.setFlag(hasSeenShouldRepatch);
+ ASSERT(!cachedStructure && !cachedPrototypeStructure);
+ // We use the values of cachedStructure & cachedPrototypeStructure to indicate the
+ // current state.
+ // - In the initial state, both are null.
+ // - Once this transition has been taken once, cachedStructure is
+ // null and cachedPrototypeStructure is set to a non-null value.
+ // - Once the call is linked both structures are set to non-null values.
+ cachedPrototypeStructure = (Structure*)1;
}
CodeLocationCall callReturnLocation;
CodeLocationDataLabelPtr structureLabel;
Structure* cachedStructure;
- PtrAndFlags<Structure, HasSeenShouldRepatch> cachedPrototypeStructure;
+ Structure* cachedPrototypeStructure;
};
struct FunctionRegisterInfo {
diff --git a/JavaScriptCore/bytecode/EvalCodeCache.h b/JavaScriptCore/bytecode/EvalCodeCache.h
index 05834fc..a036dd4 100644
--- a/JavaScriptCore/bytecode/EvalCodeCache.h
+++ b/JavaScriptCore/bytecode/EvalCodeCache.h
@@ -65,7 +65,7 @@ namespace JSC {
bool isEmpty() const { return m_cacheMap.isEmpty(); }
private:
- static const int maxCacheableSourceLength = 256;
+ static const unsigned maxCacheableSourceLength = 256;
static const int maxCacheEntries = 64;
typedef HashMap<RefPtr<UString::Rep>, RefPtr<EvalExecutable> > EvalCacheMap;
diff --git a/JavaScriptCore/bytecode/Opcode.h b/JavaScriptCore/bytecode/Opcode.h
index d9b2153..56555f3 100644
--- a/JavaScriptCore/bytecode/Opcode.h
+++ b/JavaScriptCore/bytecode/Opcode.h
@@ -104,6 +104,11 @@ namespace JSC {
macro(op_get_by_id_proto, 8) \
macro(op_get_by_id_proto_list, 8) \
macro(op_get_by_id_chain, 8) \
+ macro(op_get_by_id_getter_self, 8) \
+ macro(op_get_by_id_getter_self_list, 8) \
+ macro(op_get_by_id_getter_proto, 8) \
+ macro(op_get_by_id_getter_proto_list, 8) \
+ macro(op_get_by_id_getter_chain, 8) \
macro(op_get_by_id_generic, 8) \
macro(op_get_array_length, 8) \
macro(op_get_string_length, 8) \
diff --git a/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp b/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
index b0a0877..f2193b0 100644
--- a/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
+++ b/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
@@ -1940,7 +1940,7 @@ static int32_t keyForCharacterSwitch(ExpressionNode* node, int32_t min, int32_t
UNUSED_PARAM(max);
ASSERT(node->isString());
UString::Rep* clause = static_cast<StringNode*>(node)->value().ustring().rep();
- ASSERT(clause->size() == 1);
+ ASSERT(clause->length() == 1);
int32_t key = clause->data()[0];
ASSERT(key >= min);
diff --git a/JavaScriptCore/bytecompiler/NodesCodegen.cpp b/JavaScriptCore/bytecompiler/NodesCodegen.cpp
index b66c50d..4162873 100644
--- a/JavaScriptCore/bytecompiler/NodesCodegen.cpp
+++ b/JavaScriptCore/bytecompiler/NodesCodegen.cpp
@@ -76,8 +76,8 @@ namespace JSC {
static void substitute(UString& string, const UString& substring)
{
- int position = string.find("%s");
- ASSERT(position != -1);
+ unsigned position = string.find("%s");
+ ASSERT(position != UString::NotFound);
string = makeString(string.substr(0, position), substring, string.substr(position + 2));
}
diff --git a/JavaScriptCore/create_rvct_stubs b/JavaScriptCore/create_jit_stubs
index 0c49c4f..4d510ea 100644
--- a/JavaScriptCore/create_rvct_stubs
+++ b/JavaScriptCore/create_jit_stubs
@@ -1,6 +1,7 @@
#! /usr/bin/perl -w
#
# Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies)
+# Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
@@ -18,9 +19,27 @@
# Boston, MA 02110-1301, USA.
use strict;
+use File::Basename;
+use Getopt::Long;
-my $file = $ARGV[0];
-shift;
+my $usage = basename($0) . " --prefix prefix [--offset offset] file";
+
+my $rtype_template = quotemeta("#rtype#");
+my $offset_template = quotemeta("#offset#");
+my $op_template = quotemeta("#op#");
+
+my $prefix;
+my $offset = 32;
+my $file;
+
+my $getOptionsResult = GetOptions(
+ 'prefix=s' => \$prefix,
+ 'offset=i' => \$offset
+);
+
+$file = $ARGV[0];
+
+die "$usage\n" unless ($prefix and $file);
my $stub_template = "";
my $stub = "";
@@ -28,20 +47,18 @@ my $stub = "";
my $rtype = "";
my $op = "";
-my $rtype_template = quotemeta("#rtype#");
-my $op_template = quotemeta("#op#");
-
-print STDERR "Creating RVCT stubs for $file \n";
+print STDERR "Creating JIT stubs for $file \n";
open(IN, $file) or die "No such file $file";
while ( $_ = <IN> ) {
- if ( /^RVCT\((.*)\)/ ) {
+ if ( /^$prefix\((.*)\)/ ) {
$stub_template .= $1 . "\n";
}
if ( /^DEFINE_STUB_FUNCTION\((.*), (.*)\)/ ) {
$stub = $stub_template;
$rtype = quotemeta($1);
$op = quotemeta($2);
+ $stub =~ s/$offset_template/$offset/g;
$stub =~ s/$rtype_template/$rtype/g;
$stub =~ s/$op_template/$op/g;
$stub =~ s/\\\*/\*/g;
diff --git a/JavaScriptCore/interpreter/Interpreter.cpp b/JavaScriptCore/interpreter/Interpreter.cpp
index 2498d69..6dbbcf9 100644
--- a/JavaScriptCore/interpreter/Interpreter.cpp
+++ b/JavaScriptCore/interpreter/Interpreter.cpp
@@ -40,6 +40,7 @@
#include "DebuggerCallFrame.h"
#include "EvalCodeCache.h"
#include "ExceptionHelpers.h"
+#include "GetterSetter.h"
#include "GlobalEvalFunction.h"
#include "JSActivation.h"
#include "JSArray.h"
@@ -169,7 +170,7 @@ NEVER_INLINE bool Interpreter::resolveGlobal(CallFrame* callFrame, Instruction*
PropertySlot slot(globalObject);
if (globalObject->getPropertySlot(callFrame, ident, slot)) {
JSValue result = slot.getValue(callFrame, ident);
- if (slot.isCacheable() && !globalObject->structure()->isUncacheableDictionary() && slot.slotBase() == globalObject) {
+ if (slot.isCacheableValue() && !globalObject->structure()->isUncacheableDictionary() && slot.slotBase() == globalObject) {
if (vPC[4].u.structure)
vPC[4].u.structure->deref();
globalObject->structure()->ref();
@@ -1029,7 +1030,7 @@ NEVER_INLINE void Interpreter::tryCacheGetByID(CallFrame* callFrame, CodeBlock*
// Cache hit: Specialize instruction and ref Structures.
if (slot.slotBase() == baseValue) {
- vPC[0] = getOpcode(op_get_by_id_self);
+ vPC[0] = slot.isGetter() ? getOpcode(op_get_by_id_getter_self) : getOpcode(op_get_by_id_self);
vPC[5] = slot.cachedOffset();
codeBlock->refStructures(vPC);
@@ -1056,7 +1057,7 @@ NEVER_INLINE void Interpreter::tryCacheGetByID(CallFrame* callFrame, CodeBlock*
ASSERT(!baseObject->structure()->isUncacheableDictionary());
- vPC[0] = getOpcode(op_get_by_id_proto);
+ vPC[0] = slot.isGetter() ? getOpcode(op_get_by_id_getter_proto) : getOpcode(op_get_by_id_proto);
vPC[5] = baseObject->structure();
vPC[6] = offset;
@@ -1071,7 +1072,7 @@ NEVER_INLINE void Interpreter::tryCacheGetByID(CallFrame* callFrame, CodeBlock*
return;
}
- vPC[0] = getOpcode(op_get_by_id_chain);
+ vPC[0] = slot.isGetter() ? getOpcode(op_get_by_id_getter_chain) : getOpcode(op_get_by_id_chain);
vPC[4] = structure;
vPC[5] = structure->prototypeChain(callFrame);
vPC[6] = count;
@@ -2161,6 +2162,51 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
uncacheGetByID(callFrame->codeBlock(), vPC);
NEXT_INSTRUCTION();
}
+#if HAVE(COMPUTED_GOTO)
+ goto *(&&skip_id_getter_proto);
+#endif
+ DEFINE_OPCODE(op_get_by_id_getter_proto) {
+ /* op_get_by_id_getter_proto dst(r) base(r) property(id) structure(sID) prototypeStructure(sID) offset(n) nop(n)
+
+ Cached property access: Attempts to get a cached getter property from the
+ value base's prototype. If the cache misses, op_get_by_id_getter_proto
+ reverts to op_get_by_id.
+ */
+ int base = vPC[2].u.operand;
+ JSValue baseValue = callFrame->r(base).jsValue();
+
+ if (LIKELY(baseValue.isCell())) {
+ JSCell* baseCell = asCell(baseValue);
+ Structure* structure = vPC[4].u.structure;
+
+ if (LIKELY(baseCell->structure() == structure)) {
+ ASSERT(structure->prototypeForLookup(callFrame).isObject());
+ JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
+ Structure* prototypeStructure = vPC[5].u.structure;
+
+ if (LIKELY(protoObject->structure() == prototypeStructure)) {
+ int dst = vPC[1].u.operand;
+ int offset = vPC[6].u.operand;
+ if (GetterSetter* getterSetter = asGetterSetter(protoObject->getDirectOffset(offset).asCell())) {
+ JSObject* getter = getterSetter->getter();
+ CallData callData;
+ CallType callType = getter->getCallData(callData);
+ JSValue result = call(callFrame, getter, callType, callData, asObject(baseCell), ArgList());
+ CHECK_FOR_EXCEPTION();
+ callFrame->r(dst) = result;
+ } else
+ callFrame->r(dst) = jsUndefined();
+ vPC += OPCODE_LENGTH(op_get_by_id_getter_proto);
+ NEXT_INSTRUCTION();
+ }
+ }
+ }
+ uncacheGetByID(callFrame->codeBlock(), vPC);
+ NEXT_INSTRUCTION();
+ }
+#if HAVE(COMPUTED_GOTO)
+ skip_id_getter_proto:
+#endif
DEFINE_OPCODE(op_get_by_id_self_list) {
// Polymorphic self access caching currently only supported when JITting.
ASSERT_NOT_REACHED();
@@ -2175,6 +2221,20 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
vPC += OPCODE_LENGTH(op_get_by_id_proto_list);
NEXT_INSTRUCTION();
}
+ DEFINE_OPCODE(op_get_by_id_getter_self_list) {
+ // Polymorphic self access caching currently only supported when JITting.
+ ASSERT_NOT_REACHED();
+ // This case of the switch must not be empty, else (op_get_by_id_self_list == op_get_by_id_chain)!
+ vPC += OPCODE_LENGTH(op_get_by_id_self_list);
+ NEXT_INSTRUCTION();
+ }
+ DEFINE_OPCODE(op_get_by_id_getter_proto_list) {
+ // Polymorphic prototype access caching currently only supported when JITting.
+ ASSERT_NOT_REACHED();
+ // This case of the switch must not be empty, else (op_get_by_id_proto_list == op_get_by_id_chain)!
+ vPC += OPCODE_LENGTH(op_get_by_id_proto_list);
+ NEXT_INSTRUCTION();
+ }
DEFINE_OPCODE(op_get_by_id_chain) {
/* op_get_by_id_chain dst(r) base(r) property(id) structure(sID) structureChain(chain) count(n) offset(n)
@@ -2221,6 +2281,49 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
uncacheGetByID(callFrame->codeBlock(), vPC);
NEXT_INSTRUCTION();
}
+#if HAVE(COMPUTED_GOTO)
+ goto *(&&skip_id_getter_self);
+#endif
+ DEFINE_OPCODE(op_get_by_id_getter_self) {
+ /* op_get_by_id_self dst(r) base(r) property(id) structure(sID) offset(n) nop(n) nop(n)
+
+ Cached property access: Attempts to get a cached property from the
+ value base. If the cache misses, op_get_by_id_getter_self reverts to
+ op_get_by_id.
+ */
+ int base = vPC[2].u.operand;
+ JSValue baseValue = callFrame->r(base).jsValue();
+
+ if (LIKELY(baseValue.isCell())) {
+ JSCell* baseCell = asCell(baseValue);
+ Structure* structure = vPC[4].u.structure;
+
+ if (LIKELY(baseCell->structure() == structure)) {
+ ASSERT(baseCell->isObject());
+ JSObject* baseObject = asObject(baseCell);
+ int dst = vPC[1].u.operand;
+ int offset = vPC[5].u.operand;
+
+ if (GetterSetter* getterSetter = asGetterSetter(baseObject->getDirectOffset(offset).asCell())) {
+ JSObject* getter = getterSetter->getter();
+ CallData callData;
+ CallType callType = getter->getCallData(callData);
+ JSValue result = call(callFrame, getter, callType, callData, baseObject, ArgList());
+ CHECK_FOR_EXCEPTION();
+ callFrame->r(dst) = result;
+ } else
+ callFrame->r(dst) = jsUndefined();
+
+ vPC += OPCODE_LENGTH(op_get_by_id_getter_self);
+ NEXT_INSTRUCTION();
+ }
+ }
+ uncacheGetByID(callFrame->codeBlock(), vPC);
+ NEXT_INSTRUCTION();
+ }
+#if HAVE(COMPUTED_GOTO)
+ skip_id_getter_self:
+#endif
DEFINE_OPCODE(op_get_by_id_generic) {
/* op_get_by_id_generic dst(r) base(r) property(id) nop(sID) nop(n) nop(n) nop(n)
@@ -2241,6 +2344,61 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
vPC += OPCODE_LENGTH(op_get_by_id_generic);
NEXT_INSTRUCTION();
}
+#if HAVE(COMPUTED_GOTO)
+ goto *(&&skip_id_getter_chain);
+#endif
+ DEFINE_OPCODE(op_get_by_id_getter_chain) {
+ /* op_get_by_id_getter_chain dst(r) base(r) property(id) structure(sID) structureChain(chain) count(n) offset(n)
+
+ Cached property access: Attempts to get a cached property from the
+ value base's prototype chain. If the cache misses, op_get_by_id_getter_chain
+ reverts to op_get_by_id.
+ */
+ int base = vPC[2].u.operand;
+ JSValue baseValue = callFrame->r(base).jsValue();
+
+ if (LIKELY(baseValue.isCell())) {
+ JSCell* baseCell = asCell(baseValue);
+ Structure* structure = vPC[4].u.structure;
+
+ if (LIKELY(baseCell->structure() == structure)) {
+ RefPtr<Structure>* it = vPC[5].u.structureChain->head();
+ size_t count = vPC[6].u.operand;
+ RefPtr<Structure>* end = it + count;
+
+ while (true) {
+ JSObject* baseObject = asObject(baseCell->structure()->prototypeForLookup(callFrame));
+
+ if (UNLIKELY(baseObject->structure() != (*it).get()))
+ break;
+
+ if (++it == end) {
+ int dst = vPC[1].u.operand;
+ int offset = vPC[7].u.operand;
+ if (GetterSetter* getterSetter = asGetterSetter(baseObject->getDirectOffset(offset).asCell())) {
+ JSObject* getter = getterSetter->getter();
+ CallData callData;
+ CallType callType = getter->getCallData(callData);
+ JSValue result = call(callFrame, getter, callType, callData, asObject(baseCell), ArgList());
+ CHECK_FOR_EXCEPTION();
+ callFrame->r(dst) = result;
+ } else
+ callFrame->r(dst) = jsUndefined();
+ vPC += OPCODE_LENGTH(op_get_by_id_getter_chain);
+ NEXT_INSTRUCTION();
+ }
+
+ // Update baseCell, so that next time around the loop we'll pick up the prototype's prototype.
+ baseCell = baseObject;
+ }
+ }
+ }
+ uncacheGetByID(callFrame->codeBlock(), vPC);
+ NEXT_INSTRUCTION();
+ }
+#if HAVE(COMPUTED_GOTO)
+ skip_id_getter_chain:
+#endif
DEFINE_OPCODE(op_get_array_length) {
/* op_get_array_length dst(r) base(r) property(id) nop(sID) nop(n) nop(n) nop(n)
@@ -2924,7 +3082,7 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
vPC += defaultOffset;
else {
UString::Rep* value = asString(scrutinee)->value(callFrame).rep();
- if (value->size() != 1)
+ if (value->length() != 1)
vPC += defaultOffset;
else
vPC += callFrame->codeBlock()->characterSwitchJumpTable(tableIndex).offsetForValue(value->data()[0], defaultOffset);
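
The op_get_by_id_getter_self / op_get_by_id_getter_chain cases added above cache getter-backed property reads in the interpreter: a structure identity check guards a direct call to the stored getter, and any mismatch uncaches the instruction and falls back to the generic op_get_by_id path. The standalone sketch below models that control flow with simplified stand-in types (ShapeId, ModelObject, GetterSelfCache are illustrative assumptions, not JSC classes):

    // Illustrative sketch only, using simplified stand-in types rather than
    // JSC's Structure/JSObject/GetterSetter. It models the fast path of
    // op_get_by_id_getter_self: compare the cached structure, then call the
    // getter stored at the cached slot; any miss reverts to the generic lookup.
    #include <cstddef>
    #include <functional>
    #include <vector>

    struct ShapeId;                                             // stand-in for Structure identity

    struct ModelObject {
        const ShapeId* shape;                                   // current "structure" of the object
        std::vector<std::function<int(ModelObject&)>> getters;  // property slots holding getters
    };

    struct GetterSelfCache {
        const ShapeId* expectedShape;                           // shape recorded when the cache was primed
        size_t offset;                                          // slot of the cached getter
    };

    // Returns true and writes the result on a cache hit; false means "uncache
    // and take the generic get_by_id path", mirroring uncacheGetByID() above.
    inline bool cachedGetterSelf(ModelObject& base, const GetterSelfCache& cache, int& result)
    {
        if (base.shape != cache.expectedShape)
            return false;                                       // structure changed: cache miss
        result = base.getters[cache.offset](base);              // invoke the getter with 'base' as this
        return true;
    }

The chain variant repeats the same shape comparison once per prototype before invoking the getter found on the final object in the chain.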
diff --git a/JavaScriptCore/jit/JIT.cpp b/JavaScriptCore/jit/JIT.cpp
index c0da66d..78c5153 100644
--- a/JavaScriptCore/jit/JIT.cpp
+++ b/JavaScriptCore/jit/JIT.cpp
@@ -322,6 +322,11 @@ void JIT::privateCompileMainPass()
case op_get_by_id_proto_list:
case op_get_by_id_self:
case op_get_by_id_self_list:
+ case op_get_by_id_getter_chain:
+ case op_get_by_id_getter_proto:
+ case op_get_by_id_getter_proto_list:
+ case op_get_by_id_getter_self:
+ case op_get_by_id_getter_self_list:
case op_get_string_length:
case op_put_by_id_generic:
case op_put_by_id_replace:
@@ -582,7 +587,7 @@ void JIT::unlinkCall(CallLinkInfo* callLinkInfo)
// When the JSFunction is deleted the pointer embedded in the instruction stream will no longer be valid
// (and, if a new JSFunction happened to be constructed at the same location, we could get a false positive
// match). Reset the check so it no longer matches.
- RepatchBuffer repatchBuffer(callLinkInfo->ownerCodeBlock.get());
+ RepatchBuffer repatchBuffer(callLinkInfo->ownerCodeBlock);
#if USE(JSVALUE32_64)
repatchBuffer.repatch(callLinkInfo->hotPathBegin, 0);
#else
diff --git a/JavaScriptCore/jit/JITOpcodes.cpp b/JavaScriptCore/jit/JITOpcodes.cpp
index c3f20f1..c470495 100644
--- a/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/JavaScriptCore/jit/JITOpcodes.cpp
@@ -56,7 +56,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
// Checks out okay! - get the length from the UString.
- load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_stringLength)), regT2);
+ load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_length)), regT2);
Jump string_failureCases3 = branch32(Above, regT2, Imm32(INT_MAX));
move(regT2, regT0);
@@ -1517,7 +1517,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
// Checks out okay! - get the length from the UString.
- load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_stringLength)), regT0);
+ load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_length)), regT0);
Jump string_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));
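
Both trampolines patched here implement the same guard sequence for the string-length fast path, now reading the renamed m_length field. The sketch below restates it with placeholder types (CellModel and the INT_MAX bound are assumptions for illustration; the second trampoline uses JSImmediate::maxImmediateInt instead): verify the cell's vtable pointer identifies a JSString, then make sure the length fits the immediate-integer range before returning it.

    // Simplified model of the guard sequence above; field names and the
    // INT_MAX bound are illustrative assumptions, not JSC's exact layout.
    #include <climits>
    #include <cstdint>

    struct CellModel {
        const void* vptr;   // stand-in for the cell's vtable pointer
        uint32_t length;    // stand-in for JSString::m_length
    };

    inline bool fastStringLength(const CellModel& cell, const void* jsStringVPtr, int32_t& out)
    {
        if (cell.vptr != jsStringVPtr)
            return false;                        // not a JSString: bail to the slow case
        if (cell.length > INT_MAX)
            return false;                        // too large to return as an immediate int
        out = static_cast<int32_t>(cell.length);
        return true;
    }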
diff --git a/JavaScriptCore/jit/JITPropertyAccess.cpp b/JavaScriptCore/jit/JITPropertyAccess.cpp
index ef95f99..151bb03 100644
--- a/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -24,6 +24,9 @@
*/
#include "config.h"
+
+#if !USE(JSVALUE32_64)
+
#include "JIT.h"
#if ENABLE(JIT)
@@ -48,978 +51,6 @@ using namespace std;
namespace JSC {
-#if USE(JSVALUE32_64)
-
-void JIT::emit_op_put_by_index(Instruction* currentInstruction)
-{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned value = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_put_by_index);
- stubCall.addArgument(base);
- stubCall.addArgument(Imm32(property));
- stubCall.addArgument(value);
- stubCall.call();
-}
-
-void JIT::emit_op_put_getter(Instruction* currentInstruction)
-{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned function = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_put_getter);
- stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
- stubCall.addArgument(function);
- stubCall.call();
-}
-
-void JIT::emit_op_put_setter(Instruction* currentInstruction)
-{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned function = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_put_setter);
- stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
- stubCall.addArgument(function);
- stubCall.call();
-}
-
-void JIT::emit_op_del_by_id(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_del_by_id);
- stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
- stubCall.call(dst);
-}
-
-
-#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
-void JIT::emit_op_method_check(Instruction*) {}
-void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
-#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
-#endif
-
-void JIT::emit_op_get_by_val(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_get_by_val);
- stubCall.addArgument(base);
- stubCall.addArgument(property);
- stubCall.call(dst);
-}
-
-void JIT::emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- ASSERT_NOT_REACHED();
-}
-
-void JIT::emit_op_put_by_val(Instruction* currentInstruction)
-{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned value = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_put_by_val);
- stubCall.addArgument(base);
- stubCall.addArgument(property);
- stubCall.addArgument(value);
- stubCall.call();
-}
-
-void JIT::emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- ASSERT_NOT_REACHED();
-}
-
-void JIT::emit_op_get_by_id(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int ident = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_get_by_id_generic);
- stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
- stubCall.call(dst);
-
- m_propertyAccessInstructionIndex++;
-}
-
-void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- m_propertyAccessInstructionIndex++;
- ASSERT_NOT_REACHED();
-}
-
-void JIT::emit_op_put_by_id(Instruction* currentInstruction)
-{
- int base = currentInstruction[1].u.operand;
- int ident = currentInstruction[2].u.operand;
- int value = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_put_by_id_generic);
- stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
- stubCall.addArgument(value);
- stubCall.call();
-
- m_propertyAccessInstructionIndex++;
-}
-
-void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- m_propertyAccessInstructionIndex++;
- ASSERT_NOT_REACHED();
-}
-
-#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-
-void JIT::emit_op_method_check(Instruction* currentInstruction)
-{
- // Assert that the following instruction is a get_by_id.
- ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
-
- currentInstruction += OPCODE_LENGTH(op_method_check);
-
- // Do the method check - check the object & its prototype's structure inline (this is the common case).
- m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
- MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
-
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
-
- emitLoad(base, regT1, regT0);
- emitJumpSlowCaseIfNotJSCell(base, regT1);
-
- BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
-
- Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
- DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT2);
- Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
-
- // This will be relinked to load the function without doing a load.
- DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
-
- END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
-
- move(Imm32(JSValue::CellTag), regT1);
- Jump match = jump();
-
- ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj);
- ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct);
- ASSERT(differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction);
-
- // Link the failure cases here.
- structureCheck.link(this);
- protoStructureCheck.link(this);
-
- // Do a regular(ish) get_by_id (the slow case will be linked to
- // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
- compileGetByIdHotPath();
-
- match.link(this);
- emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_method_check), dst, regT1, regT0);
-
- // We've already generated the following get_by_id, so make sure it's skipped over.
- m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
-}
-
-void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- currentInstruction += OPCODE_LENGTH(op_method_check);
-
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int ident = currentInstruction[3].u.operand;
-
- compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true);
-
- // We've already generated the following get_by_id, so make sure it's skipped over.
- m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
-}
-
-#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-
-// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
-void JIT::emit_op_method_check(Instruction*) {}
-void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
-
-#endif
-
-void JIT::emit_op_get_by_val(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- emitLoad2(base, regT1, regT0, property, regT3, regT2);
-
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- emitJumpSlowCaseIfNotJSCell(base, regT1);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
- addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
-
- load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
- load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
- addSlowCase(branch32(Equal, regT1, Imm32(JSValue::EmptyValueTag)));
-
- emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
-}
-
-void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- linkSlowCase(iter); // property int32 check
- linkSlowCaseIfNotJSCell(iter, base); // base cell check
- linkSlowCase(iter); // base array check
- linkSlowCase(iter); // vector length check
- linkSlowCase(iter); // empty value
-
- JITStubCall stubCall(this, cti_op_get_by_val);
- stubCall.addArgument(base);
- stubCall.addArgument(property);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_put_by_val(Instruction* currentInstruction)
-{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned value = currentInstruction[3].u.operand;
-
- emitLoad2(base, regT1, regT0, property, regT3, regT2);
-
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- emitJumpSlowCaseIfNotJSCell(base, regT1);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
- addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
-
- Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), Imm32(JSValue::EmptyValueTag));
-
- Label storeResult(this);
- emitLoad(value, regT1, regT0);
- store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); // payload
- store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4)); // tag
- Jump end = jump();
-
- empty.link(this);
- add32(Imm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
- branch32(Below, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
-
- add32(Imm32(1), regT2, regT0);
- store32(regT0, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)));
- jump().linkTo(storeResult, this);
-
- end.link(this);
-}
-
-void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned value = currentInstruction[3].u.operand;
-
- linkSlowCase(iter); // property int32 check
- linkSlowCaseIfNotJSCell(iter, base); // base cell check
- linkSlowCase(iter); // base not array check
- linkSlowCase(iter); // in vector check
-
- JITStubCall stubPutByValCall(this, cti_op_put_by_val);
- stubPutByValCall.addArgument(base);
- stubPutByValCall.addArgument(property);
- stubPutByValCall.addArgument(value);
- stubPutByValCall.call();
-}
-
-void JIT::emit_op_get_by_id(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
-
- emitLoad(base, regT1, regT0);
- emitJumpSlowCaseIfNotJSCell(base, regT1);
- compileGetByIdHotPath();
- emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
-}
-
-void JIT::compileGetByIdHotPath()
-{
- // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
- // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
- // to array-length / prototype access trampolines), and finally we also record the property-map access offset as a label
- // to jump back to if one of these trampolines finds a match.
-
- BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
-
- Label hotPathBegin(this);
- m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
- m_propertyAccessInstructionIndex++;
-
- DataLabelPtr structureToCompare;
- Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
- addSlowCase(structureCheck);
- ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
- ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
-
- Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT2);
- Label externalLoadComplete(this);
- ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
- ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);
-
- DataLabel32 displacementLabel1 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
- ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetGetByIdPropertyMapOffset1);
- DataLabel32 displacementLabel2 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
- ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetGetByIdPropertyMapOffset2);
-
- Label putResult(this);
- ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
-
- END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
-}
-
-void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int ident = currentInstruction[3].u.operand;
-
- compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
-}
-
-void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
-{
- // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
- // so that we only need to track one pointer into the slow case code - we track a pointer to the location
- // of the call (which we can use to look up the patch information), but should an array-length or
- // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
- // the distance from the call to the head of the slow case.
- linkSlowCaseIfNotJSCell(iter, base);
- linkSlowCase(iter);
-
- BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
-
-#ifndef NDEBUG
- Label coldPathBegin(this);
-#endif
- JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
- stubCall.addArgument(regT1, regT0);
- stubCall.addArgument(ImmPtr(ident));
- Call call = stubCall.call(dst);
-
- END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
-
- ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
-
- // Track the location of the call; this will be used to recover patch information.
- m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
- m_propertyAccessInstructionIndex++;
-}
-
-void JIT::emit_op_put_by_id(Instruction* currentInstruction)
-{
- // In order to be able to patch both the Structure, and the object offset, we store one pointer,
- // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
- // such that the Structure & offset are always at the same distance from this.
-
- int base = currentInstruction[1].u.operand;
- int value = currentInstruction[3].u.operand;
-
- emitLoad2(base, regT1, regT0, value, regT3, regT2);
-
- emitJumpSlowCaseIfNotJSCell(base, regT1);
-
- BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
-
- Label hotPathBegin(this);
- m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
- m_propertyAccessInstructionIndex++;
-
- // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
- DataLabelPtr structureToCompare;
- addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
- ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);
-
- // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
- Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
- Label externalLoadComplete(this);
- ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
- ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);
-
- DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchGetByIdDefaultOffset)); // payload
- DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchGetByIdDefaultOffset)); // tag
-
- END_UNINTERRUPTED_SEQUENCE(sequencePutById);
-
- ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetPutByIdPropertyMapOffset1);
- ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetPutByIdPropertyMapOffset2);
-}
-
-void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int base = currentInstruction[1].u.operand;
- int ident = currentInstruction[2].u.operand;
-
- linkSlowCaseIfNotJSCell(iter, base);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_put_by_id);
- stubCall.addArgument(regT1, regT0);
- stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
- stubCall.addArgument(regT3, regT2);
- Call call = stubCall.call();
-
- // Track the location of the call; this will be used to recover patch information.
- m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
- m_propertyAccessInstructionIndex++;
-}
-
-// Compile a store into an object's property storage. May overwrite base.
-void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset)
-{
- int offset = cachedOffset;
- if (structure->isUsingInlineStorage())
- offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
- else
- loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
- emitStore(offset, valueTag, valuePayload, base);
-}
-
-// Compile a load from an object's property storage. May overwrite base.
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset)
-{
- int offset = cachedOffset;
- if (structure->isUsingInlineStorage())
- offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
- else
- loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
- emitLoad(offset, resultTag, resultPayload, base);
-}
-
-void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
-{
- if (base->isUsingInlineStorage()) {
- load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]), resultPayload);
- load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]) + 4, resultTag);
- return;
- }
-
- size_t offset = cachedOffset * sizeof(JSValue);
-
- PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
- loadPtr(static_cast<void*>(protoPropertyStorage), temp);
- load32(Address(temp, offset), resultPayload);
- load32(Address(temp, offset + 4), resultTag);
-}
-
-void JIT::testPrototype(Structure* structure, JumpList& failureCases)
-{
- if (structure->m_prototype.isNull())
- return;
-
- failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&asCell(structure->m_prototype)->m_structure), ImmPtr(asCell(structure->m_prototype)->m_structure)));
-}
-
-void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
-{
- // It is assumed that regT0 contains the basePayload and regT1 contains the baseTag. The value can be found on the stack.
-
- JumpList failureCases;
- failureCases.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
- failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
- testPrototype(oldStructure, failureCases);
-
- // Verify that nothing in the prototype chain has a setter for this property.
- for (RefPtr<Structure>* it = chain->head(); *it; ++it)
- testPrototype(it->get(), failureCases);
-
- // Reallocate property storage if needed.
- Call callTarget;
- bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
- if (willNeedStorageRealloc) {
- // This trampoline was called like a JIT stub; before we can call again we need to
- // remove the return address from the stack, to prevent the stack from becoming misaligned.
- preserveReturnAddressAfterCall(regT3);
-
- JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
- stubCall.skipArgument(); // base
- stubCall.skipArgument(); // ident
- stubCall.skipArgument(); // value
- stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
- stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
- stubCall.call(regT0);
-
- restoreReturnAddressBeforeReturn(regT3);
- }
-
- sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
- add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
- storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));
-
- load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*)), regT3);
- load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*) + 4), regT2);
-
- // Write the value
- compilePutDirectOffset(regT0, regT2, regT3, newStructure, cachedOffset);
-
- ret();
-
- ASSERT(!failureCases.empty());
- failureCases.link(this);
- restoreArgumentReferenceForTrampoline();
- Call failureCall = tailRecursiveCall();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
-
- patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));
-
- if (willNeedStorageRealloc) {
- ASSERT(m_calls.size() == 1);
- patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
- }
-
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
-}
-
-void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
-{
- RepatchBuffer repatchBuffer(codeBlock);
-
- // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
- // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
-
- int offset = sizeof(JSValue) * cachedOffset;
-
- // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
- // and makes the subsequent load's offset automatically correct
- if (structure->isUsingInlineStorage())
- repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));
-
- // Patch the offset into the property map to load from, then patch the Structure to look for.
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset1), offset); // payload
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + 4); // tag
-}
-
-void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
-{
- RepatchBuffer repatchBuffer(codeBlock);
-
- ASSERT(!methodCallLinkInfo.cachedStructure);
- methodCallLinkInfo.cachedStructure = structure;
- structure->ref();
-
- Structure* prototypeStructure = proto->structure();
- ASSERT(!methodCallLinkInfo.cachedPrototypeStructure);
- methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
- prototypeStructure->ref();
-
- repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
- repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
- repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
- repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
-
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
-}
-
-void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
-{
- RepatchBuffer repatchBuffer(codeBlock);
-
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic));
-
- int offset = sizeof(JSValue) * cachedOffset;
-
- // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
- // and makes the subsequent load's offset automatically correct
- if (structure->isUsingInlineStorage())
- repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));
-
- // Patch the offset into the property map to load from, then patch the Structure to look for.
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset); // payload
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + 4); // tag
-}
-
-void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
-{
- StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
-
- // regT0 holds a JSCell*
-
- // Check for array
- Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
-
- // Checks out okay! - get the length from the storage
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
- load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
-
- Jump failureCases2 = branch32(Above, regT2, Imm32(INT_MAX));
- move(regT2, regT0);
- move(Imm32(JSValue::Int32Tag), regT1);
- Jump success = jump();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
- patchBuffer.link(failureCases1, slowCaseBegin);
- patchBuffer.link(failureCases2, slowCaseBegin);
-
- // On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
- // Track the stub we have created so that it will be deleted later.
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
-
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
-}
-
-void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
-{
- // regT0 holds a JSCell*
-
- // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
- // referencing the prototype object - let's speculatively load its table nice and early!)
- JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
-
- Jump failureCases1 = checkStructure(regT0, structure);
-
- // Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
- move(ImmPtr(prototypeStructure), regT3);
- Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
-#else
- Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
-#endif
-
- // Checks out okay! - getDirectOffset
- compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
-
- Jump success = jump();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
- patchBuffer.link(failureCases1, slowCaseBegin);
- patchBuffer.link(failureCases2, slowCaseBegin);
-
- // On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
- // Track the stub we have created so that it will be deleted later.
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
-
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
-}
-
-
-void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
-{
- // regT0 holds a JSCell*
-
- Jump failureCase = checkStructure(regT0, structure);
- compileGetDirectOffset(regT0, regT1, regT0, structure, cachedOffset);
- Jump success = jump();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
- if (!lastProtoBegin)
- lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
-
- patchBuffer.link(failureCase, lastProtoBegin);
-
- // On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-
- structure->ref();
- polymorphicStructures->list[currentIndex].set(entryLabel, structure);
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
-}
-
-void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
-{
- // regT0 holds a JSCell*
-
- // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
- // referencing the prototype object - let's speculatively load its table nice and early!)
- JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
-
- // Check eax is an object of the right Structure.
- Jump failureCases1 = checkStructure(regT0, structure);
-
- // Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
- move(ImmPtr(prototypeStructure), regT3);
- Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
-#else
- Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
-#endif
-
- compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
-
- Jump success = jump();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
- patchBuffer.link(failureCases1, lastProtoBegin);
- patchBuffer.link(failureCases2, lastProtoBegin);
-
- // On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-
- structure->ref();
- prototypeStructure->ref();
- prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
-}
-
-void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
-{
- // regT0 holds a JSCell*
-
- ASSERT(count);
-
- JumpList bucketsOfFail;
-
- // Check eax is an object of the right Structure.
- bucketsOfFail.append(checkStructure(regT0, structure));
-
- Structure* currStructure = structure;
- RefPtr<Structure>* chainEntries = chain->head();
- JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i) {
- protoObject = asObject(currStructure->prototypeForLookup(callFrame));
- currStructure = chainEntries[i].get();
-
- // Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
- move(ImmPtr(currStructure), regT3);
- bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
-#else
- bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
-#endif
- }
- ASSERT(protoObject);
-
- compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
- Jump success = jump();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
-
- patchBuffer.link(bucketsOfFail, lastProtoBegin);
-
- // On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-
- // Track the stub we have created so that it will be deleted later.
- structure->ref();
- chain->ref();
- prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
-}
-
-void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
-{
- // regT0 holds a JSCell*
-
- ASSERT(count);
-
- JumpList bucketsOfFail;
-
- // Check eax is an object of the right Structure.
- bucketsOfFail.append(checkStructure(regT0, structure));
-
- Structure* currStructure = structure;
- RefPtr<Structure>* chainEntries = chain->head();
- JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i) {
- protoObject = asObject(currStructure->prototypeForLookup(callFrame));
- currStructure = chainEntries[i].get();
-
- // Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
- move(ImmPtr(currStructure), regT3);
- bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
-#else
- bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
-#endif
- }
- ASSERT(protoObject);
-
- compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
- Jump success = jump();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
-
- // On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
- // Track the stub we have created so that it will be deleted later.
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
-
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
-}
-
-/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID structure, RegisterID offset)
-{
- ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
- ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t));
- ASSERT(sizeof(JSValue) == 8);
-
- Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity));
- loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSObject, m_inlineStorage)+OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
- loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSObject, m_inlineStorage)+OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
- Jump finishedLoad = jump();
- notUsingInlineStorage.link(this);
- loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
- loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
- loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
- finishedLoad.link(this);
-}
-
-void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
- unsigned expected = currentInstruction[4].u.operand;
- unsigned iter = currentInstruction[5].u.operand;
- unsigned i = currentInstruction[6].u.operand;
-
- emitLoad2(property, regT1, regT0, base, regT3, regT2);
- emitJumpSlowCaseIfNotJSCell(property, regT1);
- addSlowCase(branchPtr(NotEqual, regT0, payloadFor(expected)));
- // Property registers are now available as the property is known
- emitJumpSlowCaseIfNotJSCell(base, regT3);
- emitLoadPayload(iter, regT1);
-
- // Test base's structure
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
- addSlowCase(branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
- load32(addressFor(i), regT3);
- sub32(Imm32(1), regT3);
- addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
- compileGetDirectOffset(regT2, regT1, regT0, regT0, regT3);
-
- emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_pname), dst, regT1, regT0);
-}
-
-void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- linkSlowCaseIfNotJSCell(iter, property);
- linkSlowCase(iter);
- linkSlowCaseIfNotJSCell(iter, base);
- linkSlowCase(iter);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_get_by_val);
- stubCall.addArgument(base);
- stubCall.addArgument(property);
- stubCall.call(dst);
-}
-
-#else // USE(JSVALUE32_64)
-
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
unsigned dst = currentInstruction[1].u.operand;
@@ -1594,7 +625,6 @@ void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodC
structure->ref();
Structure* prototypeStructure = proto->structure();
- ASSERT(!methodCallLinkInfo.cachedPrototypeStructure);
methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
prototypeStructure->ref();
@@ -1894,8 +924,8 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-#endif // USE(JSVALUE32_64)
-
} // namespace JSC
#endif // ENABLE(JIT)
+
+#endif // !USE(JSVALUE32_64)
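
The code moved out of this file (and re-added in JITPropertyAccess32_64.cpp below) addresses every value as a 32-bit tag plus a 32-bit payload and reads property slots from either inline or external storage. A condensed model of that layout, using assumed field names and an assumed inline capacity rather than JSC's real ones:

    // Condensed model of the JSVALUE32_64 slot layout handled by
    // compileGetDirectOffset()/compilePutDirectOffset(); the struct members
    // and inlineCapacity value are assumptions chosen for illustration.
    #include <cstddef>
    #include <cstdint>

    struct SlotValue {
        uint32_t payload;   // low 32 bits: integer value or cell pointer
        uint32_t tag;       // high 32 bits: Int32Tag, CellTag, EmptyValueTag, ...
    };

    struct ObjectModel {
        static constexpr size_t inlineCapacity = 4;  // assumed small-object capacity
        SlotValue inlineStorage[inlineCapacity];     // slots kept inside the object
        SlotValue* externalStorage;                  // spilled slots on the heap
        size_t propertyStorageCapacity;              // mirrors the structure's storage capacity
    };

    // Pick the backing store the same way the JIT does (capacity check), then
    // read the two 32-bit halves of the requested slot.
    inline SlotValue getDirectOffset(const ObjectModel& object, size_t offset)
    {
        const SlotValue* storage = object.propertyStorageCapacity == ObjectModel::inlineCapacity
            ? object.inlineStorage
            : object.externalStorage;
        return storage[offset];
    }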
diff --git a/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
new file mode 100644
index 0000000..f9e323d
--- /dev/null
+++ b/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
@@ -0,0 +1,1026 @@
+/*
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if USE(JSVALUE32_64)
+
+#include "JIT.h"
+
+#if ENABLE(JIT)
+
+#include "CodeBlock.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "JSPropertyNameIterator.h"
+#include "Interpreter.h"
+#include "LinkBuffer.h"
+#include "RepatchBuffer.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+
+void JIT::emit_op_put_by_index(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_by_index);
+ stubCall.addArgument(base);
+ stubCall.addArgument(Imm32(property));
+ stubCall.addArgument(value);
+ stubCall.call();
+}
+
+void JIT::emit_op_put_getter(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned function = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_getter);
+ stubCall.addArgument(base);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
+ stubCall.addArgument(function);
+ stubCall.call();
+}
+
+void JIT::emit_op_put_setter(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned function = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_setter);
+ stubCall.addArgument(base);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
+ stubCall.addArgument(function);
+ stubCall.call();
+}
+
+void JIT::emit_op_del_by_id(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_del_by_id);
+ stubCall.addArgument(base);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
+ stubCall.call(dst);
+}
+
+
+#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+
+// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
+void JIT::emit_op_method_check(Instruction*) {}
+void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
+#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
+#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
+#endif
+
+void JIT::emit_op_get_by_val(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(base);
+ stubCall.addArgument(property);
+ stubCall.call(dst);
+}
+
+void JIT::emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ ASSERT_NOT_REACHED();
+}
+
+void JIT::emit_op_put_by_val(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_by_val);
+ stubCall.addArgument(base);
+ stubCall.addArgument(property);
+ stubCall.addArgument(value);
+ stubCall.call();
+}
+
+void JIT::emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ ASSERT_NOT_REACHED();
+}
+
+void JIT::emit_op_get_by_id(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int ident = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_get_by_id_generic);
+ stubCall.addArgument(base);
+ stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
+ stubCall.call(dst);
+
+ m_propertyAccessInstructionIndex++;
+}
+
+void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ m_propertyAccessInstructionIndex++;
+ ASSERT_NOT_REACHED();
+}
+
+void JIT::emit_op_put_by_id(Instruction* currentInstruction)
+{
+ int base = currentInstruction[1].u.operand;
+ int ident = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_by_id_generic);
+ stubCall.addArgument(base);
+ stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
+ stubCall.addArgument(value);
+ stubCall.call();
+
+ m_propertyAccessInstructionIndex++;
+}
+
+void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ m_propertyAccessInstructionIndex++;
+ ASSERT_NOT_REACHED();
+}
+
+#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+
+#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
+
+void JIT::emit_op_method_check(Instruction* currentInstruction)
+{
+ // Assert that the following instruction is a get_by_id.
+ ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
+
+ currentInstruction += OPCODE_LENGTH(op_method_check);
+
+ // Do the method check - check the object & its prototype's structure inline (this is the common case).
+ m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
+ MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
+
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+
+ emitLoad(base, regT1, regT0);
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
+
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT2);
+ Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+
+ // This will be relinked to load the function without doing a load.
+ DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
+
+ move(Imm32(JSValue::CellTag), regT1);
+ Jump match = jump();
+
+ ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj);
+ ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct);
+ ASSERT(differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction);
+
+ // Link the failure cases here.
+ structureCheck.link(this);
+ protoStructureCheck.link(this);
+
+ // Do a regular(ish) get_by_id (the slow case will be linked to
+ // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
+ compileGetByIdHotPath();
+
+ match.link(this);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_method_check), dst, regT1, regT0);
+
+ // We've already generated the following get_by_id, so make sure it's skipped over.
+ m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
+}
+
+void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ currentInstruction += OPCODE_LENGTH(op_method_check);
+
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int ident = currentInstruction[3].u.operand;
+
+ compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true);
+
+ // We've already generated the following get_by_id, so make sure it's skipped over.
+ m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
+}
+
+#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
+
+// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
+void JIT::emit_op_method_check(Instruction*) {}
+void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
+
+#endif
+
+void JIT::emit_op_get_by_val(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ emitLoad2(base, regT1, regT0, property, regT3, regT2);
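+    // Note (added): base is now in regT1 (tag) / regT0 (payload) and property in regT3 (tag) / regT2 (payload).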
+
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
+ addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
+
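+    // Note (added): each slot in the vector is an 8-byte JSValue (payload at offset 0, tag at offset 4), hence the TimesEight scale.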
+ load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
+ load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
+ addSlowCase(branch32(Equal, regT1, Imm32(JSValue::EmptyValueTag)));
+
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ linkSlowCase(iter); // base array check
+ linkSlowCase(iter); // vector length check
+ linkSlowCase(iter); // empty value
+
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(base);
+ stubCall.addArgument(property);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_put_by_val(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ emitLoad2(base, regT1, regT0, property, regT3, regT2);
+
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
+ addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
+
+ Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), Imm32(JSValue::EmptyValueTag));
+
+ Label storeResult(this);
+ emitLoad(value, regT1, regT0);
+ store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); // payload
+ store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4)); // tag
+ Jump end = jump();
+
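+    // Note (added): writing into an empty (hole) slot - count the new value and, if the index is at or beyond the
+    // current length, grow m_length to index + 1 before storing.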
+ empty.link(this);
+ add32(Imm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+ branch32(Below, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
+
+ add32(Imm32(1), regT2, regT0);
+ store32(regT0, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+ jump().linkTo(storeResult, this);
+
+ end.link(this);
+}
+
+void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ linkSlowCase(iter); // base not array check
+ linkSlowCase(iter); // in vector check
+
+ JITStubCall stubPutByValCall(this, cti_op_put_by_val);
+ stubPutByValCall.addArgument(base);
+ stubPutByValCall.addArgument(property);
+ stubPutByValCall.addArgument(value);
+ stubPutByValCall.call();
+}
+
+void JIT::emit_op_get_by_id(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+
+ emitLoad(base, regT1, regT0);
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+ compileGetByIdHotPath();
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
+}
+
+void JIT::compileGetByIdHotPath()
+{
+    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
+    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
+    // to array-length / prototype access trampolines), and we also record the property-map access offset as a label
+    // to jump back to if one of these trampolines finds a match.
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+
+ Label hotPathBegin(this);
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
+ m_propertyAccessInstructionIndex++;
+
+ DataLabelPtr structureToCompare;
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ addSlowCase(structureCheck);
+ ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
+ ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
+
+ Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT2);
+ Label externalLoadComplete(this);
+ ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
+ ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);
+
+ DataLabel32 displacementLabel1 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
+ ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetGetByIdPropertyMapOffset1);
+ DataLabel32 displacementLabel2 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
+ ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetGetByIdPropertyMapOffset2);
+
+ Label putResult(this);
+ ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+}
+
+void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int ident = currentInstruction[3].u.operand;
+
+ compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
+}
+
+void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
+{
+    // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
+    // so that we only need to track one pointer into the slow case code - we track a pointer to the location
+    // of the call (which we can use to look up the patch information), but should an array-length or
+    // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
+    // the distance from the call to the head of the slow case.
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
+
+#ifndef NDEBUG
+ Label coldPathBegin(this);
+#endif
+ JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.addArgument(ImmPtr(ident));
+ Call call = stubCall.call(dst);
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
+
+ ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
+
+ // Track the location of the call; this will be used to recover patch information.
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
+ m_propertyAccessInstructionIndex++;
+}
+
+void JIT::emit_op_put_by_id(Instruction* currentInstruction)
+{
+ // In order to be able to patch both the Structure, and the object offset, we store one pointer,
+ // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
+ // such that the Structure & offset are always at the same distance from this.
+
+ int base = currentInstruction[1].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ emitLoad2(base, regT1, regT0, value, regT3, regT2);
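+    // Note (added): base is now in regT1 (tag) / regT0 (payload) and the value in regT3 (tag) / regT2 (payload).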
+
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
+
+ Label hotPathBegin(this);
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
+ m_propertyAccessInstructionIndex++;
+
+ // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
+ DataLabelPtr structureToCompare;
+ addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
+ ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);
+
+    // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
+ Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
+ Label externalLoadComplete(this);
+ ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
+ ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);
+
+ DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchGetByIdDefaultOffset)); // payload
+ DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchGetByIdDefaultOffset)); // tag
+
+ END_UNINTERRUPTED_SEQUENCE(sequencePutById);
+
+ ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetPutByIdPropertyMapOffset1);
+ ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetPutByIdPropertyMapOffset2);
+}
+
+void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int base = currentInstruction[1].u.operand;
+ int ident = currentInstruction[2].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_put_by_id);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
+ stubCall.addArgument(regT3, regT2);
+ Call call = stubCall.call();
+
+ // Track the location of the call; this will be used to recover patch information.
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
+ m_propertyAccessInstructionIndex++;
+}
+
+// Compile a store into an object's property storage. May overwrite base.
+void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset)
+{
+ int offset = cachedOffset;
+ if (structure->isUsingInlineStorage())
+ offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
+ else
+ loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
+ emitStore(offset, valueTag, valuePayload, base);
+}
+
+// Compile a load from an object's property storage. May overwrite base.
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset)
+{
+ int offset = cachedOffset;
+ if (structure->isUsingInlineStorage())
+ offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
+ else
+ loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
+ emitLoad(offset, resultTag, resultPayload, base);
+}
+
+void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
+{
+ if (base->isUsingInlineStorage()) {
+ load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]), resultPayload);
+ load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]) + 4, resultTag);
+ return;
+ }
+
+ size_t offset = cachedOffset * sizeof(JSValue);
+
+ PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
+ loadPtr(static_cast<void*>(protoPropertyStorage), temp);
+ load32(Address(temp, offset), resultPayload);
+ load32(Address(temp, offset + 4), resultTag);
+}
+
+void JIT::testPrototype(Structure* structure, JumpList& failureCases)
+{
+ if (structure->m_prototype.isNull())
+ return;
+
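+    // Note (added): guard that the prototype cell's Structure is still the one recorded at compile time.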
+ failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&asCell(structure->m_prototype)->m_structure), ImmPtr(asCell(structure->m_prototype)->m_structure)));
+}
+
+void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
+{
+ // It is assumed that regT0 contains the basePayload and regT1 contains the baseTag. The value can be found on the stack.
+
+ JumpList failureCases;
+ failureCases.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
+ testPrototype(oldStructure, failureCases);
+
+ // Verify that nothing in the prototype chain has a setter for this property.
+ for (RefPtr<Structure>* it = chain->head(); *it; ++it)
+ testPrototype(it->get(), failureCases);
+
+ // Reallocate property storage if needed.
+ Call callTarget;
+ bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
+ if (willNeedStorageRealloc) {
+        // This trampoline was called like a JIT stub; before we can call again we need to
+        // remove the return address from the stack, to prevent the stack from becoming misaligned.
+ preserveReturnAddressAfterCall(regT3);
+
+ JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
+ stubCall.skipArgument(); // base
+ stubCall.skipArgument(); // ident
+ stubCall.skipArgument(); // value
+ stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
+ stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
+ stubCall.call(regT0);
+
+ restoreReturnAddressBeforeReturn(regT3);
+ }
+
+ sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
+ add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
+ storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));
+
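+    // Note (added): reload the value to be stored (tag and payload) from its argument slot in the JIT stack frame.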
+ load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*)), regT3);
+ load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*) + 4), regT2);
+
+ // Write the value
+ compilePutDirectOffset(regT0, regT2, regT3, newStructure, cachedOffset);
+
+ ret();
+
+ ASSERT(!failureCases.empty());
+ failureCases.link(this);
+ restoreArgumentReferenceForTrampoline();
+ Call failureCall = tailRecursiveCall();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));
+
+ if (willNeedStorageRealloc) {
+ ASSERT(m_calls.size() == 1);
+ patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
+ }
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
+}
+
+void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
+
+ int offset = sizeof(JSValue) * cachedOffset;
+
+ // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
+ // and makes the subsequent load's offset automatically correct
+ if (structure->isUsingInlineStorage())
+ repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));
+
+    // Patch the offset into the property map to load from, then patch the Structure to look for.
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset1), offset); // payload
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + 4); // tag
+}
+
+void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ ASSERT(!methodCallLinkInfo.cachedStructure);
+ methodCallLinkInfo.cachedStructure = structure;
+ structure->ref();
+
+ Structure* prototypeStructure = proto->structure();
+ methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
+ prototypeStructure->ref();
+
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
+
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
+}
+
+void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
+ // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic));
+
+ int offset = sizeof(JSValue) * cachedOffset;
+
+ // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
+ // and makes the subsequent load's offset automatically correct
+ if (structure->isUsingInlineStorage())
+ repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));
+
+    // Patch the offset into the property map to load from, then patch the Structure to look for.
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset); // payload
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + 4); // tag
+}
+
+void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
+{
+ StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
+
+ // regT0 holds a JSCell*
+
+ // Check for array
+ Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
+
+ // Checks out okay! - get the length from the storage
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
+ load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
+
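+    // Note (added): lengths above INT_MAX cannot be returned as an immediate int32, so bail to the slow case for those.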
+ Jump failureCases2 = branch32(Above, regT2, Imm32(INT_MAX));
+ move(regT2, regT0);
+ move(Imm32(JSValue::Int32Tag), regT1);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+ patchBuffer.link(failureCases1, slowCaseBegin);
+ patchBuffer.link(failureCases2, slowCaseBegin);
+
+    // On success return back to the hot path code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ // Track the stub we have created so that it will be deleted later.
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+
+    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
+}
+
+void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+{
+ // regT0 holds a JSCell*
+
+ // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
+    // referencing the prototype object - let's speculatively load its table nice and early!)
+ JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
+
+ Jump failureCases1 = checkStructure(regT0, structure);
+
+ // Check the prototype object's Structure had not changed.
+ Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if CPU(X86_64)
+ move(ImmPtr(prototypeStructure), regT3);
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
+#else
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+#endif
+
+ // Checks out okay! - getDirectOffset
+ compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
+
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+ patchBuffer.link(failureCases1, slowCaseBegin);
+ patchBuffer.link(failureCases2, slowCaseBegin);
+
+    // On success return back to the hot path code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ // Track the stub we have created so that it will be deleted later.
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+
+    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+}
+
+
+void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
+{
+ // regT0 holds a JSCell*
+
+ Jump failureCase = checkStructure(regT0, structure);
+ compileGetDirectOffset(regT0, regT1, regT0, structure, cachedOffset);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
+ if (!lastProtoBegin)
+ lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+
+ patchBuffer.link(failureCase, lastProtoBegin);
+
+    // On success return back to the hot path code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+
+ structure->ref();
+ polymorphicStructures->list[currentIndex].set(entryLabel, structure);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+}
+
+void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
+{
+ // regT0 holds a JSCell*
+
+ // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
+    // referencing the prototype object - let's speculatively load its table nice and early!)
+ JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
+
+    // Check that regT0 holds an object with the right Structure.
+ Jump failureCases1 = checkStructure(regT0, structure);
+
+ // Check the prototype object's Structure had not changed.
+ Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if CPU(X86_64)
+ move(ImmPtr(prototypeStructure), regT3);
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
+#else
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+#endif
+
+ compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
+
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+ patchBuffer.link(failureCases1, lastProtoBegin);
+ patchBuffer.link(failureCases2, lastProtoBegin);
+
+    // On success return back to the hot path code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+
+ structure->ref();
+ prototypeStructure->ref();
+ prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+}
+
+void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
+{
+ // regT0 holds a JSCell*
+
+ ASSERT(count);
+
+ JumpList bucketsOfFail;
+
+    // Check that regT0 holds an object with the right Structure.
+ bucketsOfFail.append(checkStructure(regT0, structure));
+
+ Structure* currStructure = structure;
+ RefPtr<Structure>* chainEntries = chain->head();
+ JSObject* protoObject = 0;
+ for (unsigned i = 0; i < count; ++i) {
+ protoObject = asObject(currStructure->prototypeForLookup(callFrame));
+ currStructure = chainEntries[i].get();
+
+ // Check the prototype object's Structure had not changed.
+ Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if CPU(X86_64)
+ move(ImmPtr(currStructure), regT3);
+ bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
+#else
+ bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
+#endif
+ }
+ ASSERT(protoObject);
+
+ compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+
+ patchBuffer.link(bucketsOfFail, lastProtoBegin);
+
+    // On success return back to the hot path code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+
+ // Track the stub we have created so that it will be deleted later.
+ structure->ref();
+ chain->ref();
+ prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+}
+
+void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+{
+ // regT0 holds a JSCell*
+
+ ASSERT(count);
+
+ JumpList bucketsOfFail;
+
+    // Check that regT0 holds an object with the right Structure.
+ bucketsOfFail.append(checkStructure(regT0, structure));
+
+ Structure* currStructure = structure;
+ RefPtr<Structure>* chainEntries = chain->head();
+ JSObject* protoObject = 0;
+ for (unsigned i = 0; i < count; ++i) {
+ protoObject = asObject(currStructure->prototypeForLookup(callFrame));
+ currStructure = chainEntries[i].get();
+
+ // Check the prototype object's Structure had not changed.
+ Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if CPU(X86_64)
+ move(ImmPtr(currStructure), regT3);
+ bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
+#else
+ bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
+#endif
+ }
+ ASSERT(protoObject);
+
+ compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
+
+    // On success return back to the hot path code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ // Track the stub we have created so that it will be deleted later.
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+
+    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+}
+
+/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+
+#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID structure, RegisterID offset)
+{
+ ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
+ ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t));
+ ASSERT(sizeof(JSValue) == 8);
+
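+    // Note (added): while the property storage capacity still equals the inline capacity, properties live in
+    // m_inlineStorage; once it has grown they are kept in the separately allocated m_externalStorage.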
+ Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity));
+ loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSObject, m_inlineStorage)+OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
+ loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSObject, m_inlineStorage)+OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
+ Jump finishedLoad = jump();
+ notUsingInlineStorage.link(this);
+ loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
+ loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
+ loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
+ finishedLoad.link(this);
+}
+
+void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+ unsigned expected = currentInstruction[4].u.operand;
+ unsigned iter = currentInstruction[5].u.operand;
+ unsigned i = currentInstruction[6].u.operand;
+
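+    // Note (added): property is loaded into regT1 (tag) / regT0 (payload) and base into regT3 (tag) / regT2 (payload).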
+ emitLoad2(property, regT1, regT0, base, regT3, regT2);
+ emitJumpSlowCaseIfNotJSCell(property, regT1);
+ addSlowCase(branchPtr(NotEqual, regT0, payloadFor(expected)));
+ // Property registers are now available as the property is known
+ emitJumpSlowCaseIfNotJSCell(base, regT3);
+ emitLoadPayload(iter, regT1);
+
+ // Test base's structure
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
+ addSlowCase(branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
+ load32(addressFor(i), regT3);
+ sub32(Imm32(1), regT3);
+ addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
+ compileGetDirectOffset(regT2, regT1, regT0, regT0, regT3);
+
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_pname), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, property);
+ linkSlowCase(iter);
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(base);
+ stubCall.addArgument(property);
+ stubCall.call(dst);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // ENABLE(JSVALUE32_64)
+
diff --git a/JavaScriptCore/jit/JITStubs.cpp b/JavaScriptCore/jit/JITStubs.cpp
index 85471de..bf430a6 100644
--- a/JavaScriptCore/jit/JITStubs.cpp
+++ b/JavaScriptCore/jit/JITStubs.cpp
@@ -600,7 +600,7 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
"ldr r5, [sp, #0x28]" "\n"
"ldr r4, [sp, #0x24]" "\n"
"ldr lr, [sp, #0x20]" "\n"
- "add sp, sp, #0x3c" "\n"
+ "add sp, sp, #0x40" "\n"
"bx lr" "\n"
);
@@ -856,10 +856,11 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
}
// Uncacheable: give up.
- if (!slot.isCacheable()) {
+ if (!slot.isCacheableValue()) {
ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
return;
}
+ ASSERT(!slot.isGetter());
JSCell* baseCell = asCell(baseValue);
Structure* structure = baseCell->structure();
@@ -1070,9 +1071,9 @@ RVCT(__asm #rtype# cti_#op#(STUB_ARGS_DECLARATION))
RVCT({)
RVCT( ARM)
RVCT( IMPORT JITStubThunked_#op#)
-RVCT( str lr, [sp, #32])
+RVCT( str lr, [sp, ##offset#])
RVCT( bl JITStubThunked_#op#)
-RVCT( ldr lr, [sp, #32])
+RVCT( ldr lr, [sp, ##offset#])
RVCT( bx lr)
RVCT(})
RVCT()
@@ -1289,7 +1290,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check)
// If we successfully got something, then the base from which it is being accessed must
// be an object. (Assertion to ensure asObject() call below is safe, which comes after
    // an isCacheable() check.)
- ASSERT(!slot.isCacheable() || slot.slotBase().isObject());
+ ASSERT(!slot.isCacheableValue() || slot.slotBase().isObject());
// Check that:
// * We're dealing with a JSCell,
@@ -1300,7 +1301,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check)
JSCell* specific;
JSObject* slotBaseObject;
if (baseValue.isCell()
- && slot.isCacheable()
+ && slot.isCacheableValue()
&& !(structure = asCell(baseValue)->structure())->isUncacheableDictionary()
&& (slotBaseObject = asObject(slot.slotBase()))->getPropertySpecificValue(callFrame, ident, specific)
&& specific
@@ -1374,7 +1375,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_self_fail)
CHECK_FOR_EXCEPTION();
if (baseValue.isCell()
- && slot.isCacheable()
+ && slot.isCacheableValue()
&& !asCell(baseValue)->structure()->isUncacheableDictionary()
&& slot.slotBase() == baseValue) {
@@ -1447,7 +1448,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
CHECK_FOR_EXCEPTION();
- if (!baseValue.isCell() || !slot.isCacheable() || asCell(baseValue)->structure()->isDictionary()) {
+ if (!baseValue.isCell() || !slot.isCacheableValue() || asCell(baseValue)->structure()->isDictionary()) {
ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
return JSValue::encode(result);
}
@@ -2303,7 +2304,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_global)
PropertySlot slot(globalObject);
if (globalObject->getPropertySlot(callFrame, ident, slot)) {
JSValue result = slot.getValue(callFrame, ident);
- if (slot.isCacheable() && !globalObject->structure()->isUncacheableDictionary() && slot.slotBase() == globalObject) {
+ if (slot.isCacheableValue() && !globalObject->structure()->isUncacheableDictionary() && slot.slotBase() == globalObject) {
GlobalResolveInfo& globalResolveInfo = callFrame->codeBlock()->globalResolveInfo(globalResolveInfoIndex);
if (globalResolveInfo.structure)
globalResolveInfo.structure->deref();
@@ -3023,7 +3024,7 @@ DEFINE_STUB_FUNCTION(void*, op_switch_char)
if (scrutinee.isString()) {
UString::Rep* value = asString(scrutinee)->value(callFrame).rep();
- if (value->size() == 1)
+ if (value->length() == 1)
result = codeBlock->characterSwitchJumpTable(tableIndex).ctiForValue(value->data()[0]).executableAddress();
}
diff --git a/JavaScriptCore/qt/api/qscriptvalue_p.h b/JavaScriptCore/qt/api/qscriptvalue_p.h
index dea2298..8db43a7 100644
--- a/JavaScriptCore/qt/api/qscriptvalue_p.h
+++ b/JavaScriptCore/qt/api/qscriptvalue_p.h
@@ -525,26 +525,37 @@ bool QScriptValuePrivate::toBool() const
qsreal QScriptValuePrivate::toInteger() const
{
- // TODO it is not true implementation!
- return toNumber();
+ qsreal result = toNumber();
+ if (qIsNaN(result))
+ return 0;
+ if (qIsInf(result))
+ return result;
+ return (result > 0) ? qFloor(result) : -1 * qFloor(-result);
}
qint32 QScriptValuePrivate::toInt32() const
{
- // TODO it is not true implementation!
- return toNumber();
+ qsreal result = toInteger();
+    // Originally this check would read (result == 0 || qIsInf(result) || qIsNaN(result)), but
+    // some of these operations are already performed in the toInteger() subcall.
+ if (qIsInf(result))
+ return 0;
+ return result;
}
quint32 QScriptValuePrivate::toUInt32() const
{
- // TODO it is not true implementation!
- return toNumber();
+ qsreal result = toInteger();
+    // Originally this check would read (result == 0 || qIsInf(result) || qIsNaN(result)), but
+    // some of these operations are already performed in the toInteger() subcall.
+ if (qIsInf(result))
+ return 0;
+ return result;
}
quint16 QScriptValuePrivate::toUInt16() const
{
- // TODO it is not true implementation!
- return toNumber();
+ return toInt32();
}
diff --git a/JavaScriptCore/qt/tests/qscriptvalue/tst_qscriptvalue.h b/JavaScriptCore/qt/tests/qscriptvalue/tst_qscriptvalue.h
index 828ef96..1b3c657 100644
--- a/JavaScriptCore/qt/tests/qscriptvalue/tst_qscriptvalue.h
+++ b/JavaScriptCore/qt/tests/qscriptvalue/tst_qscriptvalue.h
@@ -85,6 +85,18 @@ private slots:
void toBoolean_data();
void toBoolean();
+ void toInteger_data();
+ void toInteger();
+
+ void toInt32_data();
+ void toInt32();
+
+ void toUInt32_data();
+ void toUInt32();
+
+ void toUInt16_data();
+ void toUInt16();
+
private:
typedef void (tst_QScriptValue::*InitDataFunction)();
typedef void (tst_QScriptValue::*DefineDataFunction)(const char*);
@@ -146,6 +158,22 @@ private:
void toBoolean_makeData(const char*);
void toBoolean_test(const char*, const QScriptValue&);
+ void toInteger_initData();
+ void toInteger_makeData(const char*);
+ void toInteger_test(const char*, const QScriptValue&);
+
+ void toInt32_initData();
+ void toInt32_makeData(const char*);
+ void toInt32_test(const char*, const QScriptValue&);
+
+ void toUInt32_initData();
+ void toUInt32_makeData(const char*);
+ void toUInt32_test(const char*, const QScriptValue&);
+
+ void toUInt16_initData();
+ void toUInt16_makeData(const char*);
+ void toUInt16_test(const char*, const QScriptValue&);
+
private:
QScriptEngine* engine;
QHash<QString, QScriptValue> m_values;
diff --git a/JavaScriptCore/qt/tests/qscriptvalue/tst_qscriptvalue_generated.cpp b/JavaScriptCore/qt/tests/qscriptvalue/tst_qscriptvalue_generated.cpp
index f9891a6..006b343 100644
--- a/JavaScriptCore/qt/tests/qscriptvalue/tst_qscriptvalue_generated.cpp
+++ b/JavaScriptCore/qt/tests/qscriptvalue/tst_qscriptvalue_generated.cpp
@@ -940,3 +940,511 @@ void tst_QScriptValue::toBoolean_test(const char*, const QScriptValue& value)
}
DEFINE_TEST_FUNCTION(toBoolean)
+
+
+void tst_QScriptValue::toInteger_initData()
+{
+ QTest::addColumn<qsreal>("expected");
+ initScriptValues();
+}
+
+void tst_QScriptValue::toInteger_makeData(const char* expr)
+{
+ static QHash<QString, qsreal> toInteger;
+ if (toInteger.isEmpty()) {
+ toInteger.insert("QScriptValue()", 0);
+ toInteger.insert("QScriptValue(QScriptValue::UndefinedValue)", 0);
+ toInteger.insert("QScriptValue(QScriptValue::NullValue)", 0);
+ toInteger.insert("QScriptValue(true)", 1);
+ toInteger.insert("QScriptValue(false)", 0);
+ toInteger.insert("QScriptValue(int(122))", 122);
+ toInteger.insert("QScriptValue(uint(124))", 124);
+ toInteger.insert("QScriptValue(0)", 0);
+ toInteger.insert("QScriptValue(0.0)", 0);
+ toInteger.insert("QScriptValue(123.0)", 123);
+ toInteger.insert("QScriptValue(6.37e-8)", 0);
+ toInteger.insert("QScriptValue(-6.37e-8)", 0);
+ toInteger.insert("QScriptValue(0x43211234)", 1126240820);
+ toInteger.insert("QScriptValue(0x10000)", 65536);
+ toInteger.insert("QScriptValue(0x10001)", 65537);
+ toInteger.insert("QScriptValue(qSNaN())", 0);
+ toInteger.insert("QScriptValue(qQNaN())", 0);
+ toInteger.insert("QScriptValue(qInf())", qInf());
+ toInteger.insert("QScriptValue(-qInf())", qInf());
+ toInteger.insert("QScriptValue(\"NaN\")", 0);
+ toInteger.insert("QScriptValue(\"Infinity\")", qInf());
+ toInteger.insert("QScriptValue(\"-Infinity\")", qInf());
+ toInteger.insert("QScriptValue(\"ciao\")", 0);
+ toInteger.insert("QScriptValue(QString::fromLatin1(\"ciao\"))", 0);
+ toInteger.insert("QScriptValue(QString(\"\"))", 0);
+ toInteger.insert("QScriptValue(QString())", 0);
+ toInteger.insert("QScriptValue(QString(\"0\"))", 0);
+ toInteger.insert("QScriptValue(QString(\"123\"))", 123);
+ toInteger.insert("QScriptValue(QString(\"12.4\"))", 12);
+ toInteger.insert("QScriptValue(0, QScriptValue::UndefinedValue)", 0);
+ toInteger.insert("QScriptValue(0, QScriptValue::NullValue)", 0);
+ toInteger.insert("QScriptValue(0, true)", 1);
+ toInteger.insert("QScriptValue(0, false)", 0);
+ toInteger.insert("QScriptValue(0, int(122))", 122);
+ toInteger.insert("QScriptValue(0, uint(124))", 124);
+ toInteger.insert("QScriptValue(0, 0)", 0);
+ toInteger.insert("QScriptValue(0, 0.0)", 0);
+ toInteger.insert("QScriptValue(0, 123.0)", 123);
+ toInteger.insert("QScriptValue(0, 6.37e-8)", 0);
+ toInteger.insert("QScriptValue(0, -6.37e-8)", 0);
+ toInteger.insert("QScriptValue(0, 0x43211234)", 1126240820);
+ toInteger.insert("QScriptValue(0, 0x10000)", 65536);
+ toInteger.insert("QScriptValue(0, 0x10001)", 65537);
+ toInteger.insert("QScriptValue(0, qSNaN())", 0);
+ toInteger.insert("QScriptValue(0, qQNaN())", 0);
+ toInteger.insert("QScriptValue(0, qInf())", qInf());
+ toInteger.insert("QScriptValue(0, -qInf())", qInf());
+ toInteger.insert("QScriptValue(0, \"NaN\")", 0);
+ toInteger.insert("QScriptValue(0, \"Infinity\")", qInf());
+ toInteger.insert("QScriptValue(0, \"-Infinity\")", qInf());
+ toInteger.insert("QScriptValue(0, \"ciao\")", 0);
+ toInteger.insert("QScriptValue(0, QString::fromLatin1(\"ciao\"))", 0);
+ toInteger.insert("QScriptValue(0, QString(\"\"))", 0);
+ toInteger.insert("QScriptValue(0, QString())", 0);
+ toInteger.insert("QScriptValue(0, QString(\"0\"))", 0);
+ toInteger.insert("QScriptValue(0, QString(\"123\"))", 123);
+ toInteger.insert("QScriptValue(0, QString(\"12.3\"))", 12);
+ toInteger.insert("QScriptValue(engine, QScriptValue::UndefinedValue)", 0);
+ toInteger.insert("QScriptValue(engine, QScriptValue::NullValue)", 0);
+ toInteger.insert("QScriptValue(engine, true)", 1);
+ toInteger.insert("QScriptValue(engine, false)", 0);
+ toInteger.insert("QScriptValue(engine, int(122))", 122);
+ toInteger.insert("QScriptValue(engine, uint(124))", 124);
+ toInteger.insert("QScriptValue(engine, 0)", 0);
+ toInteger.insert("QScriptValue(engine, 0.0)", 0);
+ toInteger.insert("QScriptValue(engine, 123.0)", 123);
+ toInteger.insert("QScriptValue(engine, 6.37e-8)", 0);
+ toInteger.insert("QScriptValue(engine, -6.37e-8)", 0);
+ toInteger.insert("QScriptValue(engine, 0x43211234)", 1126240820);
+ toInteger.insert("QScriptValue(engine, 0x10000)", 65536);
+ toInteger.insert("QScriptValue(engine, 0x10001)", 65537);
+ toInteger.insert("QScriptValue(engine, qSNaN())", 0);
+ toInteger.insert("QScriptValue(engine, qQNaN())", 0);
+ toInteger.insert("QScriptValue(engine, qInf())", qInf());
+ toInteger.insert("QScriptValue(engine, -qInf())", qInf());
+ toInteger.insert("QScriptValue(engine, \"NaN\")", 0);
+ toInteger.insert("QScriptValue(engine, \"Infinity\")", qInf());
+ toInteger.insert("QScriptValue(engine, \"-Infinity\")", qInf());
+ toInteger.insert("QScriptValue(engine, \"ciao\")", 0);
+ toInteger.insert("QScriptValue(engine, QString::fromLatin1(\"ciao\"))", 0);
+ toInteger.insert("QScriptValue(engine, QString(\"\"))", 0);
+ toInteger.insert("QScriptValue(engine, QString())", 0);
+ toInteger.insert("QScriptValue(engine, QString(\"0\"))", 0);
+ toInteger.insert("QScriptValue(engine, QString(\"123\"))", 123);
+ toInteger.insert("QScriptValue(engine, QString(\"1.23\"))", 1);
+ toInteger.insert("engine->evaluate(\"[]\")", 0);
+ toInteger.insert("engine->evaluate(\"{}\")", 0);
+ toInteger.insert("engine->evaluate(\"Object.prototype\")", 0);
+ toInteger.insert("engine->evaluate(\"Date.prototype\")", 0);
+ toInteger.insert("engine->evaluate(\"Array.prototype\")", 0);
+ toInteger.insert("engine->evaluate(\"Function.prototype\")", 0);
+ toInteger.insert("engine->evaluate(\"Error.prototype\")", 0);
+ toInteger.insert("engine->evaluate(\"Object\")", 0);
+ toInteger.insert("engine->evaluate(\"Array\")", 0);
+ toInteger.insert("engine->evaluate(\"Number\")", 0);
+ toInteger.insert("engine->evaluate(\"Function\")", 0);
+ toInteger.insert("engine->evaluate(\"(function() { return 1; })\")", 0);
+ toInteger.insert("engine->evaluate(\"(function() { return 'ciao'; })\")", 0);
+ toInteger.insert("engine->evaluate(\"(function() { throw new Error('foo'); })\")", 0);
+ toInteger.insert("engine->evaluate(\"/foo/\")", 0);
+ toInteger.insert("engine->evaluate(\"new Object()\")", 0);
+ toInteger.insert("engine->evaluate(\"new Array()\")", 0);
+ toInteger.insert("engine->evaluate(\"new Error()\")", 0);
+ }
+ newRow(expr) << toInteger.value(expr);
+}
+
+void tst_QScriptValue::toInteger_test(const char*, const QScriptValue& value)
+{
+ QFETCH(qsreal, expected);
+ if (qIsInf(expected)) {
+ QVERIFY(qIsInf(value.toInteger()));
+ return;
+ }
+ QCOMPARE(value.toInteger(), expected);
+}
+
+DEFINE_TEST_FUNCTION(toInteger)
+
+
+void tst_QScriptValue::toInt32_initData()
+{
+ QTest::addColumn<qint32>("expected");
+ initScriptValues();
+}
+
+void tst_QScriptValue::toInt32_makeData(const char* expr)
+{
+ static QHash<QString, qint32> toInt32;
+ if (toInt32.isEmpty()) {
+ toInt32.insert("QScriptValue()", 0);
+ toInt32.insert("QScriptValue(QScriptValue::UndefinedValue)", 0);
+ toInt32.insert("QScriptValue(QScriptValue::NullValue)", 0);
+ toInt32.insert("QScriptValue(true)", 1);
+ toInt32.insert("QScriptValue(false)", 0);
+ toInt32.insert("QScriptValue(int(122))", 122);
+ toInt32.insert("QScriptValue(uint(124))", 124);
+ toInt32.insert("QScriptValue(0)", 0);
+ toInt32.insert("QScriptValue(0.0)", 0);
+ toInt32.insert("QScriptValue(123.0)", 123);
+ toInt32.insert("QScriptValue(6.37e-8)", 0);
+ toInt32.insert("QScriptValue(-6.37e-8)", 0);
+ toInt32.insert("QScriptValue(0x43211234)", 1126240820);
+ toInt32.insert("QScriptValue(0x10000)", 65536);
+ toInt32.insert("QScriptValue(0x10001)", 65537);
+ toInt32.insert("QScriptValue(qSNaN())", 0);
+ toInt32.insert("QScriptValue(qQNaN())", 0);
+ toInt32.insert("QScriptValue(qInf())", 0);
+ toInt32.insert("QScriptValue(-qInf())", 0);
+ toInt32.insert("QScriptValue(\"NaN\")", 0);
+ toInt32.insert("QScriptValue(\"Infinity\")", 0);
+ toInt32.insert("QScriptValue(\"-Infinity\")", 0);
+ toInt32.insert("QScriptValue(\"ciao\")", 0);
+ toInt32.insert("QScriptValue(QString::fromLatin1(\"ciao\"))", 0);
+ toInt32.insert("QScriptValue(QString(\"\"))", 0);
+ toInt32.insert("QScriptValue(QString())", 0);
+ toInt32.insert("QScriptValue(QString(\"0\"))", 0);
+ toInt32.insert("QScriptValue(QString(\"123\"))", 123);
+ toInt32.insert("QScriptValue(QString(\"12.4\"))", 12);
+ toInt32.insert("QScriptValue(0, QScriptValue::UndefinedValue)", 0);
+ toInt32.insert("QScriptValue(0, QScriptValue::NullValue)", 0);
+ toInt32.insert("QScriptValue(0, true)", 1);
+ toInt32.insert("QScriptValue(0, false)", 0);
+ toInt32.insert("QScriptValue(0, int(122))", 122);
+ toInt32.insert("QScriptValue(0, uint(124))", 124);
+ toInt32.insert("QScriptValue(0, 0)", 0);
+ toInt32.insert("QScriptValue(0, 0.0)", 0);
+ toInt32.insert("QScriptValue(0, 123.0)", 123);
+ toInt32.insert("QScriptValue(0, 6.37e-8)", 0);
+ toInt32.insert("QScriptValue(0, -6.37e-8)", 0);
+ toInt32.insert("QScriptValue(0, 0x43211234)", 1126240820);
+ toInt32.insert("QScriptValue(0, 0x10000)", 65536);
+ toInt32.insert("QScriptValue(0, 0x10001)", 65537);
+ toInt32.insert("QScriptValue(0, qSNaN())", 0);
+ toInt32.insert("QScriptValue(0, qQNaN())", 0);
+ toInt32.insert("QScriptValue(0, qInf())", 0);
+ toInt32.insert("QScriptValue(0, -qInf())", 0);
+ toInt32.insert("QScriptValue(0, \"NaN\")", 0);
+ toInt32.insert("QScriptValue(0, \"Infinity\")", 0);
+ toInt32.insert("QScriptValue(0, \"-Infinity\")", 0);
+ toInt32.insert("QScriptValue(0, \"ciao\")", 0);
+ toInt32.insert("QScriptValue(0, QString::fromLatin1(\"ciao\"))", 0);
+ toInt32.insert("QScriptValue(0, QString(\"\"))", 0);
+ toInt32.insert("QScriptValue(0, QString())", 0);
+ toInt32.insert("QScriptValue(0, QString(\"0\"))", 0);
+ toInt32.insert("QScriptValue(0, QString(\"123\"))", 123);
+ toInt32.insert("QScriptValue(0, QString(\"12.3\"))", 12);
+ toInt32.insert("QScriptValue(engine, QScriptValue::UndefinedValue)", 0);
+ toInt32.insert("QScriptValue(engine, QScriptValue::NullValue)", 0);
+ toInt32.insert("QScriptValue(engine, true)", 1);
+ toInt32.insert("QScriptValue(engine, false)", 0);
+ toInt32.insert("QScriptValue(engine, int(122))", 122);
+ toInt32.insert("QScriptValue(engine, uint(124))", 124);
+ toInt32.insert("QScriptValue(engine, 0)", 0);
+ toInt32.insert("QScriptValue(engine, 0.0)", 0);
+ toInt32.insert("QScriptValue(engine, 123.0)", 123);
+ toInt32.insert("QScriptValue(engine, 6.37e-8)", 0);
+ toInt32.insert("QScriptValue(engine, -6.37e-8)", 0);
+ toInt32.insert("QScriptValue(engine, 0x43211234)", 1126240820);
+ toInt32.insert("QScriptValue(engine, 0x10000)", 65536);
+ toInt32.insert("QScriptValue(engine, 0x10001)", 65537);
+ toInt32.insert("QScriptValue(engine, qSNaN())", 0);
+ toInt32.insert("QScriptValue(engine, qQNaN())", 0);
+ toInt32.insert("QScriptValue(engine, qInf())", 0);
+ toInt32.insert("QScriptValue(engine, -qInf())", 0);
+ toInt32.insert("QScriptValue(engine, \"NaN\")", 0);
+ toInt32.insert("QScriptValue(engine, \"Infinity\")", 0);
+ toInt32.insert("QScriptValue(engine, \"-Infinity\")", 0);
+ toInt32.insert("QScriptValue(engine, \"ciao\")", 0);
+ toInt32.insert("QScriptValue(engine, QString::fromLatin1(\"ciao\"))", 0);
+ toInt32.insert("QScriptValue(engine, QString(\"\"))", 0);
+ toInt32.insert("QScriptValue(engine, QString())", 0);
+ toInt32.insert("QScriptValue(engine, QString(\"0\"))", 0);
+ toInt32.insert("QScriptValue(engine, QString(\"123\"))", 123);
+ toInt32.insert("QScriptValue(engine, QString(\"1.23\"))", 1);
+ toInt32.insert("engine->evaluate(\"[]\")", 0);
+ toInt32.insert("engine->evaluate(\"{}\")", 0);
+ toInt32.insert("engine->evaluate(\"Object.prototype\")", 0);
+ toInt32.insert("engine->evaluate(\"Date.prototype\")", 0);
+ toInt32.insert("engine->evaluate(\"Array.prototype\")", 0);
+ toInt32.insert("engine->evaluate(\"Function.prototype\")", 0);
+ toInt32.insert("engine->evaluate(\"Error.prototype\")", 0);
+ toInt32.insert("engine->evaluate(\"Object\")", 0);
+ toInt32.insert("engine->evaluate(\"Array\")", 0);
+ toInt32.insert("engine->evaluate(\"Number\")", 0);
+ toInt32.insert("engine->evaluate(\"Function\")", 0);
+ toInt32.insert("engine->evaluate(\"(function() { return 1; })\")", 0);
+ toInt32.insert("engine->evaluate(\"(function() { return 'ciao'; })\")", 0);
+ toInt32.insert("engine->evaluate(\"(function() { throw new Error('foo'); })\")", 0);
+ toInt32.insert("engine->evaluate(\"/foo/\")", 0);
+ toInt32.insert("engine->evaluate(\"new Object()\")", 0);
+ toInt32.insert("engine->evaluate(\"new Array()\")", 0);
+ toInt32.insert("engine->evaluate(\"new Error()\")", 0);
+ }
+ newRow(expr) << toInt32.value(expr);
+}
+
+void tst_QScriptValue::toInt32_test(const char*, const QScriptValue& value)
+{
+ QFETCH(qint32, expected);
+ QCOMPARE(value.toInt32(), expected);
+}
+
+DEFINE_TEST_FUNCTION(toInt32)
+
+
+void tst_QScriptValue::toUInt32_initData()
+{
+ QTest::addColumn<quint32>("expected");
+ initScriptValues();
+}
+
+void tst_QScriptValue::toUInt32_makeData(const char* expr)
+{
+ static QHash<QString, quint32> toUInt32;
+ if (toUInt32.isEmpty()) {
+ toUInt32.insert("QScriptValue()", 0);
+ toUInt32.insert("QScriptValue(QScriptValue::UndefinedValue)", 0);
+ toUInt32.insert("QScriptValue(QScriptValue::NullValue)", 0);
+ toUInt32.insert("QScriptValue(true)", 1);
+ toUInt32.insert("QScriptValue(false)", 0);
+ toUInt32.insert("QScriptValue(int(122))", 122);
+ toUInt32.insert("QScriptValue(uint(124))", 124);
+ toUInt32.insert("QScriptValue(0)", 0);
+ toUInt32.insert("QScriptValue(0.0)", 0);
+ toUInt32.insert("QScriptValue(123.0)", 123);
+ toUInt32.insert("QScriptValue(6.37e-8)", 0);
+ toUInt32.insert("QScriptValue(-6.37e-8)", 0);
+ toUInt32.insert("QScriptValue(0x43211234)", 1126240820);
+ toUInt32.insert("QScriptValue(0x10000)", 65536);
+ toUInt32.insert("QScriptValue(0x10001)", 65537);
+ toUInt32.insert("QScriptValue(qSNaN())", 0);
+ toUInt32.insert("QScriptValue(qQNaN())", 0);
+ toUInt32.insert("QScriptValue(qInf())", 0);
+ toUInt32.insert("QScriptValue(-qInf())", 0);
+ toUInt32.insert("QScriptValue(\"NaN\")", 0);
+ toUInt32.insert("QScriptValue(\"Infinity\")", 0);
+ toUInt32.insert("QScriptValue(\"-Infinity\")", 0);
+ toUInt32.insert("QScriptValue(\"ciao\")", 0);
+ toUInt32.insert("QScriptValue(QString::fromLatin1(\"ciao\"))", 0);
+ toUInt32.insert("QScriptValue(QString(\"\"))", 0);
+ toUInt32.insert("QScriptValue(QString())", 0);
+ toUInt32.insert("QScriptValue(QString(\"0\"))", 0);
+ toUInt32.insert("QScriptValue(QString(\"123\"))", 123);
+ toUInt32.insert("QScriptValue(QString(\"12.4\"))", 12);
+ toUInt32.insert("QScriptValue(0, QScriptValue::UndefinedValue)", 0);
+ toUInt32.insert("QScriptValue(0, QScriptValue::NullValue)", 0);
+ toUInt32.insert("QScriptValue(0, true)", 1);
+ toUInt32.insert("QScriptValue(0, false)", 0);
+ toUInt32.insert("QScriptValue(0, int(122))", 122);
+ toUInt32.insert("QScriptValue(0, uint(124))", 124);
+ toUInt32.insert("QScriptValue(0, 0)", 0);
+ toUInt32.insert("QScriptValue(0, 0.0)", 0);
+ toUInt32.insert("QScriptValue(0, 123.0)", 123);
+ toUInt32.insert("QScriptValue(0, 6.37e-8)", 0);
+ toUInt32.insert("QScriptValue(0, -6.37e-8)", 0);
+ toUInt32.insert("QScriptValue(0, 0x43211234)", 1126240820);
+ toUInt32.insert("QScriptValue(0, 0x10000)", 65536);
+ toUInt32.insert("QScriptValue(0, 0x10001)", 65537);
+ toUInt32.insert("QScriptValue(0, qSNaN())", 0);
+ toUInt32.insert("QScriptValue(0, qQNaN())", 0);
+ toUInt32.insert("QScriptValue(0, qInf())", 0);
+ toUInt32.insert("QScriptValue(0, -qInf())", 0);
+ toUInt32.insert("QScriptValue(0, \"NaN\")", 0);
+ toUInt32.insert("QScriptValue(0, \"Infinity\")", 0);
+ toUInt32.insert("QScriptValue(0, \"-Infinity\")", 0);
+ toUInt32.insert("QScriptValue(0, \"ciao\")", 0);
+ toUInt32.insert("QScriptValue(0, QString::fromLatin1(\"ciao\"))", 0);
+ toUInt32.insert("QScriptValue(0, QString(\"\"))", 0);
+ toUInt32.insert("QScriptValue(0, QString())", 0);
+ toUInt32.insert("QScriptValue(0, QString(\"0\"))", 0);
+ toUInt32.insert("QScriptValue(0, QString(\"123\"))", 123);
+ toUInt32.insert("QScriptValue(0, QString(\"12.3\"))", 12);
+ toUInt32.insert("QScriptValue(engine, QScriptValue::UndefinedValue)", 0);
+ toUInt32.insert("QScriptValue(engine, QScriptValue::NullValue)", 0);
+ toUInt32.insert("QScriptValue(engine, true)", 1);
+ toUInt32.insert("QScriptValue(engine, false)", 0);
+ toUInt32.insert("QScriptValue(engine, int(122))", 122);
+ toUInt32.insert("QScriptValue(engine, uint(124))", 124);
+ toUInt32.insert("QScriptValue(engine, 0)", 0);
+ toUInt32.insert("QScriptValue(engine, 0.0)", 0);
+ toUInt32.insert("QScriptValue(engine, 123.0)", 123);
+ toUInt32.insert("QScriptValue(engine, 6.37e-8)", 0);
+ toUInt32.insert("QScriptValue(engine, -6.37e-8)", 0);
+ toUInt32.insert("QScriptValue(engine, 0x43211234)", 1126240820);
+ toUInt32.insert("QScriptValue(engine, 0x10000)", 65536);
+ toUInt32.insert("QScriptValue(engine, 0x10001)", 65537);
+ toUInt32.insert("QScriptValue(engine, qSNaN())", 0);
+ toUInt32.insert("QScriptValue(engine, qQNaN())", 0);
+ toUInt32.insert("QScriptValue(engine, qInf())", 0);
+ toUInt32.insert("QScriptValue(engine, -qInf())", 0);
+ toUInt32.insert("QScriptValue(engine, \"NaN\")", 0);
+ toUInt32.insert("QScriptValue(engine, \"Infinity\")", 0);
+ toUInt32.insert("QScriptValue(engine, \"-Infinity\")", 0);
+ toUInt32.insert("QScriptValue(engine, \"ciao\")", 0);
+ toUInt32.insert("QScriptValue(engine, QString::fromLatin1(\"ciao\"))", 0);
+ toUInt32.insert("QScriptValue(engine, QString(\"\"))", 0);
+ toUInt32.insert("QScriptValue(engine, QString())", 0);
+ toUInt32.insert("QScriptValue(engine, QString(\"0\"))", 0);
+ toUInt32.insert("QScriptValue(engine, QString(\"123\"))", 123);
+ toUInt32.insert("QScriptValue(engine, QString(\"1.23\"))", 1);
+ toUInt32.insert("engine->evaluate(\"[]\")", 0);
+ toUInt32.insert("engine->evaluate(\"{}\")", 0);
+ toUInt32.insert("engine->evaluate(\"Object.prototype\")", 0);
+ toUInt32.insert("engine->evaluate(\"Date.prototype\")", 0);
+ toUInt32.insert("engine->evaluate(\"Array.prototype\")", 0);
+ toUInt32.insert("engine->evaluate(\"Function.prototype\")", 0);
+ toUInt32.insert("engine->evaluate(\"Error.prototype\")", 0);
+ toUInt32.insert("engine->evaluate(\"Object\")", 0);
+ toUInt32.insert("engine->evaluate(\"Array\")", 0);
+ toUInt32.insert("engine->evaluate(\"Number\")", 0);
+ toUInt32.insert("engine->evaluate(\"Function\")", 0);
+ toUInt32.insert("engine->evaluate(\"(function() { return 1; })\")", 0);
+ toUInt32.insert("engine->evaluate(\"(function() { return 'ciao'; })\")", 0);
+ toUInt32.insert("engine->evaluate(\"(function() { throw new Error('foo'); })\")", 0);
+ toUInt32.insert("engine->evaluate(\"/foo/\")", 0);
+ toUInt32.insert("engine->evaluate(\"new Object()\")", 0);
+ toUInt32.insert("engine->evaluate(\"new Array()\")", 0);
+ toUInt32.insert("engine->evaluate(\"new Error()\")", 0);
+ }
+ newRow(expr) << toUInt32.value(expr);
+}
+
+void tst_QScriptValue::toUInt32_test(const char*, const QScriptValue& value)
+{
+ QFETCH(quint32, expected);
+ QCOMPARE(value.toUInt32(), expected);
+}
+
+DEFINE_TEST_FUNCTION(toUInt32)
+
+
+void tst_QScriptValue::toUInt16_initData()
+{
+ QTest::addColumn<quint16>("expected");
+ initScriptValues();
+}
+
+void tst_QScriptValue::toUInt16_makeData(const char* expr)
+{
+ static QHash<QString, quint16> toUInt16;
+ if (toUInt16.isEmpty()) {
+ toUInt16.insert("QScriptValue()", 0);
+ toUInt16.insert("QScriptValue(QScriptValue::UndefinedValue)", 0);
+ toUInt16.insert("QScriptValue(QScriptValue::NullValue)", 0);
+ toUInt16.insert("QScriptValue(true)", 1);
+ toUInt16.insert("QScriptValue(false)", 0);
+ toUInt16.insert("QScriptValue(int(122))", 122);
+ toUInt16.insert("QScriptValue(uint(124))", 124);
+ toUInt16.insert("QScriptValue(0)", 0);
+ toUInt16.insert("QScriptValue(0.0)", 0);
+ toUInt16.insert("QScriptValue(123.0)", 123);
+ toUInt16.insert("QScriptValue(6.37e-8)", 0);
+ toUInt16.insert("QScriptValue(-6.37e-8)", 0);
+ toUInt16.insert("QScriptValue(0x43211234)", 4660);
+ toUInt16.insert("QScriptValue(0x10000)", 0);
+ toUInt16.insert("QScriptValue(0x10001)", 1);
+ toUInt16.insert("QScriptValue(qSNaN())", 0);
+ toUInt16.insert("QScriptValue(qQNaN())", 0);
+ toUInt16.insert("QScriptValue(qInf())", 0);
+ toUInt16.insert("QScriptValue(-qInf())", 0);
+ toUInt16.insert("QScriptValue(\"NaN\")", 0);
+ toUInt16.insert("QScriptValue(\"Infinity\")", 0);
+ toUInt16.insert("QScriptValue(\"-Infinity\")", 0);
+ toUInt16.insert("QScriptValue(\"ciao\")", 0);
+ toUInt16.insert("QScriptValue(QString::fromLatin1(\"ciao\"))", 0);
+ toUInt16.insert("QScriptValue(QString(\"\"))", 0);
+ toUInt16.insert("QScriptValue(QString())", 0);
+ toUInt16.insert("QScriptValue(QString(\"0\"))", 0);
+ toUInt16.insert("QScriptValue(QString(\"123\"))", 123);
+ toUInt16.insert("QScriptValue(QString(\"12.4\"))", 12);
+ toUInt16.insert("QScriptValue(0, QScriptValue::UndefinedValue)", 0);
+ toUInt16.insert("QScriptValue(0, QScriptValue::NullValue)", 0);
+ toUInt16.insert("QScriptValue(0, true)", 1);
+ toUInt16.insert("QScriptValue(0, false)", 0);
+ toUInt16.insert("QScriptValue(0, int(122))", 122);
+ toUInt16.insert("QScriptValue(0, uint(124))", 124);
+ toUInt16.insert("QScriptValue(0, 0)", 0);
+ toUInt16.insert("QScriptValue(0, 0.0)", 0);
+ toUInt16.insert("QScriptValue(0, 123.0)", 123);
+ toUInt16.insert("QScriptValue(0, 6.37e-8)", 0);
+ toUInt16.insert("QScriptValue(0, -6.37e-8)", 0);
+ toUInt16.insert("QScriptValue(0, 0x43211234)", 4660);
+ toUInt16.insert("QScriptValue(0, 0x10000)", 0);
+ toUInt16.insert("QScriptValue(0, 0x10001)", 1);
+ toUInt16.insert("QScriptValue(0, qSNaN())", 0);
+ toUInt16.insert("QScriptValue(0, qQNaN())", 0);
+ toUInt16.insert("QScriptValue(0, qInf())", 0);
+ toUInt16.insert("QScriptValue(0, -qInf())", 0);
+ toUInt16.insert("QScriptValue(0, \"NaN\")", 0);
+ toUInt16.insert("QScriptValue(0, \"Infinity\")", 0);
+ toUInt16.insert("QScriptValue(0, \"-Infinity\")", 0);
+ toUInt16.insert("QScriptValue(0, \"ciao\")", 0);
+ toUInt16.insert("QScriptValue(0, QString::fromLatin1(\"ciao\"))", 0);
+ toUInt16.insert("QScriptValue(0, QString(\"\"))", 0);
+ toUInt16.insert("QScriptValue(0, QString())", 0);
+ toUInt16.insert("QScriptValue(0, QString(\"0\"))", 0);
+ toUInt16.insert("QScriptValue(0, QString(\"123\"))", 123);
+ toUInt16.insert("QScriptValue(0, QString(\"12.3\"))", 12);
+ toUInt16.insert("QScriptValue(engine, QScriptValue::UndefinedValue)", 0);
+ toUInt16.insert("QScriptValue(engine, QScriptValue::NullValue)", 0);
+ toUInt16.insert("QScriptValue(engine, true)", 1);
+ toUInt16.insert("QScriptValue(engine, false)", 0);
+ toUInt16.insert("QScriptValue(engine, int(122))", 122);
+ toUInt16.insert("QScriptValue(engine, uint(124))", 124);
+ toUInt16.insert("QScriptValue(engine, 0)", 0);
+ toUInt16.insert("QScriptValue(engine, 0.0)", 0);
+ toUInt16.insert("QScriptValue(engine, 123.0)", 123);
+ toUInt16.insert("QScriptValue(engine, 6.37e-8)", 0);
+ toUInt16.insert("QScriptValue(engine, -6.37e-8)", 0);
+ toUInt16.insert("QScriptValue(engine, 0x43211234)", 4660);
+ toUInt16.insert("QScriptValue(engine, 0x10000)", 0);
+ toUInt16.insert("QScriptValue(engine, 0x10001)", 1);
+ toUInt16.insert("QScriptValue(engine, qSNaN())", 0);
+ toUInt16.insert("QScriptValue(engine, qQNaN())", 0);
+ toUInt16.insert("QScriptValue(engine, qInf())", 0);
+ toUInt16.insert("QScriptValue(engine, -qInf())", 0);
+ toUInt16.insert("QScriptValue(engine, \"NaN\")", 0);
+ toUInt16.insert("QScriptValue(engine, \"Infinity\")", 0);
+ toUInt16.insert("QScriptValue(engine, \"-Infinity\")", 0);
+ toUInt16.insert("QScriptValue(engine, \"ciao\")", 0);
+ toUInt16.insert("QScriptValue(engine, QString::fromLatin1(\"ciao\"))", 0);
+ toUInt16.insert("QScriptValue(engine, QString(\"\"))", 0);
+ toUInt16.insert("QScriptValue(engine, QString())", 0);
+ toUInt16.insert("QScriptValue(engine, QString(\"0\"))", 0);
+ toUInt16.insert("QScriptValue(engine, QString(\"123\"))", 123);
+ toUInt16.insert("QScriptValue(engine, QString(\"1.23\"))", 1);
+ toUInt16.insert("engine->evaluate(\"[]\")", 0);
+ toUInt16.insert("engine->evaluate(\"{}\")", 0);
+ toUInt16.insert("engine->evaluate(\"Object.prototype\")", 0);
+ toUInt16.insert("engine->evaluate(\"Date.prototype\")", 0);
+ toUInt16.insert("engine->evaluate(\"Array.prototype\")", 0);
+ toUInt16.insert("engine->evaluate(\"Function.prototype\")", 0);
+ toUInt16.insert("engine->evaluate(\"Error.prototype\")", 0);
+ toUInt16.insert("engine->evaluate(\"Object\")", 0);
+ toUInt16.insert("engine->evaluate(\"Array\")", 0);
+ toUInt16.insert("engine->evaluate(\"Number\")", 0);
+ toUInt16.insert("engine->evaluate(\"Function\")", 0);
+ toUInt16.insert("engine->evaluate(\"(function() { return 1; })\")", 0);
+ toUInt16.insert("engine->evaluate(\"(function() { return 'ciao'; })\")", 0);
+ toUInt16.insert("engine->evaluate(\"(function() { throw new Error('foo'); })\")", 0);
+ toUInt16.insert("engine->evaluate(\"/foo/\")", 0);
+ toUInt16.insert("engine->evaluate(\"new Object()\")", 0);
+ toUInt16.insert("engine->evaluate(\"new Array()\")", 0);
+ toUInt16.insert("engine->evaluate(\"new Error()\")", 0);
+ }
+ newRow(expr) << toUInt16.value(expr);
+}
+
+void tst_QScriptValue::toUInt16_test(const char*, const QScriptValue& value)
+{
+ QFETCH(quint16, expected);
+ QCOMPARE(value.toUInt16(), expected);
+}
+
+DEFINE_TEST_FUNCTION(toUInt16)
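
The expected values in the toInt32/toUInt32/toUInt16 tables above follow the ECMA-262 ToInt32/ToUint32/ToUint16 conversions: NaN, the infinities and zero map to 0, and everything else is truncated toward zero and reduced modulo 2^32 (reinterpreted as signed for toInt32) or 2^16. That is why 0x43211234 stays 1126240820 as a 32-bit value but becomes 4660 (0x1234) as a 16-bit one. The following is a minimal standalone sketch of those conversions, not the QScriptValue implementation; the helper names are illustrative only.

#include <cassert>
#include <cmath>
#include <cstdint>

// Minimal sketch of ECMA-262 ToUint32/ToInt32/ToUint16 applied to a value that
// has already been converted to a Number. NaN, +/-Infinity and 0 all map to 0;
// other values are truncated toward zero and reduced modulo 2^32 or 2^16.
static uint32_t sketchToUInt32(double d)
{
    if (std::isnan(d) || std::isinf(d) || d == 0)
        return 0;
    double m = std::fmod(std::trunc(d), 4294967296.0); // fmod keeps the dividend's sign
    if (m < 0)
        m += 4294967296.0;
    return static_cast<uint32_t>(m);
}

static int32_t sketchToInt32(double d)
{
    // Reinterpret the unsigned result in two's complement (well-defined on
    // mainstream compilers, and by the standard from C++20 onward).
    return static_cast<int32_t>(sketchToUInt32(d));
}

static uint16_t sketchToUInt16(double d)
{
    return static_cast<uint16_t>(sketchToUInt32(d) & 0xFFFF); // keep the low 16 bits
}

int main()
{
    assert(sketchToInt32(0x43211234) == 1126240820);
    assert(sketchToUInt16(0x43211234) == 4660); // low 16 bits: 0x1234
    assert(sketchToUInt16(0x10000) == 0);
    assert(sketchToUInt16(0x10001) == 1);
    assert(sketchToUInt32(-6.37e-8) == 0);      // truncates to zero
    assert(sketchToUInt32(12.4) == 12);
    return 0;
}
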
diff --git a/JavaScriptCore/runtime/ArrayPrototype.cpp b/JavaScriptCore/runtime/ArrayPrototype.cpp
index b64abad..6d79581 100644
--- a/JavaScriptCore/runtime/ArrayPrototype.cpp
+++ b/JavaScriptCore/runtime/ArrayPrototype.cpp
@@ -201,7 +201,7 @@ JSValue JSC_HOST_CALL arrayProtoFuncToString(ExecState* exec, JSObject*, JSValue
if (i)
buffer.append(',');
if (RefPtr<UString::Rep> rep = strBuffer[i])
- buffer.append(rep->data(), rep->size());
+ buffer.append(rep->data(), rep->length());
}
ASSERT(buffer.size() == totalSize);
return jsString(exec, UString::adopt(buffer));
diff --git a/JavaScriptCore/runtime/Identifier.cpp b/JavaScriptCore/runtime/Identifier.cpp
index 97929e2..46fcd0b 100644
--- a/JavaScriptCore/runtime/Identifier.cpp
+++ b/JavaScriptCore/runtime/Identifier.cpp
@@ -79,7 +79,7 @@ void deleteIdentifierTable(IdentifierTable* table)
bool Identifier::equal(const UString::Rep* r, const char* s)
{
- int length = r->size();
+ int length = r->length();
const UChar* d = r->data();
for (int i = 0; i != length; ++i)
if (d[i] != (unsigned char)s[i])
@@ -87,12 +87,12 @@ bool Identifier::equal(const UString::Rep* r, const char* s)
return s[length] == 0;
}
-bool Identifier::equal(const UString::Rep* r, const UChar* s, int length)
+bool Identifier::equal(const UString::Rep* r, const UChar* s, unsigned length)
{
- if (r->size() != length)
+ if (r->length() != length)
return false;
const UChar* d = r->data();
- for (int i = 0; i != length; ++i)
+ for (unsigned i = 0; i != length; ++i)
if (d[i] != s[i])
return false;
return true;
@@ -209,7 +209,7 @@ PassRefPtr<UString::Rep> Identifier::add(ExecState* exec, const UChar* s, int le
PassRefPtr<UString::Rep> Identifier::addSlowCase(JSGlobalData* globalData, UString::Rep* r)
{
ASSERT(!r->isIdentifier());
- if (r->size() == 1) {
+ if (r->length() == 1) {
UChar c = r->data()[0];
if (c <= 0xFF)
r = globalData->smallStrings.singleCharacterStringRep(c);
@@ -220,7 +220,7 @@ PassRefPtr<UString::Rep> Identifier::addSlowCase(JSGlobalData* globalData, UStri
return r;
}
}
- if (!r->size()) {
+ if (!r->length()) {
UString::Rep::empty().hash();
return &UString::Rep::empty();
}
diff --git a/JavaScriptCore/runtime/Identifier.h b/JavaScriptCore/runtime/Identifier.h
index 1d1bd18..73e2af8 100644
--- a/JavaScriptCore/runtime/Identifier.h
+++ b/JavaScriptCore/runtime/Identifier.h
@@ -76,7 +76,7 @@ namespace JSC {
static void remove(UString::Rep*);
static bool equal(const UString::Rep*, const char*);
- static bool equal(const UString::Rep*, const UChar*, int length);
+ static bool equal(const UString::Rep*, const UChar*, unsigned length);
static bool equal(const UString::Rep* a, const UString::Rep* b) { return JSC::equal(a, b); }
static PassRefPtr<UString::Rep> add(ExecState*, const char*); // Only to be used with string literals.
diff --git a/JavaScriptCore/runtime/JSGlobalObjectFunctions.cpp b/JavaScriptCore/runtime/JSGlobalObjectFunctions.cpp
index 3ddac7c..0e1fbee 100644
--- a/JavaScriptCore/runtime/JSGlobalObjectFunctions.cpp
+++ b/JavaScriptCore/runtime/JSGlobalObjectFunctions.cpp
@@ -381,7 +381,7 @@ JSValue JSC_HOST_CALL globalFuncEscape(ExecState* exec, JSObject*, JSValue, cons
JSStringBuilder builder;
UString str = args.at(0).toString(exec);
const UChar* c = str.data();
- for (int k = 0; k < str.size(); k++, c++) {
+ for (unsigned k = 0; k < str.size(); k++, c++) {
int u = c[0];
if (u > 255) {
char tmp[7];
diff --git a/JavaScriptCore/runtime/JSONObject.cpp b/JavaScriptCore/runtime/JSONObject.cpp
index acd9280..d69a8da 100644
--- a/JavaScriptCore/runtime/JSONObject.cpp
+++ b/JavaScriptCore/runtime/JSONObject.cpp
@@ -135,7 +135,7 @@ static inline JSValue unwrapBoxedPrimitive(ExecState* exec, JSValue value)
static inline UString gap(ExecState* exec, JSValue space)
{
- const int maxGapLength = 10;
+ const unsigned maxGapLength = 10;
space = unwrapBoxedPrimitive(exec, space);
// If the space value is a number, create a gap string with that number of spaces.
@@ -456,7 +456,7 @@ inline bool Stringifier::willIndent() const
inline void Stringifier::indent()
{
// Use a single shared string, m_repeatedGap, so we don't keep allocating new ones as we indent and unindent.
- int newSize = m_indent.size() + m_gap.size();
+ unsigned newSize = m_indent.size() + m_gap.size();
if (newSize > m_repeatedGap.size())
m_repeatedGap = makeString(m_repeatedGap, m_gap);
ASSERT(newSize <= m_repeatedGap.size());
diff --git a/JavaScriptCore/runtime/JSObject.cpp b/JavaScriptCore/runtime/JSObject.cpp
index d9500aa..61d3bb1 100644
--- a/JavaScriptCore/runtime/JSObject.cpp
+++ b/JavaScriptCore/runtime/JSObject.cpp
@@ -516,9 +516,12 @@ void JSObject::putDirectFunctionWithoutTransition(ExecState* exec, InternalFunct
NEVER_INLINE void JSObject::fillGetterPropertySlot(PropertySlot& slot, JSValue* location)
{
- if (JSObject* getterFunction = asGetterSetter(*location)->getter())
- slot.setGetterSlot(getterFunction);
- else
+ if (JSObject* getterFunction = asGetterSetter(*location)->getter()) {
+ if (!structure()->isDictionary())
+ slot.setCacheableGetterSlot(this, getterFunction, offsetForLocation(location));
+ else
+ slot.setGetterSlot(getterFunction);
+ } else
slot.setUndefined();
}
diff --git a/JavaScriptCore/runtime/JSString.cpp b/JavaScriptCore/runtime/JSString.cpp
index 1e23a15..a72457e 100644
--- a/JavaScriptCore/runtime/JSString.cpp
+++ b/JavaScriptCore/runtime/JSString.cpp
@@ -31,48 +31,13 @@
namespace JSC {
-void JSString::Rope::destructNonRecursive()
-{
- Vector<Rope*, 32> workQueue;
- Rope* rope = this;
-
- while (true) {
- unsigned length = rope->ropeLength();
- for (unsigned i = 0; i < length; ++i) {
- Fiber& fiber = rope->fibers(i);
- if (fiber.isString())
- fiber.string()->deref();
- else {
- Rope* nextRope = fiber.rope();
- if (nextRope->hasOneRef())
- workQueue.append(nextRope);
- else
- nextRope->deref();
- }
- }
- if (rope != this)
- fastFree(rope);
-
- if (workQueue.isEmpty())
- return;
-
- rope = workQueue.last();
- workQueue.removeLast();
- }
-}
-
-JSString::Rope::~Rope()
-{
- destructNonRecursive();
-}
-
// Overview: this method converts a JSString from holding a string in rope form
// down to a simple UString representation. It does so by building up the string
// backwards, using an explicit work queue to avoid recursion. Since we expect the
// tree structure representing the rope to be imbalanced, with more nodes down the
// left side (appending to the string is likely more common), resolving in this
// fashion should minimize work queue size. (If we built the queue forwards
-// we would likely have to place all of the constituent UString::Reps into the
+// we would likely have to place all of the constituent UStringImpls into the
// Vector before performing any concatenation, but by working backwards we likely
// only fill the queue with the number of substrings at any given level in a
// rope-of-ropes.)
@@ -82,39 +47,39 @@ void JSString::resolveRope(ExecState* exec) const
// Allocate the buffer to hold the final string, position initially points to the end.
UChar* buffer;
- if (PassRefPtr<UStringImpl> newImpl = UStringImpl::tryCreateUninitialized(m_stringLength, buffer))
+ if (PassRefPtr<UStringImpl> newImpl = UStringImpl::tryCreateUninitialized(m_length, buffer))
m_value = newImpl;
else {
- for (unsigned i = 0; i < m_ropeLength; ++i) {
- m_fibers[i].deref();
- m_fibers[i] = static_cast<void*>(0);
+ for (unsigned i = 0; i < m_fiberCount; ++i) {
+ m_other.m_fibers[i]->deref();
+ m_other.m_fibers[i] = 0;
}
- m_ropeLength = 0;
+ m_fiberCount = 0;
ASSERT(!isRope());
ASSERT(m_value == UString());
throwOutOfMemoryError(exec);
return;
}
- UChar* position = buffer + m_stringLength;
+ UChar* position = buffer + m_length;
// Start with the current Rope.
Vector<Rope::Fiber, 32> workQueue;
Rope::Fiber currentFiber;
- for (unsigned i = 0; i < (m_ropeLength - 1); ++i)
- workQueue.append(m_fibers[i]);
- currentFiber = m_fibers[m_ropeLength - 1];
+ for (unsigned i = 0; i < (m_fiberCount - 1); ++i)
+ workQueue.append(m_other.m_fibers[i]);
+ currentFiber = m_other.m_fibers[m_fiberCount - 1];
while (true) {
- if (currentFiber.isRope()) {
- Rope* rope = currentFiber.rope();
+ if (currentFiber->isRope()) {
+ Rope* rope = static_cast<URopeImpl*>(currentFiber);
// Copy the contents of the current rope into the workQueue, with the last item in 'currentFiber'
// (we will be working backwards over the rope).
- unsigned ropeLengthMinusOne = rope->ropeLength() - 1;
- for (unsigned i = 0; i < ropeLengthMinusOne; ++i)
+ unsigned fiberCountMinusOne = rope->fiberCount() - 1;
+ for (unsigned i = 0; i < fiberCountMinusOne; ++i)
workQueue.append(rope->fibers(i));
- currentFiber = rope->fibers(ropeLengthMinusOne);
+ currentFiber = rope->fibers(fiberCountMinusOne);
} else {
- UString::Rep* string = currentFiber.string();
- unsigned length = string->size();
+ UStringImpl* string = static_cast<UStringImpl*>(currentFiber);
+ unsigned length = string->length();
position -= length;
UStringImpl::copyChars(position, string->data(), length);
@@ -122,11 +87,11 @@ void JSString::resolveRope(ExecState* exec) const
if (workQueue.isEmpty()) {
// Create a string from the UChar buffer, clear the rope RefPtr.
ASSERT(buffer == position);
- for (unsigned i = 0; i < m_ropeLength; ++i) {
- m_fibers[i].deref();
- m_fibers[i] = static_cast<void*>(0);
+ for (unsigned i = 0; i < m_fiberCount; ++i) {
+ m_other.m_fibers[i]->deref();
+ m_other.m_fibers[i] = 0;
}
- m_ropeLength = 0;
+ m_fiberCount = 0;
ASSERT(!isRope());
return;
@@ -153,7 +118,7 @@ bool JSString::getPrimitiveNumber(ExecState* exec, double& number, JSValue& resu
bool JSString::toBoolean(ExecState*) const
{
- return m_stringLength;
+ return m_length;
}
double JSString::toNumber(ExecState* exec) const
@@ -215,13 +180,13 @@ bool JSString::getOwnPropertySlot(ExecState* exec, const Identifier& propertyNam
bool JSString::getStringPropertyDescriptor(ExecState* exec, const Identifier& propertyName, PropertyDescriptor& descriptor)
{
if (propertyName == exec->propertyNames().length) {
- descriptor.setDescriptor(jsNumber(exec, m_stringLength), DontEnum | DontDelete | ReadOnly);
+ descriptor.setDescriptor(jsNumber(exec, m_length), DontEnum | DontDelete | ReadOnly);
return true;
}
bool isStrictUInt32;
unsigned i = propertyName.toStrictUInt32(&isStrictUInt32);
- if (isStrictUInt32 && i < m_stringLength) {
+ if (isStrictUInt32 && i < m_length) {
descriptor.setDescriptor(jsSingleCharacterSubstring(exec, value(exec), i), DontDelete | ReadOnly);
return true;
}
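
The resolveRope changes above keep the iterative, backwards-filling strategy described in the comment at the top of the file: fibers are visited from last to first, the output buffer is written from its end toward its start, and nested ropes push their fibers onto an explicit work queue instead of recursing. Below is a small self-contained illustration of that traversal using std::string and std::variant as stand-ins for UStringImpl and URopeImpl; the types and names are illustrative, not the JSC ones.

#include <memory>
#include <string>
#include <variant>
#include <vector>

struct RopeSketch;
using FiberSketch = std::variant<std::string, std::shared_ptr<RopeSketch>>;

// A rope is a sequence of fibers; each fiber is either a flat string or
// another rope. 'length' caches the total character count of all leaves.
struct RopeSketch {
    std::vector<FiberSketch> fibers;
    size_t length = 0;
};

// Flatten without recursion: walk fibers from last to first so the queue only
// ever holds the unvisited fibers of the ropes being unpacked, and fill the
// buffer from the end. Assumes every rope has at least one fiber and that
// 'length' is accurate.
static std::string resolve(const RopeSketch& root)
{
    std::string out(root.length, '\0');
    size_t position = root.length;              // next write ends here
    std::vector<FiberSketch> workQueue;
    for (size_t i = 0; i + 1 < root.fibers.size(); ++i)
        workQueue.push_back(root.fibers[i]);
    FiberSketch current = root.fibers.back();
    while (true) {
        if (auto* rope = std::get_if<std::shared_ptr<RopeSketch>>(&current)) {
            const RopeSketch& r = **rope;       // unpack a nested rope
            for (size_t i = 0; i + 1 < r.fibers.size(); ++i)
                workQueue.push_back(r.fibers[i]);
            current = r.fibers.back();
        } else {
            const std::string& s = std::get<std::string>(current);
            position -= s.size();
            out.replace(position, s.size(), s); // copy this leaf into place
            if (workQueue.empty())
                return out;                     // position is now 0
            current = workQueue.back();
            workQueue.pop_back();
        }
    }
}

int main()
{
    auto inner = std::make_shared<RopeSketch>();
    inner->fibers = { std::string("Hello, "), std::string("rope ") };
    inner->length = 12;
    RopeSketch root;
    root.fibers = { FiberSketch(inner), std::string("world") };
    root.length = 17;
    return resolve(root) == "Hello, rope world" ? 0 : 1;
}
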
diff --git a/JavaScriptCore/runtime/JSString.h b/JavaScriptCore/runtime/JSString.h
index cff8e3a..0162282 100644
--- a/JavaScriptCore/runtime/JSString.h
+++ b/JavaScriptCore/runtime/JSString.h
@@ -67,109 +67,55 @@ namespace JSC {
friend class JIT;
friend class JSGlobalData;
- // A Rope is a string composed of a set of substrings.
- class Rope : public RefCounted<Rope> {
- public:
- // A Rope is composed from a set of smaller strings called Fibers.
- // Each Fiber in a rope is either UString::Rep or another Rope.
- class Fiber {
- public:
- Fiber() : m_value(0) {}
- Fiber(UString::Rep* string) : m_value(reinterpret_cast<intptr_t>(string)) {}
- Fiber(Rope* rope) : m_value(reinterpret_cast<intptr_t>(rope) | 1) {}
-
- Fiber(void* nonFiber) : m_value(reinterpret_cast<intptr_t>(nonFiber)) {}
-
- void deref()
- {
- if (isRope())
- rope()->deref();
- else
- string()->deref();
- }
-
- Fiber& ref()
- {
- if (isString())
- string()->ref();
- else
- rope()->ref();
- return *this;
- }
-
- unsigned refAndGetLength()
- {
- if (isString()) {
- UString::Rep* rep = string();
- return rep->ref()->size();
- } else {
- Rope* r = rope();
- r->ref();
- return r->stringLength();
- }
- }
-
- bool isRope() { return m_value & 1; }
- Rope* rope() { return reinterpret_cast<Rope*>(m_value & ~1); }
- bool isString() { return !isRope(); }
- UString::Rep* string() { return reinterpret_cast<UString::Rep*>(m_value); }
-
- void* nonFiber() { return reinterpret_cast<void*>(m_value); }
- private:
- intptr_t m_value;
- };
+ typedef URopeImpl Rope;
- // Creates a Rope comprising of 'ropeLength' Fibers.
- // The Rope is constructed in an uninitialized state - initialize must be called for each Fiber in the Rope.
- static PassRefPtr<Rope> createOrNull(unsigned ropeLength)
+ class RopeBuilder {
+ public:
+ RopeBuilder(unsigned fiberCount)
+ : m_index(0)
+ , m_rope(Rope::tryCreateUninitialized(fiberCount))
{
- void* allocation;
- if (tryFastMalloc(sizeof(Rope) + (ropeLength - 1) * sizeof(Fiber)).getValue(allocation))
- return adoptRef(new (allocation) Rope(ropeLength));
- return 0;
}
- ~Rope();
- void destructNonRecursive();
+ bool isOutOfMemory() { return !m_rope; }
- void append(unsigned &index, Fiber& fiber)
+ void append(Rope::Fiber& fiber)
{
- m_fibers[index++] = fiber;
- m_stringLength += fiber.refAndGetLength();
+ ASSERT(m_rope);
+ m_rope->initializeFiber(m_index, fiber);
}
- void append(unsigned &index, const UString& string)
+ void append(const UString& string)
{
- UString::Rep* rep = string.rep();
- m_fibers[index++] = Fiber(rep);
- m_stringLength += rep->ref()->size();
+ ASSERT(m_rope);
+ m_rope->initializeFiber(m_index, string.rep());
}
- void append(unsigned& index, JSString* jsString)
+ void append(JSString* jsString)
{
if (jsString->isRope()) {
- for (unsigned i = 0; i < jsString->m_ropeLength; ++i)
- append(index, jsString->m_fibers[i]);
+ for (unsigned i = 0; i < jsString->m_fiberCount; ++i)
+ append(jsString->m_other.m_fibers[i]);
} else
- append(index, jsString->string());
+ append(jsString->string());
+ }
+
+ PassRefPtr<Rope> release()
+ {
+ ASSERT(m_index == m_rope->fiberCount());
+ return m_rope.release();
}
- unsigned ropeLength() { return m_ropeLength; }
- unsigned stringLength() { return m_stringLength; }
- Fiber& fibers(unsigned index) { return m_fibers[index]; }
+ unsigned length() { return m_rope->length(); }
private:
- Rope(unsigned ropeLength) : m_ropeLength(ropeLength), m_stringLength(0) {}
- void* operator new(size_t, void* inPlace) { return inPlace; }
-
- unsigned m_ropeLength;
- unsigned m_stringLength;
- Fiber m_fibers[1];
+ unsigned m_index;
+ RefPtr<Rope> m_rope;
};
ALWAYS_INLINE JSString(JSGlobalData* globalData, const UString& value)
: JSCell(globalData->stringStructure.get())
- , m_stringLength(value.size())
+ , m_length(value.size())
, m_value(value)
- , m_ropeLength(0)
+ , m_fiberCount(0)
{
Heap::heap(this)->reportExtraMemoryCost(value.cost());
}
@@ -177,72 +123,72 @@ namespace JSC {
enum HasOtherOwnerType { HasOtherOwner };
JSString(JSGlobalData* globalData, const UString& value, HasOtherOwnerType)
: JSCell(globalData->stringStructure.get())
- , m_stringLength(value.size())
+ , m_length(value.size())
, m_value(value)
- , m_ropeLength(0)
+ , m_fiberCount(0)
{
}
JSString(JSGlobalData* globalData, PassRefPtr<UString::Rep> value, HasOtherOwnerType)
: JSCell(globalData->stringStructure.get())
- , m_stringLength(value->size())
+ , m_length(value->length())
, m_value(value)
- , m_ropeLength(0)
+ , m_fiberCount(0)
{
}
- JSString(JSGlobalData* globalData, PassRefPtr<JSString::Rope> rope)
+ JSString(JSGlobalData* globalData, PassRefPtr<Rope> rope)
: JSCell(globalData->stringStructure.get())
- , m_stringLength(rope->stringLength())
- , m_ropeLength(1)
+ , m_length(rope->length())
+ , m_fiberCount(1)
{
- m_fibers[0] = rope.releaseRef();
+ m_other.m_fibers[0] = rope.releaseRef();
}
// This constructor constructs a new string by concatenating s1 & s2.
- // This should only be called with ropeLength <= 3.
- JSString(JSGlobalData* globalData, unsigned ropeLength, JSString* s1, JSString* s2)
+ // This should only be called with fiberCount <= 3.
+ JSString(JSGlobalData* globalData, unsigned fiberCount, JSString* s1, JSString* s2)
: JSCell(globalData->stringStructure.get())
- , m_stringLength(s1->length() + s2->length())
- , m_ropeLength(ropeLength)
+ , m_length(s1->length() + s2->length())
+ , m_fiberCount(fiberCount)
{
- ASSERT(ropeLength <= s_maxInternalRopeLength);
+ ASSERT(fiberCount <= s_maxInternalRopeLength);
unsigned index = 0;
appendStringInConstruct(index, s1);
appendStringInConstruct(index, s2);
- ASSERT(ropeLength == index);
+ ASSERT(fiberCount == index);
}
// This constructor constructs a new string by concatenating s1 & s2.
- // This should only be called with ropeLength <= 3.
- JSString(JSGlobalData* globalData, unsigned ropeLength, JSString* s1, const UString& u2)
+ // This should only be called with fiberCount <= 3.
+ JSString(JSGlobalData* globalData, unsigned fiberCount, JSString* s1, const UString& u2)
: JSCell(globalData->stringStructure.get())
- , m_stringLength(s1->length() + u2.size())
- , m_ropeLength(ropeLength)
+ , m_length(s1->length() + u2.size())
+ , m_fiberCount(fiberCount)
{
- ASSERT(ropeLength <= s_maxInternalRopeLength);
+ ASSERT(fiberCount <= s_maxInternalRopeLength);
unsigned index = 0;
appendStringInConstruct(index, s1);
appendStringInConstruct(index, u2);
- ASSERT(ropeLength == index);
+ ASSERT(fiberCount == index);
}
// This constructor constructs a new string by concatenating s1 & s2.
- // This should only be called with ropeLength <= 3.
- JSString(JSGlobalData* globalData, unsigned ropeLength, const UString& u1, JSString* s2)
+ // This should only be called with fiberCount <= 3.
+ JSString(JSGlobalData* globalData, unsigned fiberCount, const UString& u1, JSString* s2)
: JSCell(globalData->stringStructure.get())
- , m_stringLength(u1.size() + s2->length())
- , m_ropeLength(ropeLength)
+ , m_length(u1.size() + s2->length())
+ , m_fiberCount(fiberCount)
{
- ASSERT(ropeLength <= s_maxInternalRopeLength);
+ ASSERT(fiberCount <= s_maxInternalRopeLength);
unsigned index = 0;
appendStringInConstruct(index, u1);
appendStringInConstruct(index, s2);
- ASSERT(ropeLength == index);
+ ASSERT(fiberCount == index);
}
// This constructor constructs a new string by concatenating v1, v2 & v3.
- // This should only be called with ropeLength <= 3 ... which since every
- // value must require a ropeLength of at least one implies that the length
+ // This should only be called with fiberCount <= 3 ... which since every
+ // value must require a fiberCount of at least one implies that the length
// for each value must be exactly 1!
JSString(ExecState* exec, JSValue v1, JSValue v2, JSValue v3)
: JSCell(exec->globalData().stringStructure.get())
- , m_stringLength(0)
- , m_ropeLength(s_maxInternalRopeLength)
+ , m_length(0)
+ , m_fiberCount(s_maxInternalRopeLength)
{
unsigned index = 0;
appendValueInConstructAndIncrementLength(exec, index, v1);
@@ -253,26 +199,24 @@ namespace JSC {
JSString(JSGlobalData* globalData, const UString& value, JSStringFinalizerCallback finalizer, void* context)
: JSCell(globalData->stringStructure.get())
- , m_stringLength(value.size())
+ , m_length(value.size())
, m_value(value)
- , m_ropeLength(0)
+ , m_fiberCount(0)
{
// nasty hack because we can't union non-POD types
- m_fibers[0] = reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(finalizer));
- m_fibers[1] = context;
+ m_other.m_finalizerCallback = finalizer;
+ m_other.m_finalizerContext = context;
Heap::heap(this)->reportExtraMemoryCost(value.cost());
}
~JSString()
{
ASSERT(vptr() == JSGlobalData::jsStringVPtr);
- for (unsigned i = 0; i < m_ropeLength; ++i)
- m_fibers[i].deref();
+ for (unsigned i = 0; i < m_fiberCount; ++i)
+ m_other.m_fibers[i]->deref();
- if (!m_ropeLength && m_fibers[0].nonFiber()) {
- JSStringFinalizerCallback finalizer = reinterpret_cast<JSStringFinalizerCallback>(m_fibers[0].nonFiber());
- finalizer(this, m_fibers[1].nonFiber());
- }
+ if (!m_fiberCount && m_other.m_finalizerCallback)
+ m_other.m_finalizerCallback(this, m_other.m_finalizerContext);
}
const UString& value(ExecState* exec) const
@@ -288,13 +232,13 @@ namespace JSC {
ASSERT(isRope() == m_value.isNull());
return m_value;
}
- unsigned length() { return m_stringLength; }
+ unsigned length() { return m_length; }
bool getStringPropertySlot(ExecState*, const Identifier& propertyName, PropertySlot&);
bool getStringPropertySlot(ExecState*, unsigned propertyName, PropertySlot&);
bool getStringPropertyDescriptor(ExecState*, const Identifier& propertyName, PropertyDescriptor&);
- bool canGetIndex(unsigned i) { return i < m_stringLength; }
+ bool canGetIndex(unsigned i) { return i < m_length; }
JSString* getIndex(ExecState*, unsigned);
static PassRefPtr<Structure> createStructure(JSValue proto) { return Structure::create(proto, TypeInfo(StringType, OverridesGetOwnPropertySlot | NeedsThisConversion), AnonymousSlotCount); }
@@ -303,7 +247,7 @@ namespace JSC {
enum VPtrStealingHackType { VPtrStealingHack };
JSString(VPtrStealingHackType)
: JSCell(0)
- , m_ropeLength(0)
+ , m_fiberCount(0)
{
}
@@ -311,14 +255,19 @@ namespace JSC {
void appendStringInConstruct(unsigned& index, const UString& string)
{
- m_fibers[index++] = Rope::Fiber(string.rep()->ref());
+ UStringImpl* impl = string.rep();
+ impl->ref();
+ m_other.m_fibers[index++] = impl;
}
void appendStringInConstruct(unsigned& index, JSString* jsString)
{
if (jsString->isRope()) {
- for (unsigned i = 0; i < jsString->m_ropeLength; ++i)
- m_fibers[index++] = jsString->m_fibers[i].ref();
+ for (unsigned i = 0; i < jsString->m_fiberCount; ++i) {
+ Rope::Fiber fiber = jsString->m_other.m_fibers[i];
+ fiber->ref();
+ m_other.m_fibers[index++] = fiber;
+ }
} else
appendStringInConstruct(index, jsString->string());
}
@@ -328,13 +277,15 @@ namespace JSC {
if (v.isString()) {
ASSERT(asCell(v)->isString());
JSString* s = static_cast<JSString*>(asCell(v));
- ASSERT(s->ropeLength() == 1);
+ ASSERT(s->fiberCount() == 1);
appendStringInConstruct(index, s);
- m_stringLength += s->length();
+ m_length += s->length();
} else {
UString u(v.toString(exec));
- m_fibers[index++] = Rope::Fiber(u.rep()->ref());
- m_stringLength += u.size();
+ UStringImpl* impl = u.rep();
+ impl->ref();
+ m_other.m_fibers[index++] = impl;
+ m_length += u.size();
}
}
@@ -357,14 +308,24 @@ namespace JSC {
static const unsigned s_maxInternalRopeLength = 3;
// A string is represented either by a UString or a Rope.
- unsigned m_stringLength;
+ unsigned m_length;
mutable UString m_value;
- mutable unsigned m_ropeLength;
- mutable Rope::Fiber m_fibers[s_maxInternalRopeLength];
+ mutable unsigned m_fiberCount;
+ // This structure exists to support a temporary workaround for a GC issue.
+ struct JSStringFinalizerStruct {
+ JSStringFinalizerStruct() : m_finalizerCallback(0) {}
+ union {
+ mutable Rope::Fiber m_fibers[s_maxInternalRopeLength];
+ struct {
+ JSStringFinalizerCallback m_finalizerCallback;
+ void* m_finalizerContext;
+ };
+ };
+ } m_other;
- bool isRope() const { return m_ropeLength; }
+ bool isRope() const { return m_fiberCount; }
UString& string() { ASSERT(!isRope()); return m_value; }
- unsigned ropeLength() { return m_ropeLength ? m_ropeLength : 1; }
+ unsigned fiberCount() { return m_fiberCount ? m_fiberCount : 1; }
friend JSValue jsString(ExecState* exec, JSString* s1, JSString* s2);
friend JSValue jsString(ExecState* exec, const UString& u1, JSString* s2);
@@ -493,13 +454,13 @@ namespace JSC {
ALWAYS_INLINE bool JSString::getStringPropertySlot(ExecState* exec, const Identifier& propertyName, PropertySlot& slot)
{
if (propertyName == exec->propertyNames().length) {
- slot.setValue(jsNumber(exec, m_stringLength));
+ slot.setValue(jsNumber(exec, m_length));
return true;
}
bool isStrictUInt32;
unsigned i = propertyName.toStrictUInt32(&isStrictUInt32);
- if (isStrictUInt32 && i < m_stringLength) {
+ if (isStrictUInt32 && i < m_length) {
slot.setValue(jsSingleCharacterSubstring(exec, value(exec), i));
return true;
}
@@ -509,7 +470,7 @@ namespace JSC {
ALWAYS_INLINE bool JSString::getStringPropertySlot(ExecState* exec, unsigned propertyName, PropertySlot& slot)
{
- if (propertyName < m_stringLength) {
+ if (propertyName < m_length) {
slot.setValue(jsSingleCharacterSubstring(exec, value(exec), propertyName));
return true;
}
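
In the new layout above, m_fiberCount doubles as the rope flag (zero means m_value already holds the resolved string), and the m_other struct lets the same storage hold either the fiber pointers or the finalizer callback and context; the constructor taking a finalizer sets m_fiberCount to 0, so the two uses never overlap. A toy model of that storage-sharing pattern, with illustrative names only and none of the JSC types:

// Toy model, not the JSC classes: fiberCount == 0 means "not a rope", and in
// that state the words that would otherwise hold fiber pointers instead hold
// an optional finalizer callback and its context.
struct StringImplStub;                         // stand-in for UStringImpl
typedef void (*FinalizerSketch)(void* string, void* context);

struct JSStringStorageSketch {
    unsigned length = 0;
    unsigned fiberCount = 0;                   // 0 => resolved, > 0 => rope
    union Other {
        Other() : fibers{} {}
        StringImplStub* fibers[3];             // meaningful when fiberCount > 0
        struct Finalizer {
            FinalizerSketch callback;
            void* context;
        } finalizer;                           // meaningful when fiberCount == 0
    } other;

    bool isRope() const { return fiberCount != 0; }
};

int main()
{
    JSStringStorageSketch s;
    return s.isRope() ? 1 : 0;                 // freshly constructed: not a rope
}
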
diff --git a/JavaScriptCore/runtime/NumberPrototype.cpp b/JavaScriptCore/runtime/NumberPrototype.cpp
index fa32b86..5680eb1 100644
--- a/JavaScriptCore/runtime/NumberPrototype.cpp
+++ b/JavaScriptCore/runtime/NumberPrototype.cpp
@@ -265,11 +265,11 @@ JSValue JSC_HOST_CALL numberProtoFuncToFixed(ExecState* exec, JSObject*, JSValue
z.append(m);
m = z.build();
k = f + 1;
- ASSERT(k == m.size());
+ ASSERT(k == static_cast<int>(m.size()));
}
int kMinusf = k - f;
- if (kMinusf < m.size())
+ if (kMinusf < static_cast<int>(m.size()))
return jsString(exec, makeString(s, m.substr(0, kMinusf), ".", m.substr(kMinusf)));
return jsString(exec, makeString(s, m.substr(0, kMinusf)));
}
@@ -444,7 +444,7 @@ JSValue JSC_HOST_CALL numberProtoFuncToPrecision(ExecState* exec, JSObject*, JSV
if (e == precision - 1)
return jsString(exec, makeString(s, m));
if (e >= 0) {
- if (e + 1 < m.size())
+ if (e + 1 < static_cast<int>(m.size()))
return jsString(exec, makeString(s, m.substr(0, e + 1), ".", m.substr(e + 1)));
return jsString(exec, makeString(s, m));
}
diff --git a/JavaScriptCore/runtime/Operations.h b/JavaScriptCore/runtime/Operations.h
index 9b27074..cc0d603 100644
--- a/JavaScriptCore/runtime/Operations.h
+++ b/JavaScriptCore/runtime/Operations.h
@@ -37,132 +37,167 @@ namespace JSC {
ALWAYS_INLINE JSValue jsString(ExecState* exec, JSString* s1, JSString* s2)
{
- if (!s1->length())
+ unsigned length1 = s1->length();
+ if (!length1)
return s2;
- if (!s2->length())
+ unsigned length2 = s2->length();
+ if (!length2)
return s1;
+ if ((length1 + length2) < length1)
+ return throwOutOfMemoryError(exec);
- unsigned ropeLength = s1->ropeLength() + s2->ropeLength();
+ unsigned fiberCount = s1->fiberCount() + s2->fiberCount();
JSGlobalData* globalData = &exec->globalData();
- if (ropeLength <= JSString::s_maxInternalRopeLength)
- return new (globalData) JSString(globalData, ropeLength, s1, s2);
+ if (fiberCount <= JSString::s_maxInternalRopeLength)
+ return new (globalData) JSString(globalData, fiberCount, s1, s2);
- unsigned index = 0;
- RefPtr<JSString::Rope> rope = JSString::Rope::createOrNull(ropeLength);
- if (UNLIKELY(!rope))
+ JSString::RopeBuilder ropeBuilder(fiberCount);
+ if (UNLIKELY(ropeBuilder.isOutOfMemory()))
return throwOutOfMemoryError(exec);
- rope->append(index, s1);
- rope->append(index, s2);
- ASSERT(index == ropeLength);
- return new (globalData) JSString(globalData, rope.release());
+ ropeBuilder.append(s1);
+ ropeBuilder.append(s2);
+ return new (globalData) JSString(globalData, ropeBuilder.release());
}
ALWAYS_INLINE JSValue jsString(ExecState* exec, const UString& u1, JSString* s2)
{
- unsigned ropeLength = 1 + s2->ropeLength();
+ unsigned length1 = u1.size();
+ if (!length1)
+ return s2;
+ unsigned length2 = s2->length();
+ if (!length2)
+ return jsString(exec, u1);
+ if ((length1 + length2) < length1)
+ return throwOutOfMemoryError(exec);
+
+ unsigned fiberCount = 1 + s2->fiberCount();
JSGlobalData* globalData = &exec->globalData();
- if (ropeLength <= JSString::s_maxInternalRopeLength)
- return new (globalData) JSString(globalData, ropeLength, u1, s2);
+ if (fiberCount <= JSString::s_maxInternalRopeLength)
+ return new (globalData) JSString(globalData, fiberCount, u1, s2);
- unsigned index = 0;
- RefPtr<JSString::Rope> rope = JSString::Rope::createOrNull(ropeLength);
- if (UNLIKELY(!rope))
+ JSString::RopeBuilder ropeBuilder(fiberCount);
+ if (UNLIKELY(ropeBuilder.isOutOfMemory()))
return throwOutOfMemoryError(exec);
- rope->append(index, u1);
- rope->append(index, s2);
- ASSERT(index == ropeLength);
- return new (globalData) JSString(globalData, rope.release());
+ ropeBuilder.append(u1);
+ ropeBuilder.append(s2);
+ return new (globalData) JSString(globalData, ropeBuilder.release());
}
ALWAYS_INLINE JSValue jsString(ExecState* exec, JSString* s1, const UString& u2)
{
- unsigned ropeLength = s1->ropeLength() + 1;
+ unsigned length1 = s1->length();
+ if (!length1)
+ return jsString(exec, u2);
+ unsigned length2 = u2.size();
+ if (!length2)
+ return s1;
+ if ((length1 + length2) < length1)
+ return throwOutOfMemoryError(exec);
+
+ unsigned fiberCount = s1->fiberCount() + 1;
JSGlobalData* globalData = &exec->globalData();
- if (ropeLength <= JSString::s_maxInternalRopeLength)
- return new (globalData) JSString(globalData, ropeLength, s1, u2);
+ if (fiberCount <= JSString::s_maxInternalRopeLength)
+ return new (globalData) JSString(globalData, fiberCount, s1, u2);
- unsigned index = 0;
- RefPtr<JSString::Rope> rope = JSString::Rope::createOrNull(ropeLength);
- if (UNLIKELY(!rope))
+ JSString::RopeBuilder ropeBuilder(fiberCount);
+ if (UNLIKELY(ropeBuilder.isOutOfMemory()))
return throwOutOfMemoryError(exec);
- rope->append(index, s1);
- rope->append(index, u2);
- ASSERT(index == ropeLength);
- return new (globalData) JSString(globalData, rope.release());
+ ropeBuilder.append(s1);
+ ropeBuilder.append(u2);
+ return new (globalData) JSString(globalData, ropeBuilder.release());
}
ALWAYS_INLINE JSValue jsString(ExecState* exec, Register* strings, unsigned count)
{
ASSERT(count >= 3);
- unsigned ropeLength = 0;
+ unsigned fiberCount = 0;
for (unsigned i = 0; i < count; ++i) {
JSValue v = strings[i].jsValue();
if (LIKELY(v.isString()))
- ropeLength += asString(v)->ropeLength();
+ fiberCount += asString(v)->fiberCount();
else
- ++ropeLength;
+ ++fiberCount;
}
JSGlobalData* globalData = &exec->globalData();
- if (ropeLength == 3)
+ if (fiberCount == 3)
return new (globalData) JSString(exec, strings[0].jsValue(), strings[1].jsValue(), strings[2].jsValue());
- RefPtr<JSString::Rope> rope = JSString::Rope::createOrNull(ropeLength);
- if (UNLIKELY(!rope))
+ JSString::RopeBuilder ropeBuilder(fiberCount);
+ if (UNLIKELY(ropeBuilder.isOutOfMemory()))
return throwOutOfMemoryError(exec);
- unsigned index = 0;
+ unsigned length = 0;
+ bool overflow = false;
+
for (unsigned i = 0; i < count; ++i) {
JSValue v = strings[i].jsValue();
if (LIKELY(v.isString()))
- rope->append(index, asString(v));
+ ropeBuilder.append(asString(v));
else
- rope->append(index, v.toString(exec));
+ ropeBuilder.append(v.toString(exec));
+
+ unsigned newLength = ropeBuilder.length();
+ if (newLength < length)
+ overflow = true;
+ length = newLength;
}
- ASSERT(index == ropeLength);
- return new (globalData) JSString(globalData, rope.release());
+ if (overflow)
+ return throwOutOfMemoryError(exec);
+
+ return new (globalData) JSString(globalData, ropeBuilder.release());
}
ALWAYS_INLINE JSValue jsString(ExecState* exec, JSValue thisValue, const ArgList& args)
{
- unsigned ropeLength = 0;
+ unsigned fiberCount = 0;
if (LIKELY(thisValue.isString()))
- ropeLength += asString(thisValue)->ropeLength();
+ fiberCount += asString(thisValue)->fiberCount();
else
- ++ropeLength;
+ ++fiberCount;
for (unsigned i = 0; i < args.size(); ++i) {
JSValue v = args.at(i);
if (LIKELY(v.isString()))
- ropeLength += asString(v)->ropeLength();
+ fiberCount += asString(v)->fiberCount();
else
- ++ropeLength;
+ ++fiberCount;
}
- RefPtr<JSString::Rope> rope = JSString::Rope::createOrNull(ropeLength);
- if (UNLIKELY(!rope))
+ JSString::RopeBuilder ropeBuilder(fiberCount);
+ if (UNLIKELY(ropeBuilder.isOutOfMemory()))
return throwOutOfMemoryError(exec);
- unsigned index = 0;
if (LIKELY(thisValue.isString()))
- rope->append(index, asString(thisValue));
+ ropeBuilder.append(asString(thisValue));
else
- rope->append(index, thisValue.toString(exec));
+ ropeBuilder.append(thisValue.toString(exec));
+
+ unsigned length = 0;
+ bool overflow = false;
+
for (unsigned i = 0; i < args.size(); ++i) {
JSValue v = args.at(i);
if (LIKELY(v.isString()))
- rope->append(index, asString(v));
+ ropeBuilder.append(asString(v));
else
- rope->append(index, v.toString(exec));
+ ropeBuilder.append(v.toString(exec));
+
+ unsigned newLength = ropeBuilder.length();
+ if (newLength < length)
+ overflow = true;
+ length = newLength;
}
- ASSERT(index == ropeLength);
+
+ if (overflow)
+ return throwOutOfMemoryError(exec);
JSGlobalData* globalData = &exec->globalData();
- return new (globalData) JSString(globalData, rope.release());
+ return new (globalData) JSString(globalData, ropeBuilder.release());
}
// ECMA 11.9.3
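
The jsString overloads above now guard against length overflow before building a rope: with unsigned arithmetic, a sum that wraps around comes out smaller than either operand, so (length1 + length2) < length1 detects the overflow, and the loop-based overloads apply the same test to a running length. A self-contained illustration of that guard (the helper names are illustrative, not part of the patch):

#include <cassert>
#include <climits>
#include <vector>

// The wrap-around check in isolation: unsigned addition is modulo 2^N, so an
// overflowed sum is smaller than either operand.
static bool additionOverflows(unsigned a, unsigned b)
{
    return a + b < a;
}

// Running-length form, the same shape as the check applied while appending
// many pieces in a loop.
static bool totalLengthOverflows(const std::vector<unsigned>& pieceLengths)
{
    unsigned length = 0;
    for (unsigned pieceLength : pieceLengths) {
        unsigned newLength = length + pieceLength;
        if (newLength < length)
            return true;                 // wrapped past UINT_MAX
        length = newLength;
    }
    return false;
}

int main()
{
    assert(!additionOverflows(10, 20));
    assert(additionOverflows(UINT_MAX, 1));
    assert(totalLengthOverflows({ UINT_MAX - 5, 3, 4 }));
    assert(!totalLengthOverflows({ 100, 200, 300 }));
    return 0;
}
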
diff --git a/JavaScriptCore/runtime/PropertySlot.cpp b/JavaScriptCore/runtime/PropertySlot.cpp
index a0a2f48..8b6ceb9 100644
--- a/JavaScriptCore/runtime/PropertySlot.cpp
+++ b/JavaScriptCore/runtime/PropertySlot.cpp
@@ -35,10 +35,10 @@ JSValue PropertySlot::functionGetter(ExecState* exec, const Identifier&, const P
CallData callData;
CallType callType = slot.m_data.getterFunc->getCallData(callData);
if (callType == CallTypeHost)
- return callData.native.function(exec, slot.m_data.getterFunc, slot.slotBase(), exec->emptyList());
+ return callData.native.function(exec, slot.m_data.getterFunc, slot.thisValue(), exec->emptyList());
ASSERT(callType == CallTypeJS);
// FIXME: Can this be done more efficiently using the callData?
- return asFunction(slot.m_data.getterFunc)->call(exec, slot.slotBase(), exec->emptyList());
+ return asFunction(slot.m_data.getterFunc)->call(exec, slot.thisValue(), exec->emptyList());
}
} // namespace JSC
diff --git a/JavaScriptCore/runtime/PropertySlot.h b/JavaScriptCore/runtime/PropertySlot.h
index 15d9034..a364e42 100644
--- a/JavaScriptCore/runtime/PropertySlot.h
+++ b/JavaScriptCore/runtime/PropertySlot.h
@@ -71,7 +71,9 @@ namespace JSC {
return m_getValue(exec, Identifier::from(exec, propertyName), *this);
}
- bool isCacheable() const { return m_offset != WTF::notFound; }
+ bool isGetter() const { return m_isGetter; }
+ bool isCacheable() const { return m_isCacheable; }
+ bool isCacheableValue() const { return m_isCacheable && !m_isGetter; }
size_t cachedOffset() const
{
ASSERT(isCacheable());
@@ -102,6 +104,8 @@ namespace JSC {
m_slotBase = slotBase;
m_data.valueSlot = valueSlot;
m_offset = offset;
+ m_isCacheable = true;
+ m_isGetter = false;
}
void setValue(JSValue value)
@@ -139,14 +143,28 @@ namespace JSC {
m_slotBase = slotBase;
m_data.index = index;
}
-
+
void setGetterSlot(JSObject* getterFunc)
{
ASSERT(getterFunc);
+ m_thisValue = m_slotBase;
m_getValue = functionGetter;
m_data.getterFunc = getterFunc;
+ m_isGetter = true;
}
-
+
+ void setCacheableGetterSlot(JSValue slotBase, JSObject* getterFunc, unsigned offset)
+ {
+ ASSERT(getterFunc);
+ m_getValue = functionGetter;
+ m_thisValue = m_slotBase;
+ m_slotBase = slotBase;
+ m_data.getterFunc = getterFunc;
+ m_offset = offset;
+ m_isCacheable = true;
+ m_isGetter = true;
+ }
+
void setUndefined()
{
setValue(jsUndefined());
@@ -182,11 +200,14 @@ namespace JSC {
{
// Clear offset even in release builds, in case this PropertySlot has been used before.
// (For other data members, we don't need to clear anything because reuse would meaningfully overwrite them.)
- m_offset = WTF::notFound;
+ m_offset = 0;
+ m_isCacheable = false;
+ m_isGetter = false;
}
unsigned index() const { return m_data.index; }
+ JSValue thisValue() const { return m_thisValue; }
private:
static JSValue functionGetter(ExecState*, const Identifier&, const PropertySlot&);
@@ -201,8 +222,11 @@ namespace JSC {
} m_data;
JSValue m_value;
+ JSValue m_thisValue;
size_t m_offset;
+ bool m_isCacheable : 1;
+ bool m_isGetter : 1;
};
} // namespace JSC
diff --git a/JavaScriptCore/runtime/RegExp.cpp b/JavaScriptCore/runtime/RegExp.cpp
index 4e958f4..85d41ee 100644
--- a/JavaScriptCore/runtime/RegExp.cpp
+++ b/JavaScriptCore/runtime/RegExp.cpp
@@ -71,11 +71,11 @@ inline RegExp::RegExp(JSGlobalData* globalData, const UString& pattern, const US
{
// NOTE: The global flag is handled on a case-by-case basis by functions like
// String::match and RegExpObject::match.
- if (flags.find('g') != -1)
+ if (flags.find('g') != UString::NotFound)
m_flagBits |= Global;
- if (flags.find('i') != -1)
+ if (flags.find('i') != UString::NotFound)
m_flagBits |= IgnoreCase;
- if (flags.find('m') != -1)
+ if (flags.find('m') != UString::NotFound)
m_flagBits |= Multiline;
compile(globalData);
@@ -117,7 +117,7 @@ int RegExp::match(const UString& s, int startOffset, Vector<int, 32>* ovector)
if (ovector)
ovector->clear();
- if (startOffset > s.size() || s.isNull())
+ if (static_cast<unsigned>(startOffset) > s.size() || s.isNull())
return -1;
#if ENABLE(YARR_JIT)
@@ -188,7 +188,7 @@ int RegExp::match(const UString& s, int startOffset, Vector<int, 32>* ovector)
if (ovector)
ovector->clear();
- if (startOffset > s.size() || s.isNull())
+ if (static_cast<unsigned>(startOffset) > s.size() || s.isNull())
return -1;
#if ENABLE(WREC)
diff --git a/JavaScriptCore/runtime/SmallStrings.cpp b/JavaScriptCore/runtime/SmallStrings.cpp
index d9d4377..78bd4e4 100644
--- a/JavaScriptCore/runtime/SmallStrings.cpp
+++ b/JavaScriptCore/runtime/SmallStrings.cpp
@@ -83,7 +83,7 @@ void SmallStrings::markChildren(MarkStack& markStack)
bool isAnyStringMarked = isMarked(m_emptyString);
for (unsigned i = 0; i < numCharactersToStore && !isAnyStringMarked; ++i)
- isAnyStringMarked |= isMarked(m_singleCharacterStrings[i]);
+ isAnyStringMarked = isMarked(m_singleCharacterStrings[i]);
if (!isAnyStringMarked) {
clear();
diff --git a/JavaScriptCore/runtime/StringPrototype.cpp b/JavaScriptCore/runtime/StringPrototype.cpp
index 8c014ec..bef4083 100644
--- a/JavaScriptCore/runtime/StringPrototype.cpp
+++ b/JavaScriptCore/runtime/StringPrototype.cpp
@@ -150,7 +150,7 @@ bool StringPrototype::getOwnPropertyDescriptor(ExecState* exec, const Identifier
// ------------------------------ Functions --------------------------
-static NEVER_INLINE UString substituteBackreferencesSlow(const UString& replacement, const UString& source, const int* ovector, RegExp* reg, int i)
+static NEVER_INLINE UString substituteBackreferencesSlow(const UString& replacement, const UString& source, const int* ovector, RegExp* reg, unsigned i)
{
Vector<UChar> substitutedReplacement;
int offset = 0;
@@ -206,7 +206,7 @@ static NEVER_INLINE UString substituteBackreferencesSlow(const UString& replacem
i += 1 + advance;
offset = i + 1;
substitutedReplacement.append(source.data() + backrefStart, backrefLength);
- } while ((i = replacement.find('$', i + 1)) != -1);
+ } while ((i = replacement.find('$', i + 1)) != UString::NotFound);
if (replacement.size() - offset)
substitutedReplacement.append(replacement.data() + offset, replacement.size() - offset);
@@ -217,8 +217,8 @@ static NEVER_INLINE UString substituteBackreferencesSlow(const UString& replacem
static inline UString substituteBackreferences(const UString& replacement, const UString& source, const int* ovector, RegExp* reg)
{
- int i = replacement.find('$', 0);
- if (UNLIKELY(i != -1))
+ unsigned i = replacement.find('$', 0);
+ if (UNLIKELY(i != UString::NotFound))
return substituteBackreferencesSlow(replacement, source, ovector, reg, i);
return replacement;
}
@@ -329,7 +329,7 @@ JSValue JSC_HOST_CALL stringProtoFuncReplace(ExecState* exec, JSObject*, JSValue
RegExpConstructor* regExpConstructor = exec->lexicalGlobalObject()->regExpConstructor();
int lastIndex = 0;
- int startPosition = 0;
+ unsigned startPosition = 0;
Vector<StringRange, 16> sourceRanges;
Vector<UString, 16> replacements;
@@ -432,7 +432,7 @@ JSValue JSC_HOST_CALL stringProtoFuncReplace(ExecState* exec, JSObject*, JSValue
if (!lastIndex && replacements.isEmpty())
return sourceVal;
- if (lastIndex < source.size())
+ if (static_cast<unsigned>(lastIndex) < source.size())
sourceRanges.append(StringRange(lastIndex, source.size() - lastIndex));
return jsSpliceSubstringsWithSeparators(exec, sourceVal, source, sourceRanges.data(), sourceRanges.size(), replacements.data(), replacements.size());
@@ -441,9 +441,9 @@ JSValue JSC_HOST_CALL stringProtoFuncReplace(ExecState* exec, JSObject*, JSValue
// Not a regular expression, so treat the pattern as a string.
UString patternString = pattern.toString(exec);
- int matchPos = source.find(patternString);
+ unsigned matchPos = source.find(patternString);
- if (matchPos == -1)
+ if (matchPos == UString::NotFound)
return sourceVal;
int matchLen = patternString.size();
@@ -541,7 +541,10 @@ JSValue JSC_HOST_CALL stringProtoFuncIndexOf(ExecState* exec, JSObject*, JSValue
pos = static_cast<int>(dpos);
}
- return jsNumber(exec, s.find(u2, pos));
+ unsigned result = s.find(u2, pos);
+ if (result == UString::NotFound)
+ return jsNumber(exec, -1);
+ return jsNumber(exec, result);
}
JSValue JSC_HOST_CALL stringProtoFuncLastIndexOf(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
@@ -563,7 +566,11 @@ JSValue JSC_HOST_CALL stringProtoFuncLastIndexOf(ExecState* exec, JSObject*, JSV
else if (isnan(dpos))
dpos = len;
#endif
- return jsNumber(exec, s.rfind(u2, static_cast<int>(dpos)));
+
+ unsigned result = s.rfind(u2, static_cast<unsigned>(dpos));
+ if (result == UString::NotFound)
+ return jsNumber(exec, -1);
+ return jsNumber(exec, result);
}
JSValue JSC_HOST_CALL stringProtoFuncMatch(ExecState* exec, JSObject*, JSValue thisValue, const ArgList& args)
@@ -675,7 +682,7 @@ JSValue JSC_HOST_CALL stringProtoFuncSplit(ExecState* exec, JSObject*, JSValue t
JSArray* result = constructEmptyArray(exec);
unsigned i = 0;
- int p0 = 0;
+ unsigned p0 = 0;
unsigned limit = a1.isUndefined() ? 0xFFFFFFFFU : a1.toUInt32(exec);
if (a0.inherits(&RegExpObject::info)) {
RegExp* reg = asRegExpObject(a0)->regExp();
@@ -683,7 +690,7 @@ JSValue JSC_HOST_CALL stringProtoFuncSplit(ExecState* exec, JSObject*, JSValue t
// empty string matched by regexp -> empty array
return result;
}
- int pos = 0;
+ unsigned pos = 0;
while (i != limit && pos < s.size()) {
Vector<int, 32> ovector;
int mpos = reg->match(s, pos, &ovector);
@@ -691,7 +698,7 @@ JSValue JSC_HOST_CALL stringProtoFuncSplit(ExecState* exec, JSObject*, JSValue t
break;
int mlen = ovector[1] - ovector[0];
pos = mpos + (mlen == 0 ? 1 : mlen);
- if (mpos != p0 || mlen) {
+ if (static_cast<unsigned>(mpos) != p0 || mlen) {
result->put(exec, i++, jsSubstring(exec, s, p0, mpos - p0));
p0 = mpos + mlen;
}
@@ -713,8 +720,9 @@ JSValue JSC_HOST_CALL stringProtoFuncSplit(ExecState* exec, JSObject*, JSValue t
while (i != limit && p0 < s.size() - 1)
result->put(exec, i++, jsSingleCharacterSubstring(exec, s, p0++));
} else {
- int pos;
- while (i != limit && (pos = s.find(u2, p0)) >= 0) {
+ unsigned pos;
+
+ while (i != limit && (pos = s.find(u2, p0)) != UString::NotFound) {
result->put(exec, i++, jsSubstring(exec, s, p0, pos - p0));
p0 = pos + u2.size();
}
@@ -1022,12 +1030,12 @@ static inline bool isTrimWhitespace(UChar c)
static inline JSValue trimString(ExecState* exec, JSValue thisValue, int trimKind)
{
UString str = thisValue.toThisString(exec);
- int left = 0;
+ unsigned left = 0;
if (trimKind & TrimLeft) {
while (left < str.size() && isTrimWhitespace(str[left]))
left++;
}
- int right = str.size();
+ unsigned right = str.size();
if (trimKind & TrimRight) {
while (right > left && isTrimWhitespace(str[right - 1]))
right--;
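
The RegExp.cpp and StringPrototype.cpp hunks above move the call sites of UString::find/rfind from a signed -1 failure value to an unsigned NotFound sentinel, translating back to -1 only where a JavaScript-visible result (indexOf, lastIndexOf) requires it. A standalone sketch of that convention follows; the names here are illustrative and not the UString API.

#include <cassert>
#include <string>

// Failure sentinel for an unsigned search result: the largest unsigned value,
// which can never be a valid index into a string whose length fits in unsigned.
static const unsigned NotFound = static_cast<unsigned>(-1);

static unsigned findChar(const std::string& s, char c, unsigned from = 0)
{
    for (unsigned i = from; i < s.size(); ++i)
        if (s[i] == c)
            return i;
    return NotFound;
}

// Translate to the -1 convention only at the boundary that scripts observe.
static int indexOfResult(unsigned pos)
{
    return pos == NotFound ? -1 : static_cast<int>(pos);
}

int main()
{
    assert(findChar("ciao", 'a') == 2);
    assert(findChar("ciao", 'z') == NotFound);
    assert(indexOfResult(findChar("ciao", 'z')) == -1);
    return 0;
}
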
diff --git a/JavaScriptCore/runtime/Structure.cpp b/JavaScriptCore/runtime/Structure.cpp
index 546e2bf..ebf8a4c 100644
--- a/JavaScriptCore/runtime/Structure.cpp
+++ b/JavaScriptCore/runtime/Structure.cpp
@@ -79,6 +79,106 @@ static HashSet<Structure*>& liveStructureSet = *(new HashSet<Structure*>);
static int comparePropertyMapEntryIndices(const void* a, const void* b);
+inline void Structure::setTransitionTable(TransitionTable* table)
+{
+ ASSERT(m_isUsingSingleSlot);
+#ifndef NDEBUG
+ setSingleTransition(0);
+#endif
+ m_isUsingSingleSlot = false;
+ m_transitions.m_table = table;
+ // This implicitly clears the flag that indicates we're using a single transition
+ ASSERT(!m_isUsingSingleSlot);
+}
+
+// The contains and get methods accept imprecise matches, so if an unspecialised transition exists
+// for the given key they will consider that transition to be a match. If a specialised transition
+// exists and it matches the provided specificValue, get will return the specific transition.
+inline bool Structure::transitionTableContains(const StructureTransitionTableHash::Key& key, JSCell* specificValue)
+{
+ if (m_isUsingSingleSlot) {
+ Structure* existingTransition = singleTransition();
+ return existingTransition && existingTransition->m_nameInPrevious.get() == key.first
+ && existingTransition->m_attributesInPrevious == key.second
+ && (existingTransition->m_specificValueInPrevious == specificValue || existingTransition->m_specificValueInPrevious == 0);
+ }
+ TransitionTable::iterator find = transitionTable()->find(key);
+ if (find == transitionTable()->end())
+ return false;
+
+ return find->second.first || find->second.second->transitionedFor(specificValue);
+}
+
+inline Structure* Structure::transitionTableGet(const StructureTransitionTableHash::Key& key, JSCell* specificValue) const
+{
+ if (m_isUsingSingleSlot) {
+ Structure* existingTransition = singleTransition();
+ if (existingTransition && existingTransition->m_nameInPrevious.get() == key.first
+ && existingTransition->m_attributesInPrevious == key.second
+ && (existingTransition->m_specificValueInPrevious == specificValue || existingTransition->m_specificValueInPrevious == 0))
+ return existingTransition;
+ return 0;
+ }
+
+ Transition transition = transitionTable()->get(key);
+ if (transition.second && transition.second->transitionedFor(specificValue))
+ return transition.second;
+ return transition.first;
+}
+
+inline bool Structure::transitionTableHasTransition(const StructureTransitionTableHash::Key& key) const
+{
+ if (m_isUsingSingleSlot) {
+ Structure* transition = singleTransition();
+ return transition && transition->m_nameInPrevious == key.first
+ && transition->m_attributesInPrevious == key.second;
+ }
+ return transitionTable()->contains(key);
+}
+
+inline void Structure::transitionTableRemove(const StructureTransitionTableHash::Key& key, JSCell* specificValue)
+{
+ if (m_isUsingSingleSlot) {
+ ASSERT(transitionTableContains(key, specificValue));
+ setSingleTransition(0);
+ return;
+ }
+ TransitionTable::iterator find = transitionTable()->find(key);
+ if (!specificValue)
+ find->second.first = 0;
+ else
+ find->second.second = 0;
+ if (!find->second.first && !find->second.second)
+ transitionTable()->remove(find);
+}
+
+inline void Structure::transitionTableAdd(const StructureTransitionTableHash::Key& key, Structure* structure, JSCell* specificValue)
+{
+ if (m_isUsingSingleSlot) {
+ if (!singleTransition()) {
+ setSingleTransition(structure);
+ return;
+ }
+ Structure* existingTransition = singleTransition();
+ TransitionTable* transitionTable = new TransitionTable;
+ setTransitionTable(transitionTable);
+ if (existingTransition)
+ transitionTableAdd(std::make_pair(existingTransition->m_nameInPrevious.get(), existingTransition->m_attributesInPrevious), existingTransition, existingTransition->m_specificValueInPrevious);
+ }
+ if (!specificValue) {
+ TransitionTable::iterator find = transitionTable()->find(key);
+ if (find == transitionTable()->end())
+ transitionTable()->add(key, Transition(structure, 0));
+ else
+ find->second.first = structure;
+ } else {
+ // If we're adding a transition to a specific value, then there cannot be
+ // an existing transition
+ ASSERT(!transitionTable()->contains(key));
+ transitionTable()->add(key, Transition(0, structure));
+ }
+}
+
void Structure::dumpStatistics()
{
#if DUMP_STRUCTURE_ID_STATISTICS
@@ -136,7 +236,10 @@ Structure::Structure(JSValue prototype, const TypeInfo& typeInfo, unsigned anony
, m_attributesInPrevious(0)
, m_specificFunctionThrashCount(0)
, m_anonymousSlotCount(anonymousSlotCount)
+ , m_isUsingSingleSlot(true)
{
+ m_transitions.m_singleTransition = 0;
+
ASSERT(m_prototype);
ASSERT(m_prototype.isObject() || m_prototype.isNull());
@@ -159,7 +262,7 @@ Structure::~Structure()
{
if (m_previous) {
ASSERT(m_nameInPrevious);
- m_previous->table.remove(make_pair(m_nameInPrevious.get(), m_attributesInPrevious), m_specificValueInPrevious);
+ m_previous->transitionTableRemove(make_pair(m_nameInPrevious.get(), m_attributesInPrevious), m_specificValueInPrevious);
}
@@ -177,6 +280,9 @@ Structure::~Structure()
fastFree(m_propertyTable);
}
+ if (!m_isUsingSingleSlot)
+ delete transitionTable();
+
#ifndef NDEBUG
#if ENABLE(JSC_MULTIPLE_THREADS)
MutexLocker protect(ignoreSetMutex);
@@ -340,7 +446,7 @@ PassRefPtr<Structure> Structure::addPropertyTransitionToExistingStructure(Struct
ASSERT(!structure->isDictionary());
ASSERT(structure->typeInfo().type() == ObjectType);
- if (Structure* existingTransition = structure->table.get(make_pair(propertyName.ustring().rep(), attributes), specificValue)) {
+ if (Structure* existingTransition = structure->transitionTableGet(make_pair(propertyName.ustring().rep(), attributes), specificValue)) {
ASSERT(existingTransition->m_offset != noOffset);
offset = existingTransition->m_offset + existingTransition->m_anonymousSlotCount;
ASSERT(offset >= structure->m_anonymousSlotCount);
@@ -405,7 +511,7 @@ PassRefPtr<Structure> Structure::addPropertyTransition(Structure* structure, con
transition->m_offset = offset - structure->m_anonymousSlotCount;
ASSERT(structure->anonymousSlotCount() == transition->anonymousSlotCount());
- structure->table.add(make_pair(propertyName.ustring().rep(), attributes), transition.get(), specificValue);
+ structure->transitionTableAdd(make_pair(propertyName.ustring().rep(), attributes), transition.get(), specificValue);
return transition.release();
}
@@ -852,7 +958,7 @@ size_t Structure::put(const Identifier& propertyName, unsigned attributes, JSCel
bool Structure::hasTransition(UString::Rep* rep, unsigned attributes)
{
- return table.hasTransition(make_pair(rep, attributes));
+ return transitionTableHasTransition(make_pair(rep, attributes));
}
size_t Structure::remove(const Identifier& propertyName)
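
Taken together, the new Structure::transitionTable* methods above inline what used to live in a separate StructureTransitionTable member: a union holds either a single transition pointer or a heap-allocated hash map, and a one-bit flag records which, so the map is only allocated once a second transition appears. A simplified sketch of that one-inline-slot-then-reify pattern, with illustrative names (SingleOrMap and the std::string key are not JSC types):

    #include <string>
    #include <unordered_map>

    // One inline slot that lazily reifies into a hash map when a second entry
    // is added; this mirrors the shape of the transition storage in Structure.
    class SingleOrMap {
    public:
        SingleOrMap() : m_usingSingleSlot(true) { m_storage.single = nullptr; }
        ~SingleOrMap() { if (!m_usingSingleSlot) delete m_storage.map; }

        void add(const std::string& key, int* value)
        {
            if (m_usingSingleSlot) {
                if (!m_storage.single) {            // first entry stays inline
                    m_singleKey = key;
                    m_storage.single = value;
                    return;
                }
                // Second entry: migrate the inline slot into a real map first.
                int* existing = m_storage.single;
                std::string existingKey = m_singleKey;
                m_usingSingleSlot = false;
                m_storage.map = new std::unordered_map<std::string, int*>;
                m_storage.map->emplace(existingKey, existing);
            }
            (*m_storage.map)[key] = value;
        }

        int* get(const std::string& key) const
        {
            if (m_usingSingleSlot)
                return (m_storage.single && key == m_singleKey) ? m_storage.single : nullptr;
            auto it = m_storage.map->find(key);
            return it == m_storage.map->end() ? nullptr : it->second;
        }

    private:
        bool m_usingSingleSlot;
        std::string m_singleKey;     // Structure instead recovers the key from the child structure
        union {
            int* single;
            std::unordered_map<std::string, int*>* map;
        } m_storage;
    };
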
diff --git a/JavaScriptCore/runtime/Structure.h b/JavaScriptCore/runtime/Structure.h
index 95cf94c..968443a 100644
--- a/JavaScriptCore/runtime/Structure.h
+++ b/JavaScriptCore/runtime/Structure.h
@@ -179,6 +179,20 @@ namespace JSC {
// Since the number of transitions is always the same as m_offset, we keep the size of Structure down by not storing both.
return m_offset == noOffset ? 0 : m_offset + 1;
}
+
+ typedef std::pair<Structure*, Structure*> Transition;
+ typedef HashMap<StructureTransitionTableHash::Key, Transition, StructureTransitionTableHash, StructureTransitionTableHashTraits> TransitionTable;
+
+ inline bool transitionTableContains(const StructureTransitionTableHash::Key& key, JSCell* specificValue);
+ inline void transitionTableRemove(const StructureTransitionTableHash::Key& key, JSCell* specificValue);
+ inline void transitionTableAdd(const StructureTransitionTableHash::Key& key, Structure* structure, JSCell* specificValue);
+ inline bool transitionTableHasTransition(const StructureTransitionTableHash::Key& key) const;
+ inline Structure* transitionTableGet(const StructureTransitionTableHash::Key& key, JSCell* specificValue) const;
+
+ TransitionTable* transitionTable() const { ASSERT(!m_isUsingSingleSlot); return m_transitions.m_table; }
+ inline void setTransitionTable(TransitionTable* table);
+ Structure* singleTransition() const { ASSERT(m_isUsingSingleSlot); return m_transitions.m_singleTransition; }
+ void setSingleTransition(Structure* structure) { ASSERT(m_isUsingSingleSlot); m_transitions.m_singleTransition = structure; }
bool isValid(ExecState*, StructureChain* cachedPrototypeChain) const;
@@ -199,7 +213,11 @@ namespace JSC {
RefPtr<UString::Rep> m_nameInPrevious;
JSCell* m_specificValueInPrevious;
- StructureTransitionTable table;
+ // 'm_isUsingSingleSlot' indicates whether we are using the single transition optimisation.
+ union {
+ TransitionTable* m_table;
+ Structure* m_singleTransition;
+ } m_transitions;
WeakGCPtr<JSPropertyNameIterator> m_enumerationCache;
@@ -224,7 +242,8 @@ namespace JSC {
#endif
unsigned m_specificFunctionThrashCount : 2;
unsigned m_anonymousSlotCount : 5;
- // 5 free bits
+ unsigned m_isUsingSingleSlot : 1;
+ // 4 free bits
};
inline size_t Structure::get(const Identifier& propertyName)
@@ -271,58 +290,7 @@ namespace JSC {
return m_propertyTable->entries()[entryIndex - 1].offset;
}
}
-
- bool StructureTransitionTable::contains(const StructureTransitionTableHash::Key& key, JSCell* specificValue)
- {
- if (usingSingleTransitionSlot()) {
- Structure* existingTransition = singleTransition();
- return existingTransition && existingTransition->m_nameInPrevious.get() == key.first
- && existingTransition->m_attributesInPrevious == key.second
- && (existingTransition->m_specificValueInPrevious == specificValue || existingTransition->m_specificValueInPrevious == 0);
- }
- TransitionTable::iterator find = table()->find(key);
- if (find == table()->end())
- return false;
-
- return find->second.first || find->second.second->transitionedFor(specificValue);
- }
- Structure* StructureTransitionTable::get(const StructureTransitionTableHash::Key& key, JSCell* specificValue) const
- {
- if (usingSingleTransitionSlot()) {
- Structure* existingTransition = singleTransition();
- if (existingTransition && existingTransition->m_nameInPrevious.get() == key.first
- && existingTransition->m_attributesInPrevious == key.second
- && (existingTransition->m_specificValueInPrevious == specificValue || existingTransition->m_specificValueInPrevious == 0))
- return existingTransition;
- return 0;
- }
-
- Transition transition = table()->get(key);
- if (transition.second && transition.second->transitionedFor(specificValue))
- return transition.second;
- return transition.first;
- }
-
- bool StructureTransitionTable::hasTransition(const StructureTransitionTableHash::Key& key) const
- {
- if (usingSingleTransitionSlot()) {
- Structure* transition = singleTransition();
- return transition && transition->m_nameInPrevious == key.first
- && transition->m_attributesInPrevious == key.second;
- }
- return table()->contains(key);
- }
-
- void StructureTransitionTable::reifySingleTransition()
- {
- ASSERT(usingSingleTransitionSlot());
- Structure* existingTransition = singleTransition();
- TransitionTable* transitionTable = new TransitionTable;
- setTransitionTable(transitionTable);
- if (existingTransition)
- add(std::make_pair(existingTransition->m_nameInPrevious.get(), existingTransition->m_attributesInPrevious), existingTransition, existingTransition->m_specificValueInPrevious);
- }
} // namespace JSC
#endif // Structure_h
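
The header change above pays for the new flag out of the existing bitfield (the "5 free bits" comment drops to 4), so replacing the StructureTransitionTable member with a bare union does not grow Structure. A small sketch of that layout idea, keeping only the bitfields visible in this hunk and using an illustrative stand-in for the union member:

    #include <cstdio>

    struct PackedFlags {
        void* transitionSlot;               // stands in for the table/single-transition union
        unsigned specificFunctionThrashCount : 2;
        unsigned anonymousSlotCount : 5;
        unsigned isUsingSingleSlot : 1;     // the new flag occupies a previously spare bit
        // remaining bits in this word stay free
    };

    int main()
    {
        // On typical ABIs the extra 1-bit field shares the same word as the
        // existing bitfields, so adding it does not change the struct size.
        std::printf("sizeof(PackedFlags) = %zu\n", sizeof(PackedFlags));
        return 0;
    }
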
diff --git a/JavaScriptCore/runtime/StructureTransitionTable.h b/JavaScriptCore/runtime/StructureTransitionTable.h
index 320dbdd..d1dc2d9 100644
--- a/JavaScriptCore/runtime/StructureTransitionTable.h
+++ b/JavaScriptCore/runtime/StructureTransitionTable.h
@@ -30,7 +30,6 @@
#include <wtf/HashFunctions.h>
#include <wtf/HashMap.h>
#include <wtf/HashTraits.h>
-#include <wtf/PtrAndFlags.h>
#include <wtf/OwnPtr.h>
#include <wtf/RefPtr.h>
@@ -67,99 +66,6 @@ namespace JSC {
static bool isDeletedValue(const TraitType& value) { return FirstTraits::isDeletedValue(value.first); }
};
- class StructureTransitionTable {
- typedef std::pair<Structure*, Structure*> Transition;
- typedef HashMap<StructureTransitionTableHash::Key, Transition, StructureTransitionTableHash, StructureTransitionTableHashTraits> TransitionTable;
- public:
- StructureTransitionTable() {
- m_transitions.m_singleTransition.set(0);
- m_transitions.m_singleTransition.setFlag(usingSingleSlot);
- }
-
- ~StructureTransitionTable() {
- if (!usingSingleTransitionSlot())
- delete table();
- }
-
- // The contains and get methods accept imprecise matches, so if an unspecialised transition exists
- // for the given key they will consider that transition to be a match. If a specialised transition
- // exists and it matches the provided specificValue, get will return the specific transition.
- inline bool contains(const StructureTransitionTableHash::Key&, JSCell* specificValue);
- inline Structure* get(const StructureTransitionTableHash::Key&, JSCell* specificValue) const;
- inline bool hasTransition(const StructureTransitionTableHash::Key& key) const;
- void remove(const StructureTransitionTableHash::Key& key, JSCell* specificValue)
- {
- if (usingSingleTransitionSlot()) {
- ASSERT(contains(key, specificValue));
- setSingleTransition(0);
- return;
- }
- TransitionTable::iterator find = table()->find(key);
- if (!specificValue)
- find->second.first = 0;
- else
- find->second.second = 0;
- if (!find->second.first && !find->second.second)
- table()->remove(find);
- }
- void add(const StructureTransitionTableHash::Key& key, Structure* structure, JSCell* specificValue)
- {
- if (usingSingleTransitionSlot()) {
- if (!singleTransition()) {
- setSingleTransition(structure);
- return;
- }
- reifySingleTransition();
- }
- if (!specificValue) {
- TransitionTable::iterator find = table()->find(key);
- if (find == table()->end())
- table()->add(key, Transition(structure, 0));
- else
- find->second.first = structure;
- } else {
- // If we're adding a transition to a specific value, then there cannot be
- // an existing transition
- ASSERT(!table()->contains(key));
- table()->add(key, Transition(0, structure));
- }
- }
-
- private:
- TransitionTable* table() const { ASSERT(!usingSingleTransitionSlot()); return m_transitions.m_table; }
- Structure* singleTransition() const {
- ASSERT(usingSingleTransitionSlot());
- return m_transitions.m_singleTransition.get();
- }
- bool usingSingleTransitionSlot() const { return m_transitions.m_singleTransition.isFlagSet(usingSingleSlot); }
- void setSingleTransition(Structure* structure)
- {
- ASSERT(usingSingleTransitionSlot());
- m_transitions.m_singleTransition.set(structure);
- }
-
- void setTransitionTable(TransitionTable* table)
- {
- ASSERT(usingSingleTransitionSlot());
-#ifndef NDEBUG
- setSingleTransition(0);
-#endif
- m_transitions.m_table = table;
- // This implicitly clears the flag that indicates we're using a single transition
- ASSERT(!usingSingleTransitionSlot());
- }
- inline void reifySingleTransition();
-
- enum UsingSingleSlot {
- usingSingleSlot
- };
- // Last bit indicates whether we are using the single transition optimisation
- union {
- TransitionTable* m_table;
- PtrAndFlagsBase<Structure, UsingSingleSlot> m_singleTransition;
- } m_transitions;
- };
-
} // namespace JSC
#endif // StructureTransitionTable_h
diff --git a/JavaScriptCore/runtime/UString.cpp b/JavaScriptCore/runtime/UString.cpp
index 4a89a23..1684ec2 100644
--- a/JavaScriptCore/runtime/UString.cpp
+++ b/JavaScriptCore/runtime/UString.cpp
@@ -167,12 +167,12 @@ UString::UString(const char* c)
{
}
-UString::UString(const char* c, int length)
+UString::UString(const char* c, unsigned length)
: m_rep(Rep::create(c, length))
{
}
-UString::UString(const UChar* c, int length)
+UString::UString(const UChar* c, unsigned length)
{
if (length == 0)
m_rep = &Rep::empty();
@@ -206,7 +206,7 @@ UString UString::from(int i)
*--p = '-';
}
- return UString(p, static_cast<int>(end - p));
+ return UString(p, static_cast<unsigned>(end - p));
}
UString UString::from(long long i)
@@ -239,7 +239,7 @@ UString UString::from(long long i)
*--p = '-';
}
- return UString(p, static_cast<int>(end - p));
+ return UString(p, static_cast<unsigned>(end - p));
}
UString UString::from(unsigned int u)
@@ -257,7 +257,7 @@ UString UString::from(unsigned int u)
}
}
- return UString(p, static_cast<int>(end - p));
+ return UString(p, static_cast<unsigned>(end - p));
}
UString UString::from(long l)
@@ -286,7 +286,7 @@ UString UString::from(long l)
*--p = '-';
}
- return UString(p, static_cast<int>(end - p));
+ return UString(p, end - p);
}
UString UString::from(double d)
@@ -299,8 +299,8 @@ UString UString::from(double d)
bool UString::getCString(CStringBuffer& buffer) const
{
- int length = size();
- int neededSize = length + 1;
+ unsigned length = size();
+ unsigned neededSize = length + 1;
buffer.resize(neededSize);
char* buf = buffer.data();
@@ -324,8 +324,8 @@ char* UString::ascii() const
{
static char* asciiBuffer = 0;
- int length = size();
- int neededSize = length + 1;
+ unsigned length = size();
+ unsigned neededSize = length + 1;
delete[] asciiBuffer;
asciiBuffer = new char[neededSize];
@@ -355,7 +355,7 @@ bool UString::is8Bit() const
return true;
}
-UChar UString::operator[](int pos) const
+UChar UString::operator[](unsigned pos) const
{
if (pos >= size())
return '\0';
@@ -495,7 +495,7 @@ uint32_t UString::toStrictUInt32(bool* ok) const
*ok = false;
// Empty string is not OK.
- int len = m_rep->size();
+ unsigned len = m_rep->length();
if (len == 0)
return 0;
const UChar* p = m_rep->data();
@@ -539,12 +539,9 @@ uint32_t UString::toStrictUInt32(bool* ok) const
}
}
-int UString::find(const UString& f, int pos) const
+unsigned UString::find(const UString& f, unsigned pos) const
{
- int fsz = f.size();
-
- if (pos < 0)
- pos = 0;
+ unsigned fsz = f.size();
if (fsz == 1) {
UChar ch = f[0];
@@ -553,16 +550,16 @@ int UString::find(const UString& f, int pos) const
if (*c == ch)
return static_cast<int>(c - data());
}
- return -1;
+ return NotFound;
}
- int sz = size();
+ unsigned sz = size();
if (sz < fsz)
- return -1;
+ return NotFound;
if (fsz == 0)
return pos;
const UChar* end = data() + sz - fsz;
- int fsizeminusone = (fsz - 1) * sizeof(UChar);
+ unsigned fsizeminusone = (fsz - 1) * sizeof(UChar);
const UChar* fdata = f.data();
unsigned short fchar = fdata[0];
++fdata;
@@ -571,48 +568,44 @@ int UString::find(const UString& f, int pos) const
return static_cast<int>(c - data());
}
- return -1;
+ return NotFound;
}
-int UString::find(UChar ch, int pos) const
+unsigned UString::find(UChar ch, unsigned pos) const
{
- if (pos < 0)
- pos = 0;
const UChar* end = data() + size();
for (const UChar* c = data() + pos; c < end; c++) {
if (*c == ch)
return static_cast<int>(c - data());
}
- return -1;
+ return NotFound;
}
-int UString::rfind(const UString& f, int pos) const
+unsigned UString::rfind(const UString& f, unsigned pos) const
{
- int sz = size();
- int fsz = f.size();
+ unsigned sz = size();
+ unsigned fsz = f.size();
if (sz < fsz)
- return -1;
- if (pos < 0)
- pos = 0;
+ return NotFound;
if (pos > sz - fsz)
pos = sz - fsz;
if (fsz == 0)
return pos;
- int fsizeminusone = (fsz - 1) * sizeof(UChar);
+ unsigned fsizeminusone = (fsz - 1) * sizeof(UChar);
const UChar* fdata = f.data();
for (const UChar* c = data() + pos; c >= data(); c--) {
if (*c == *fdata && !memcmp(c + 1, fdata + 1, fsizeminusone))
return static_cast<int>(c - data());
}
- return -1;
+ return NotFound;
}
-int UString::rfind(UChar ch, int pos) const
+unsigned UString::rfind(UChar ch, unsigned pos) const
{
if (isEmpty())
- return -1;
+ return NotFound;
if (pos + 1 >= size())
pos = size() - 1;
for (const UChar* c = data() + pos; c >= data(); c--) {
@@ -620,21 +613,18 @@ int UString::rfind(UChar ch, int pos) const
return static_cast<int>(c - data());
}
- return -1;
+ return NotFound;
}
-UString UString::substr(int pos, int len) const
+UString UString::substr(unsigned pos, unsigned len) const
{
- int s = size();
+ unsigned s = size();
- if (pos < 0)
- pos = 0;
- else if (pos >= s)
+ if (pos >= s)
pos = s;
- if (len < 0)
- len = s;
- if (pos + len >= s)
- len = s - pos;
+ unsigned limit = s - pos;
+ if (len > limit)
+ len = limit;
if (pos == 0 && len == s)
return *this;
@@ -661,12 +651,12 @@ bool operator==(const UString& s1, const char *s2)
bool operator<(const UString& s1, const UString& s2)
{
- const int l1 = s1.size();
- const int l2 = s2.size();
- const int lmin = l1 < l2 ? l1 : l2;
+ const unsigned l1 = s1.size();
+ const unsigned l2 = s2.size();
+ const unsigned lmin = l1 < l2 ? l1 : l2;
const UChar* c1 = s1.data();
const UChar* c2 = s2.data();
- int l = 0;
+ unsigned l = 0;
while (l < lmin && *c1 == *c2) {
c1++;
c2++;
@@ -680,12 +670,12 @@ bool operator<(const UString& s1, const UString& s2)
bool operator>(const UString& s1, const UString& s2)
{
- const int l1 = s1.size();
- const int l2 = s2.size();
- const int lmin = l1 < l2 ? l1 : l2;
+ const unsigned l1 = s1.size();
+ const unsigned l2 = s2.size();
+ const unsigned lmin = l1 < l2 ? l1 : l2;
const UChar* c1 = s1.data();
const UChar* c2 = s2.data();
- int l = 0;
+ unsigned l = 0;
while (l < lmin && *c1 == *c2) {
c1++;
c2++;
@@ -699,12 +689,12 @@ bool operator>(const UString& s1, const UString& s2)
int compare(const UString& s1, const UString& s2)
{
- const int l1 = s1.size();
- const int l2 = s2.size();
- const int lmin = l1 < l2 ? l1 : l2;
+ const unsigned l1 = s1.size();
+ const unsigned l2 = s2.size();
+ const unsigned lmin = l1 < l2 ? l1 : l2;
const UChar* c1 = s1.data();
const UChar* c2 = s2.data();
- int l = 0;
+ unsigned l = 0;
while (l < lmin && *c1 == *c2) {
c1++;
c2++;
@@ -722,12 +712,12 @@ int compare(const UString& s1, const UString& s2)
bool equal(const UString::Rep* r, const UString::Rep* b)
{
- int length = r->size();
- if (length != b->size())
+ unsigned length = r->length();
+ if (length != b->length())
return false;
const UChar* d = r->data();
const UChar* s = b->data();
- for (int i = 0; i != length; ++i) {
+ for (unsigned i = 0; i != length; ++i) {
if (d[i] != s[i])
return false;
}
@@ -737,7 +727,7 @@ bool equal(const UString::Rep* r, const UString::Rep* b)
CString UString::UTF8String(bool strict) const
{
// Allocate a buffer big enough to hold all the characters.
- const int length = size();
+ const unsigned length = size();
Vector<char, 1024> buffer(length * 3);
// Convert to runs of 8-bit characters.
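
In the UString.cpp hunks above, find() and rfind() switch from returning int with a -1 sentinel to returning unsigned with NotFound (0xFFFFFFFF), and the old pos < 0 clamps disappear. A standalone sketch of the convention callers now follow (indexOf and notFound are illustrative names, not JSC API):

    #include <cstdio>

    static const unsigned notFound = 0xFFFFFFFFu;

    // Returns the index of the first occurrence of 'ch' at or after 'pos',
    // or notFound if there is none.
    unsigned indexOf(const char* data, unsigned length, char ch, unsigned pos = 0)
    {
        for (unsigned i = pos; i < length; ++i) {
            if (data[i] == ch)
                return i;
        }
        return notFound;
    }

    int main()
    {
        const char text[] = "hello";
        unsigned i = indexOf(text, 5, 'l');
        // With unsigned results, "i >= 0" is always true; the check must be
        // an explicit comparison against the sentinel.
        if (i != notFound)
            std::printf("found at %u\n", i);
        return 0;
    }
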
diff --git a/JavaScriptCore/runtime/UString.h b/JavaScriptCore/runtime/UString.h
index 7d9ec49..75b43b7 100644
--- a/JavaScriptCore/runtime/UString.h
+++ b/JavaScriptCore/runtime/UString.h
@@ -31,7 +31,6 @@
#include <wtf/CrossThreadRefCounted.h>
#include <wtf/OwnFastMallocPtr.h>
#include <wtf/PassRefPtr.h>
-#include <wtf/PtrAndFlags.h>
#include <wtf/RefPtr.h>
#include <wtf/Vector.h>
#include <wtf/unicode/Unicode.h>
@@ -83,8 +82,8 @@ namespace JSC {
public:
UString();
UString(const char*); // Constructor for null-terminated string.
- UString(const char*, int length);
- UString(const UChar*, int length);
+ UString(const char*, unsigned length);
+ UString(const UChar*, unsigned length);
UString(const Vector<UChar>& buffer);
UString(const UString& s)
@@ -133,13 +132,13 @@ namespace JSC {
const UChar* data() const { return m_rep->data(); }
bool isNull() const { return m_rep == s_nullRep; }
- bool isEmpty() const { return !m_rep->size(); }
+ bool isEmpty() const { return !m_rep->length(); }
bool is8Bit() const;
- int size() const { return m_rep->size(); }
+ unsigned size() const { return m_rep->length(); }
- UChar operator[](int pos) const;
+ UChar operator[](unsigned pos) const;
double toDouble(bool tolerateTrailingJunk, bool tolerateEmptyString) const;
double toDouble(bool tolerateTrailingJunk) const;
@@ -151,12 +150,13 @@ namespace JSC {
unsigned toArrayIndex(bool* ok = 0) const;
- int find(const UString& f, int pos = 0) const;
- int find(UChar, int pos = 0) const;
- int rfind(const UString& f, int pos) const;
- int rfind(UChar, int pos) const;
+ static const unsigned NotFound = 0xFFFFFFFFu;
+ unsigned find(const UString& f, unsigned pos = 0) const;
+ unsigned find(UChar, unsigned pos = 0) const;
+ unsigned rfind(const UString& f, unsigned pos) const;
+ unsigned rfind(UChar, unsigned pos) const;
- UString substr(int pos = 0, int len = -1) const;
+ UString substr(unsigned pos = 0, unsigned len = 0xFFFFFFFF) const;
static const UString& null() { return *s_nullUString; }
@@ -182,7 +182,7 @@ namespace JSC {
ALWAYS_INLINE bool operator==(const UString& s1, const UString& s2)
{
- int size = s1.size();
+ unsigned size = s1.size();
switch (size) {
case 0:
return !s2.size();
@@ -246,9 +246,7 @@ namespace JSC {
// We'd rather not do shared substring append for small strings, since
// this runs too much risk of a tiny initial string holding down a
// huge buffer.
- // FIXME: this should be size_t but that would cause warnings until we
- // fix UString sizes to be size_t instead of int
- static const int minShareSize = Heap::minExtraCost / sizeof(UChar);
+ static const unsigned minShareSize = Heap::minExtraCost / sizeof(UChar);
struct IdentifierRepHash : PtrHash<RefPtr<JSC::UString::Rep> > {
static unsigned hash(const RefPtr<JSC::UString::Rep>& key) { return key->existingHash(); }
@@ -327,6 +325,14 @@ namespace JSC {
unsigned m_length;
};
+ inline void sumWithOverflow(unsigned& total, unsigned addend, bool& overflow)
+ {
+ unsigned oldTotal = total;
+ total = oldTotal + addend;
+ if (total < oldTotal)
+ overflow = true;
+ }
+
template<typename StringType1, typename StringType2>
PassRefPtr<UStringImpl> tryMakeString(StringType1 string1, StringType2 string2)
{
@@ -334,7 +340,11 @@ namespace JSC {
StringTypeAdapter<StringType2> adapter2(string2);
UChar* buffer;
- unsigned length = adapter1.length() + adapter2.length();
+ bool overflow = false;
+ unsigned length = adapter1.length();
+ sumWithOverflow(length, adapter2.length(), overflow);
+ if (overflow)
+ return 0;
PassRefPtr<UStringImpl> resultImpl = UStringImpl::tryCreateUninitialized(length, buffer);
if (!resultImpl)
return 0;
@@ -355,7 +365,12 @@ namespace JSC {
StringTypeAdapter<StringType3> adapter3(string3);
UChar* buffer;
- unsigned length = adapter1.length() + adapter2.length() + adapter3.length();
+ bool overflow = false;
+ unsigned length = adapter1.length();
+ sumWithOverflow(length, adapter2.length(), overflow);
+ sumWithOverflow(length, adapter3.length(), overflow);
+ if (overflow)
+ return 0;
PassRefPtr<UStringImpl> resultImpl = UStringImpl::tryCreateUninitialized(length, buffer);
if (!resultImpl)
return 0;
@@ -379,7 +394,13 @@ namespace JSC {
StringTypeAdapter<StringType4> adapter4(string4);
UChar* buffer;
- unsigned length = adapter1.length() + adapter2.length() + adapter3.length() + adapter4.length();
+ bool overflow = false;
+ unsigned length = adapter1.length();
+ sumWithOverflow(length, adapter2.length(), overflow);
+ sumWithOverflow(length, adapter3.length(), overflow);
+ sumWithOverflow(length, adapter4.length(), overflow);
+ if (overflow)
+ return 0;
PassRefPtr<UStringImpl> resultImpl = UStringImpl::tryCreateUninitialized(length, buffer);
if (!resultImpl)
return 0;
@@ -406,7 +427,14 @@ namespace JSC {
StringTypeAdapter<StringType5> adapter5(string5);
UChar* buffer;
- unsigned length = adapter1.length() + adapter2.length() + adapter3.length() + adapter4.length() + adapter5.length();
+ bool overflow = false;
+ unsigned length = adapter1.length();
+ sumWithOverflow(length, adapter2.length(), overflow);
+ sumWithOverflow(length, adapter3.length(), overflow);
+ sumWithOverflow(length, adapter4.length(), overflow);
+ sumWithOverflow(length, adapter5.length(), overflow);
+ if (overflow)
+ return 0;
PassRefPtr<UStringImpl> resultImpl = UStringImpl::tryCreateUninitialized(length, buffer);
if (!resultImpl)
return 0;
@@ -436,7 +464,15 @@ namespace JSC {
StringTypeAdapter<StringType6> adapter6(string6);
UChar* buffer;
- unsigned length = adapter1.length() + adapter2.length() + adapter3.length() + adapter4.length() + adapter5.length() + adapter6.length();
+ bool overflow = false;
+ unsigned length = adapter1.length();
+ sumWithOverflow(length, adapter2.length(), overflow);
+ sumWithOverflow(length, adapter3.length(), overflow);
+ sumWithOverflow(length, adapter4.length(), overflow);
+ sumWithOverflow(length, adapter5.length(), overflow);
+ sumWithOverflow(length, adapter6.length(), overflow);
+ if (overflow)
+ return 0;
PassRefPtr<UStringImpl> resultImpl = UStringImpl::tryCreateUninitialized(length, buffer);
if (!resultImpl)
return 0;
@@ -469,7 +505,16 @@ namespace JSC {
StringTypeAdapter<StringType7> adapter7(string7);
UChar* buffer;
- unsigned length = adapter1.length() + adapter2.length() + adapter3.length() + adapter4.length() + adapter5.length() + adapter6.length() + adapter7.length();
+ bool overflow = false;
+ unsigned length = adapter1.length();
+ sumWithOverflow(length, adapter2.length(), overflow);
+ sumWithOverflow(length, adapter3.length(), overflow);
+ sumWithOverflow(length, adapter4.length(), overflow);
+ sumWithOverflow(length, adapter5.length(), overflow);
+ sumWithOverflow(length, adapter6.length(), overflow);
+ sumWithOverflow(length, adapter7.length(), overflow);
+ if (overflow)
+ return 0;
PassRefPtr<UStringImpl> resultImpl = UStringImpl::tryCreateUninitialized(length, buffer);
if (!resultImpl)
return 0;
@@ -505,7 +550,17 @@ namespace JSC {
StringTypeAdapter<StringType8> adapter8(string8);
UChar* buffer;
- unsigned length = adapter1.length() + adapter2.length() + adapter3.length() + adapter4.length() + adapter5.length() + adapter6.length() + adapter7.length() + adapter8.length();
+ bool overflow = false;
+ unsigned length = adapter1.length();
+ sumWithOverflow(length, adapter2.length(), overflow);
+ sumWithOverflow(length, adapter3.length(), overflow);
+ sumWithOverflow(length, adapter4.length(), overflow);
+ sumWithOverflow(length, adapter5.length(), overflow);
+ sumWithOverflow(length, adapter6.length(), overflow);
+ sumWithOverflow(length, adapter7.length(), overflow);
+ sumWithOverflow(length, adapter8.length(), overflow);
+ if (overflow)
+ return 0;
PassRefPtr<UStringImpl> resultImpl = UStringImpl::tryCreateUninitialized(length, buffer);
if (!resultImpl)
return 0;
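
Each tryMakeString overload above now routes its length arithmetic through sumWithOverflow and returns 0 if the unsigned total wraps, so a set of very large strings can no longer under-allocate the result buffer. A self-contained sketch of the same guard for a two-string concatenation (tryConcat and the plain char buffers are illustrative, not the JSC API):

    #include <cstring>
    #include <memory>

    // Adds 'addend' into 'total', recording wrap-around in 'overflow'.
    inline void sumWithOverflow(unsigned& total, unsigned addend, bool& overflow)
    {
        unsigned oldTotal = total;
        total = oldTotal + addend;
        if (total < oldTotal)
            overflow = true;
    }

    // Concatenates two buffers, or returns nullptr if the combined length
    // cannot be represented in an unsigned (the allocation would be too small).
    std::unique_ptr<char[]> tryConcat(const char* a, unsigned lenA,
                                      const char* b, unsigned lenB, unsigned& outLen)
    {
        bool overflow = false;
        unsigned length = lenA;
        sumWithOverflow(length, lenB, overflow);
        if (overflow)
            return nullptr;

        std::unique_ptr<char[]> buffer(new char[length]);
        std::memcpy(buffer.get(), a, lenA);
        std::memcpy(buffer.get() + lenA, b, lenB);
        outLen = length;
        return buffer;
    }
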
diff --git a/JavaScriptCore/runtime/UStringImpl.cpp b/JavaScriptCore/runtime/UStringImpl.cpp
index 9882007..b7d9a40 100644
--- a/JavaScriptCore/runtime/UStringImpl.cpp
+++ b/JavaScriptCore/runtime/UStringImpl.cpp
@@ -50,7 +50,7 @@ PassRefPtr<UStringImpl> UStringImpl::create(const char* c)
return result;
}
-PassRefPtr<UStringImpl> UStringImpl::create(const char* c, int length)
+PassRefPtr<UStringImpl> UStringImpl::create(const char* c, unsigned length)
{
ASSERT(c);
@@ -59,12 +59,12 @@ PassRefPtr<UStringImpl> UStringImpl::create(const char* c, int length)
UChar* d;
PassRefPtr<UStringImpl> result = UStringImpl::createUninitialized(length, d);
- for (int i = 0; i < length; i++)
+ for (unsigned i = 0; i < length; i++)
d[i] = static_cast<unsigned char>(c[i]); // use unsigned char to zero-extend instead of sign-extend
return result;
}
-PassRefPtr<UStringImpl> UStringImpl::create(const UChar* buffer, int length)
+PassRefPtr<UStringImpl> UStringImpl::create(const UChar* buffer, unsigned length)
{
UChar* newBuffer;
PassRefPtr<UStringImpl> impl = createUninitialized(length, newBuffer);
@@ -75,12 +75,14 @@ PassRefPtr<UStringImpl> UStringImpl::create(const UChar* buffer, int length)
SharedUChar* UStringImpl::baseSharedBuffer()
{
ASSERT((bufferOwnership() == BufferShared)
- || ((bufferOwnership() == BufferOwned) && !m_dataBuffer.asPtr<void*>()));
+ || ((bufferOwnership() == BufferOwned) && !m_buffer));
- if (bufferOwnership() != BufferShared)
- m_dataBuffer = UntypedPtrAndBitfield(SharedUChar::create(new OwnFastMallocPtr<UChar>(m_data)).releaseRef(), BufferShared);
+ if (bufferOwnership() != BufferShared) {
+ m_refCountAndFlags = (m_refCountAndFlags & ~s_refCountMaskBufferOwnership) | BufferShared;
+ m_bufferShared = SharedUChar::create(new OwnFastMallocPtr<UChar>(m_data)).releaseRef();
+ }
- return m_dataBuffer.asPtr<SharedUChar*>();
+ return m_bufferShared;
}
SharedUChar* UStringImpl::sharedBuffer()
@@ -108,12 +110,43 @@ UStringImpl::~UStringImpl()
if (bufferOwnership() == BufferOwned)
fastFree(m_data);
else if (bufferOwnership() == BufferSubstring)
- m_dataBuffer.asPtr<UStringImpl*>()->deref();
+ m_bufferSubstring->deref();
else {
ASSERT(bufferOwnership() == BufferShared);
- m_dataBuffer.asPtr<SharedUChar*>()->deref();
+ m_bufferShared->deref();
}
}
}
+void URopeImpl::derefFibersNonRecursive(Vector<URopeImpl*, 32>& workQueue)
+{
+ unsigned length = fiberCount();
+ for (unsigned i = 0; i < length; ++i) {
+ Fiber& fiber = fibers(i);
+ if (fiber->isRope()) {
+ URopeImpl* nextRope = static_cast<URopeImpl*>(fiber);
+ if (nextRope->hasOneRef())
+ workQueue.append(nextRope);
+ else
+ nextRope->deref();
+ } else
+ static_cast<UStringImpl*>(fiber)->deref();
+ }
+}
+
+void URopeImpl::destructNonRecursive()
+{
+ Vector<URopeImpl*, 32> workQueue;
+
+ derefFibersNonRecursive(workQueue);
+ delete this;
+
+ while (!workQueue.isEmpty()) {
+ URopeImpl* rope = workQueue.last();
+ workQueue.removeLast();
+ rope->derefFibersNonRecursive(workQueue);
+ delete rope;
+ }
}
+
+} // namespace JSC
diff --git a/JavaScriptCore/runtime/UStringImpl.h b/JavaScriptCore/runtime/UStringImpl.h
index bbea0aa..142e01d 100644
--- a/JavaScriptCore/runtime/UStringImpl.h
+++ b/JavaScriptCore/runtime/UStringImpl.h
@@ -40,72 +40,93 @@ class IdentifierTable;
typedef CrossThreadRefCounted<OwnFastMallocPtr<UChar> > SharedUChar;
-class UntypedPtrAndBitfield {
+class UStringOrRopeImpl : public Noncopyable {
public:
- UntypedPtrAndBitfield() {}
+ bool isRope() { return (m_refCountAndFlags & s_refCountIsRope) == s_refCountIsRope; }
+ unsigned length() const { return m_length; }
- UntypedPtrAndBitfield(void* ptrValue, uintptr_t bitValue)
- : m_value(reinterpret_cast<uintptr_t>(ptrValue) | bitValue)
-#ifndef NDEBUG
- , m_leaksPtr(ptrValue)
-#endif
- {
- ASSERT(ptrValue == asPtr<void*>());
- ASSERT((*this & ~s_alignmentMask) == bitValue);
- }
+ void ref() { m_refCountAndFlags += s_refCountIncrement; }
+ inline void deref();
+
+protected:
+ enum BufferOwnership {
+ BufferInternal,
+ BufferOwned,
+ BufferSubstring,
+ BufferShared,
+ };
+
+ using Noncopyable::operator new;
+ void* operator new(size_t, void* inPlace) { return inPlace; }
- template<typename T>
- T asPtr() const { return reinterpret_cast<T>(m_value & s_alignmentMask); }
+ // For SmallStringStorage, which allocates an array and uses an in-place new.
+ UStringOrRopeImpl() { }
- UntypedPtrAndBitfield& operator&=(uintptr_t bits)
+ UStringOrRopeImpl(unsigned length, BufferOwnership ownership)
+ : m_refCountAndFlags(s_refCountIncrement | s_refCountFlagShouldReportedCost | ownership)
+ , m_length(length)
{
- m_value &= bits | s_alignmentMask;
- return *this;
+ ASSERT(!isRope());
}
- UntypedPtrAndBitfield& operator|=(uintptr_t bits)
+ enum StaticStringConstructType { ConstructStaticString };
+ UStringOrRopeImpl(unsigned length, StaticStringConstructType)
+ : m_refCountAndFlags(s_refCountFlagStatic | BufferOwned)
+ , m_length(length)
{
- m_value |= bits & ~s_alignmentMask;
- return *this;
+ ASSERT(!isRope());
}
- uintptr_t operator&(uintptr_t mask) const
+ enum RopeConstructType { ConstructRope };
+ UStringOrRopeImpl(RopeConstructType)
+ : m_refCountAndFlags(s_refCountIncrement | s_refCountIsRope)
+ , m_length(0)
{
- return m_value & mask & ~s_alignmentMask;
+ ASSERT(isRope());
}
-private:
- static const uintptr_t s_alignmentMask = ~static_cast<uintptr_t>(0x7);
- uintptr_t m_value;
-#ifndef NDEBUG
- void* m_leaksPtr; // Only used to allow tools like leaks on OSX to detect that the memory is referenced.
-#endif
+ // The bottom 5 bits hold flags, the top 27 bits hold the ref count.
+ // When dereferencing UStringImpls we check for the ref count AND the
+ // static bit both being zero - static strings are never deleted.
+ static const unsigned s_refCountMask = 0xFFFFFFE0;
+ static const unsigned s_refCountIncrement = 0x20;
+ static const unsigned s_refCountFlagStatic = 0x10;
+ static const unsigned s_refCountFlagShouldReportedCost = 0x8;
+ static const unsigned s_refCountFlagIsIdentifier = 0x4;
+ static const unsigned s_refCountMaskBufferOwnership = 0x3;
+ // Use an otherwise invalid permutation of flags (static & shouldReportedCost -
+ // static strings do not set shouldReportedCost in the constructor, and this bit
+ // is only ever cleared, not set) to identify objects that are ropes.
+ static const unsigned s_refCountIsRope = s_refCountFlagStatic | s_refCountFlagShouldReportedCost;
+
+ unsigned m_refCountAndFlags;
+ unsigned m_length;
};
-class UStringImpl : Noncopyable {
+class UStringImpl : public UStringOrRopeImpl {
public:
template<size_t inlineCapacity>
static PassRefPtr<UStringImpl> adopt(Vector<UChar, inlineCapacity>& vector)
{
if (unsigned length = vector.size()) {
ASSERT(vector.data());
- return adoptRef(new UStringImpl(vector.releaseBuffer(), length, BufferOwned));
+ return adoptRef(new UStringImpl(vector.releaseBuffer(), length));
}
return &empty();
}
static PassRefPtr<UStringImpl> create(const char* c);
- static PassRefPtr<UStringImpl> create(const char* c, int length);
- static PassRefPtr<UStringImpl> create(const UChar* buffer, int length);
+ static PassRefPtr<UStringImpl> create(const char* c, unsigned length);
+ static PassRefPtr<UStringImpl> create(const UChar* buffer, unsigned length);
- static PassRefPtr<UStringImpl> create(PassRefPtr<UStringImpl> rep, int offset, int length)
+ static PassRefPtr<UStringImpl> create(PassRefPtr<UStringImpl> rep, unsigned offset, unsigned length)
{
ASSERT(rep);
rep->checkConsistency();
return adoptRef(new UStringImpl(rep->m_data + offset, length, rep->bufferOwnerString()));
}
- static PassRefPtr<UStringImpl> create(PassRefPtr<SharedUChar> sharedBuffer, UChar* buffer, int length)
+ static PassRefPtr<UStringImpl> create(PassRefPtr<SharedUChar> sharedBuffer, UChar* buffer, unsigned length)
{
return adoptRef(new UStringImpl(buffer, length, sharedBuffer));
}
@@ -121,7 +142,7 @@ public:
CRASH();
UStringImpl* resultImpl = static_cast<UStringImpl*>(fastMalloc(sizeof(UChar) * length + sizeof(UStringImpl)));
output = reinterpret_cast<UChar*>(resultImpl + 1);
- return adoptRef(new(resultImpl) UStringImpl(output, length, BufferInternal));
+ return adoptRef(new(resultImpl) UStringImpl(length));
}
static PassRefPtr<UStringImpl> tryCreateUninitialized(unsigned length, UChar*& output)
@@ -137,31 +158,36 @@ public:
if (!tryFastMalloc(sizeof(UChar) * length + sizeof(UStringImpl)).getValue(resultImpl))
return 0;
output = reinterpret_cast<UChar*>(resultImpl + 1);
- return adoptRef(new(resultImpl) UStringImpl(output, length, BufferInternal));
+ return adoptRef(new(resultImpl) UStringImpl(length));
}
SharedUChar* sharedBuffer();
UChar* data() const { return m_data; }
- int size() const { return m_length; }
size_t cost()
{
// For substrings, return the cost of the base string.
if (bufferOwnership() == BufferSubstring)
- return m_dataBuffer.asPtr<UStringImpl*>()->cost();
+ return m_bufferSubstring->cost();
- if (m_dataBuffer & s_reportedCostBit)
- return 0;
- m_dataBuffer |= s_reportedCostBit;
- return m_length;
+ if (m_refCountAndFlags & s_refCountFlagShouldReportedCost) {
+ m_refCountAndFlags &= ~s_refCountFlagShouldReportedCost;
+ return m_length;
+ }
+ return 0;
}
unsigned hash() const { if (!m_hash) m_hash = computeHash(data(), m_length); return m_hash; }
unsigned existingHash() const { ASSERT(m_hash); return m_hash; } // fast path for Identifiers
void setHash(unsigned hash) { ASSERT(hash == computeHash(data(), m_length)); m_hash = hash; } // fast path for Identifiers
- bool isIdentifier() const { return m_isIdentifier; }
- void setIsIdentifier(bool isIdentifier) { m_isIdentifier = isIdentifier; }
+ bool isIdentifier() const { return m_refCountAndFlags & s_refCountFlagIsIdentifier; }
+ void setIsIdentifier(bool isIdentifier)
+ {
+ if (isIdentifier)
+ m_refCountAndFlags |= s_refCountFlagIsIdentifier;
+ else
+ m_refCountAndFlags &= ~s_refCountFlagIsIdentifier;
+ }
- UStringImpl* ref() { m_refCount += s_refCountIncrement; return this; }
- ALWAYS_INLINE void deref() { if (!(m_refCount -= s_refCountIncrement)) delete this; }
+ ALWAYS_INLINE void deref() { m_refCountAndFlags -= s_refCountIncrement; if (!(m_refCountAndFlags & (s_refCountMask | s_refCountFlagStatic))) delete this; }
static void copyChars(UChar* destination, const UChar* source, unsigned numCharacters)
{
@@ -172,8 +198,8 @@ public:
memcpy(destination, source, numCharacters * sizeof(UChar));
}
- static unsigned computeHash(const UChar* s, int length) { ASSERT(length >= 0); return WTF::stringHash(s, length); }
- static unsigned computeHash(const char* s, int length) { ASSERT(length >= 0); return WTF::stringHash(s, length); }
+ static unsigned computeHash(const UChar* s, unsigned length) { return WTF::stringHash(s, length); }
+ static unsigned computeHash(const char* s, unsigned length) { return WTF::stringHash(s, length); }
static unsigned computeHash(const char* s) { return WTF::stringHash(s); }
static UStringImpl& empty() { return *s_empty; }
@@ -187,109 +213,145 @@ public:
}
private:
- enum BufferOwnership {
- BufferInternal,
- BufferOwned,
- BufferSubstring,
- BufferShared,
- };
-
// For SmallStringStorage, which allocates an array and uses an in-place new.
UStringImpl() { }
- // Used to construct normal strings with an internal or external buffer.
- UStringImpl(UChar* data, int length, BufferOwnership ownership)
- : m_data(data)
- , m_length(length)
- , m_refCount(s_refCountIncrement)
+ // Used to construct normal strings with an internal buffer.
+ UStringImpl(unsigned length)
+ : UStringOrRopeImpl(length, BufferInternal)
+ , m_data(reinterpret_cast<UChar*>(this + 1))
+ , m_buffer(0)
+ , m_hash(0)
+ {
+ checkConsistency();
+ }
+
+ // Used to construct normal strings with an external buffer.
+ UStringImpl(UChar* data, unsigned length)
+ : UStringOrRopeImpl(length, BufferOwned)
+ , m_data(data)
+ , m_buffer(0)
, m_hash(0)
- , m_isIdentifier(false)
- , m_dataBuffer(0, ownership)
{
- ASSERT((ownership == BufferInternal) || (ownership == BufferOwned));
checkConsistency();
}
// Used to construct static strings, which have a special refCount that can never hit zero.
// This means that the static string will never be destroyed, which is important because
// static strings will be shared across threads & ref-counted in a non-threadsafe manner.
- enum StaticStringConstructType { ConstructStaticString };
- UStringImpl(UChar* data, int length, StaticStringConstructType)
- : m_data(data)
- , m_length(length)
- , m_refCount(s_staticRefCountInitialValue)
+ UStringImpl(UChar* data, unsigned length, StaticStringConstructType)
+ : UStringOrRopeImpl(length, ConstructStaticString)
+ , m_data(data)
+ , m_buffer(0)
, m_hash(0)
- , m_isIdentifier(false)
- , m_dataBuffer(0, BufferOwned)
{
checkConsistency();
}
// Used to create new strings that are a substring of an existing string.
- UStringImpl(UChar* data, int length, PassRefPtr<UStringImpl> base)
- : m_data(data)
- , m_length(length)
- , m_refCount(s_refCountIncrement)
+ UStringImpl(UChar* data, unsigned length, PassRefPtr<UStringImpl> base)
+ : UStringOrRopeImpl(length, BufferSubstring)
+ , m_data(data)
+ , m_bufferSubstring(base.releaseRef())
, m_hash(0)
- , m_isIdentifier(false)
- , m_dataBuffer(base.releaseRef(), BufferSubstring)
{
// Do not use static strings as a base for substrings; UntypedPtrAndBitfield assumes
// that all pointers will be at least 8-byte aligned, we cannot guarantee that of
// UStringImpls that are not heap allocated.
- ASSERT(m_dataBuffer.asPtr<UStringImpl*>()->size());
- ASSERT(!m_dataBuffer.asPtr<UStringImpl*>()->isStatic());
+ ASSERT(m_bufferSubstring->length());
+ ASSERT(!m_bufferSubstring->isStatic());
checkConsistency();
}
// Used to construct new strings sharing an existing shared buffer.
- UStringImpl(UChar* data, int length, PassRefPtr<SharedUChar> sharedBuffer)
- : m_data(data)
- , m_length(length)
- , m_refCount(s_refCountIncrement)
+ UStringImpl(UChar* data, unsigned length, PassRefPtr<SharedUChar> sharedBuffer)
+ : UStringOrRopeImpl(length, BufferShared)
+ , m_data(data)
+ , m_bufferShared(sharedBuffer.releaseRef())
, m_hash(0)
- , m_isIdentifier(false)
- , m_dataBuffer(sharedBuffer.releaseRef(), BufferShared)
{
checkConsistency();
}
- using Noncopyable::operator new;
- void* operator new(size_t, void* inPlace) { return inPlace; }
-
~UStringImpl();
// This number must be at least 2 to avoid sharing empty, null as well as 1 character strings from SmallStrings.
- static const int s_minLengthToShare = 10;
+ static const unsigned s_minLengthToShare = 10;
static const unsigned s_copyCharsInlineCutOff = 20;
- static const uintptr_t s_bufferOwnershipMask = 3;
- static const uintptr_t s_reportedCostBit = 4;
- // We initialize and increment/decrement the refCount for all normal (non-static) strings by the value 2.
- // We initialize static strings with an odd number (specifically, 1), such that the refCount cannot reach zero.
- static const int s_refCountIncrement = 2;
- static const int s_staticRefCountInitialValue = 1;
-
- UStringImpl* bufferOwnerString() { return (bufferOwnership() == BufferSubstring) ? m_dataBuffer.asPtr<UStringImpl*>() : this; }
- const UStringImpl* bufferOwnerString() const { return (bufferOwnership() == BufferSubstring) ? m_dataBuffer.asPtr<UStringImpl*>() : this; }
+
+ UStringImpl* bufferOwnerString() { return (bufferOwnership() == BufferSubstring) ? m_bufferSubstring : this; }
+ const UStringImpl* bufferOwnerString() const { return (bufferOwnership() == BufferSubstring) ? m_bufferSubstring : this; }
SharedUChar* baseSharedBuffer();
- unsigned bufferOwnership() const { return m_dataBuffer & s_bufferOwnershipMask; }
- bool isStatic() const { return m_refCount & 1; }
+ unsigned bufferOwnership() const { return m_refCountAndFlags & s_refCountMaskBufferOwnership; }
+ bool isStatic() const { return m_refCountAndFlags & s_refCountFlagStatic; }
// unshared data
UChar* m_data;
- int m_length;
- unsigned m_refCount;
- mutable unsigned m_hash : 31;
- mutable unsigned m_isIdentifier : 1;
- UntypedPtrAndBitfield m_dataBuffer;
+ union {
+ void* m_buffer;
+ UStringImpl* m_bufferSubstring;
+ SharedUChar* m_bufferShared;
+ };
+ mutable unsigned m_hash;
JS_EXPORTDATA static UStringImpl* s_empty;
friend class JIT;
friend class SmallStringsStorage;
+ friend class UStringOrRopeImpl;
friend void initializeUString();
};
+class URopeImpl : public UStringOrRopeImpl {
+public:
+ // A URopeImpl is composed of a set of smaller strings called Fibers.
+ // Each Fiber in a rope is either a UStringImpl or another URopeImpl.
+ typedef UStringOrRopeImpl* Fiber;
+
+ // Creates a URopeImpl comprising 'fiberCount' Fibers.
+ // The URopeImpl is constructed in an uninitialized state - initialize must be called for each Fiber in the URopeImpl.
+ static PassRefPtr<URopeImpl> tryCreateUninitialized(unsigned fiberCount)
+ {
+ void* allocation;
+ if (tryFastMalloc(sizeof(URopeImpl) + (fiberCount - 1) * sizeof(Fiber)).getValue(allocation))
+ return adoptRef(new (allocation) URopeImpl(fiberCount));
+ return 0;
+ }
+
+ void initializeFiber(unsigned &index, Fiber fiber)
+ {
+ m_fibers[index++] = fiber;
+ fiber->ref();
+ m_length += fiber->length();
+ }
+
+ unsigned fiberCount() { return m_fiberCount; }
+ Fiber& fibers(unsigned index) { return m_fibers[index]; }
+
+ ALWAYS_INLINE void deref() { m_refCountAndFlags -= s_refCountIncrement; if (!(m_refCountAndFlags & s_refCountMask)) destructNonRecursive(); }
+
+private:
+ URopeImpl(unsigned fiberCount) : UStringOrRopeImpl(ConstructRope), m_fiberCount(fiberCount) {}
+
+ void destructNonRecursive();
+ void derefFibersNonRecursive(Vector<URopeImpl*, 32>& workQueue);
+
+ bool hasOneRef() { return (m_refCountAndFlags & s_refCountMask) == s_refCountIncrement; }
+
+ unsigned m_fiberCount;
+ Fiber m_fibers[1];
+
+ friend class UStringOrRopeImpl;
+};
+
+inline void UStringOrRopeImpl::deref()
+{
+ if (isRope())
+ static_cast<URopeImpl*>(this)->deref();
+ else
+ static_cast<UStringImpl*>(this)->deref();
+}
+
bool equal(const UStringImpl*, const UStringImpl*);
}
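
UStringOrRopeImpl above folds the reference count, the identifier/static/cost flags and the buffer-ownership bits into the single word m_refCountAndFlags: the count lives in the top 27 bits and moves in steps of 0x20, and deref only destroys the object when both the count bits and the static bit are zero, which is what keeps static strings immortal. A standalone sketch of that packing (RefCountedWord is an illustrative class, not the JSC type):

    class RefCountedWord {
    public:
        explicit RefCountedWord(bool isStatic = false)
            : m_refCountAndFlags(isStatic ? s_flagStatic : s_refCountIncrement) { }

        void ref() { m_refCountAndFlags += s_refCountIncrement; }

        // The static flag is never cleared, so for static objects the test
        // below can never be true and they are never deleted.
        void deref()
        {
            m_refCountAndFlags -= s_refCountIncrement;
            if (!(m_refCountAndFlags & (s_refCountMask | s_flagStatic)))
                delete this;   // reached only for heap-allocated, non-static objects
        }

        bool isIdentifier() const { return m_refCountAndFlags & s_flagIsIdentifier; }
        void setIsIdentifier() { m_refCountAndFlags |= s_flagIsIdentifier; }

    private:
        // Top 27 bits: reference count, stepped in units of 0x20.
        // Bottom 5 bits: flags.
        static const unsigned s_refCountMask = 0xFFFFFFE0;
        static const unsigned s_refCountIncrement = 0x20;
        static const unsigned s_flagStatic = 0x10;
        static const unsigned s_flagIsIdentifier = 0x4;

        unsigned m_refCountAndFlags;
    };
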
diff --git a/JavaScriptCore/wtf/AlwaysInline.h b/JavaScriptCore/wtf/AlwaysInline.h
index 4e7224c..ce27df6 100644
--- a/JavaScriptCore/wtf/AlwaysInline.h
+++ b/JavaScriptCore/wtf/AlwaysInline.h
@@ -33,6 +33,8 @@
#ifndef NEVER_INLINE
#if COMPILER(GCC)
#define NEVER_INLINE __attribute__((__noinline__))
+#elif COMPILER(RVCT)
+#define NEVER_INLINE __declspec(noinline)
#else
#define NEVER_INLINE
#endif
@@ -57,6 +59,8 @@
#ifndef NO_RETURN
#if COMPILER(GCC)
#define NO_RETURN __attribute((__noreturn__))
+#elif COMPILER(RVCT)
+#define NO_RETURN __declspec(noreturn)
#else
#define NO_RETURN
#endif
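
The AlwaysInline.h hunks teach NEVER_INLINE and NO_RETURN the RVCT __declspec spellings alongside GCC's attributes, with an empty fallback elsewhere; call sites are unchanged. A minimal illustration of how such a macro is defined and consumed (reportFatalError is a hypothetical function, and __ARMCC_VERSION is used here as the usual RVCT predefine):

    #include <cstdio>
    #include <cstdlib>

    // Pick the compiler-specific spelling; fall back to nothing when the
    // toolchain has no equivalent.
    #if defined(__GNUC__)
    #define NO_RETURN __attribute__((__noreturn__))
    #elif defined(__ARMCC_VERSION)   /* RVCT */
    #define NO_RETURN __declspec(noreturn)
    #else
    #define NO_RETURN
    #endif

    // The annotation lets the compiler assume control never comes back from
    // this function, which improves warnings and codegen at call sites.
    NO_RETURN void reportFatalError(const char* message)
    {
        std::fprintf(stderr, "FATAL: %s\n", message);
        std::abort();
    }
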
diff --git a/JavaScriptCore/wtf/FastMalloc.cpp b/JavaScriptCore/wtf/FastMalloc.cpp
index 79d2bfb..90d1e3f 100644
--- a/JavaScriptCore/wtf/FastMalloc.cpp
+++ b/JavaScriptCore/wtf/FastMalloc.cpp
@@ -1248,13 +1248,6 @@ static const int kScavengeTimerDelayInSeconds = 5;
// Number of free committed pages that we want to keep around.
static const size_t kMinimumFreeCommittedPageCount = 512;
-// During a scavenge, we'll release up to a fraction of the free committed pages.
-#if OS(WINDOWS)
-// We are slightly less aggressive in releasing memory on Windows due to performance reasons.
-static const int kMaxScavengeAmountFactor = 3;
-#else
-static const int kMaxScavengeAmountFactor = 2;
-#endif
#endif
class TCMalloc_PageHeap {
@@ -1507,39 +1500,35 @@ ALWAYS_INLINE void TCMalloc_PageHeap::signalScavenger()
#endif
-void TCMalloc_PageHeap::scavenge()
+void TCMalloc_PageHeap::scavenge()
{
- // If we have to commit memory in the last 5 seconds, it means we don't have enough free committed pages
- // for the amount of allocations that we do. So hold off on releasing memory back to the system.
+ // If we've recently committed pages, our working set is growing, so now is
+ // not a good time to free pages.
if (pages_committed_since_last_scavenge_ > 0) {
pages_committed_since_last_scavenge_ = 0;
return;
}
- Length pagesDecommitted = 0;
- for (int i = kMaxPages; i >= 0; i--) {
+
+ for (int i = kMaxPages; i >= 0 && shouldContinueScavenging(); i--) {
SpanList* slist = (static_cast<size_t>(i) == kMaxPages) ? &large_ : &free_[i];
if (!DLL_IsEmpty(&slist->normal)) {
// Release the last span on the normal portion of this list
Span* s = slist->normal.prev;
- // Only decommit up to a fraction of the free committed pages if pages_allocated_since_last_scavenge_ > 0.
- if ((pagesDecommitted + s->length) * kMaxScavengeAmountFactor > free_committed_pages_)
- continue;
DLL_Remove(s);
- TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
- static_cast<size_t>(s->length << kPageShift));
+ ASSERT(!s->decommitted);
if (!s->decommitted) {
- pagesDecommitted += s->length;
+ TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
+ static_cast<size_t>(s->length << kPageShift));
+ ASSERT(free_committed_pages_ >= s->length);
+ free_committed_pages_ -= s->length;
s->decommitted = true;
}
DLL_Prepend(&slist->returned, s);
- // We can stop scavenging if the number of free committed pages left is less than or equal to the minimum number we want to keep around.
- if (free_committed_pages_ <= kMinimumFreeCommittedPageCount + pagesDecommitted)
- break;
}
}
+
+ ASSERT(!shouldContinueScavenging());
pages_committed_since_last_scavenge_ = 0;
- ASSERT(free_committed_pages_ >= pagesDecommitted);
- free_committed_pages_ -= pagesDecommitted;
}
ALWAYS_INLINE bool TCMalloc_PageHeap::shouldContinueScavenging() const
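
The rewritten scavenge() above drops the kMaxScavengeAmountFactor cap: it walks the free lists from the largest spans down, decommits spans and subtracts their pages from free_committed_pages_ as it goes, and stops as soon as shouldContinueScavenging() says the pool is back at its target. A much-simplified sketch of that shape (MiniPageHeap and its members are stand-ins, not the TCMalloc types):

    #include <cassert>
    #include <list>
    #include <vector>

    struct Span { size_t pages; bool decommitted = false; };

    struct MiniPageHeap {
        std::vector<std::list<Span>> freeLists;       // one list per span size class
        std::vector<std::list<Span>> returnedLists;   // decommitted spans, per size class
        size_t freeCommittedPages = 0;
        static const size_t minimumFreeCommittedPages = 512;

        bool shouldContinueScavenging() const { return freeCommittedPages > minimumFreeCommittedPages; }

        // Release free spans back to the system, largest size class first,
        // until the committed free pool is back at or below its target.
        void scavenge()
        {
            for (size_t i = freeLists.size(); i > 0 && shouldContinueScavenging(); --i) {
                std::list<Span>& list = freeLists[i - 1];
                if (list.empty())
                    continue;
                Span span = list.back();
                list.pop_back();
                if (!span.decommitted) {
                    // A systemRelease(span) call would hand the pages to the OS here.
                    assert(freeCommittedPages >= span.pages);
                    freeCommittedPages -= span.pages;
                    span.decommitted = true;
                }
                returnedLists[i - 1].push_back(span);
            }
        }
    };
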
diff --git a/JavaScriptCore/wtf/PtrAndFlags.h b/JavaScriptCore/wtf/PtrAndFlags.h
deleted file mode 100644
index 1e1bee0..0000000
--- a/JavaScriptCore/wtf/PtrAndFlags.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright (C) 2009 Google Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef PtrAndFlags_h
-#define PtrAndFlags_h
-
-#include <wtf/Assertions.h>
-
-namespace WTF {
- template<class T, typename FlagEnum> class PtrAndFlagsBase {
- public:
- bool isFlagSet(FlagEnum flagNumber) const { ASSERT(flagNumber < 2); return m_ptrAndFlags & (1 << flagNumber); }
- void setFlag(FlagEnum flagNumber) { ASSERT(flagNumber < 2); m_ptrAndFlags |= (1 << flagNumber);}
- void clearFlag(FlagEnum flagNumber) { ASSERT(flagNumber < 2); m_ptrAndFlags &= ~(1 << flagNumber);}
- T* get() const { return reinterpret_cast<T*>(m_ptrAndFlags & ~3); }
- void set(T* ptr)
- {
- ASSERT(!(reinterpret_cast<intptr_t>(ptr) & 3));
- m_ptrAndFlags = reinterpret_cast<intptr_t>(ptr) | (m_ptrAndFlags & 3);
-#ifndef NDEBUG
- m_leaksPtr = ptr;
-#endif
- }
-
- bool operator!() const { return !get(); }
- T* operator->() const { return reinterpret_cast<T*>(m_ptrAndFlags & ~3); }
-
- protected:
- intptr_t m_ptrAndFlags;
-#ifndef NDEBUG
- void* m_leaksPtr; // Only used to allow tools like leaks on OSX to detect that the memory is referenced.
-#endif
- };
-
- template<class T, typename FlagEnum> class PtrAndFlags : public PtrAndFlagsBase<T, FlagEnum> {
- public:
- PtrAndFlags()
- {
- PtrAndFlagsBase<T, FlagEnum>::m_ptrAndFlags = 0;
- }
- PtrAndFlags(T* ptr)
- {
- PtrAndFlagsBase<T, FlagEnum>::m_ptrAndFlags = 0;
- PtrAndFlagsBase<T, FlagEnum>::set(ptr);
- }
- };
-} // namespace WTF
-
-using WTF::PtrAndFlagsBase;
-using WTF::PtrAndFlags;
-
-#endif // PtrAndFlags_h
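
PtrAndFlags packed up to two boolean flags into the low alignment bits of a pointer; with UStringImpl and the Structure transition table now keeping explicit flag words instead, the helper has no users left and is removed. For reference, a minimal sketch of the tagging trick it provided (TaggedPtr is an illustrative name):

    #include <cassert>
    #include <cstdint>

    // Stores a pointer plus two boolean flags in one word, exploiting the fact
    // that pointers to suitably aligned objects have their low bits clear.
    template<typename T>
    class TaggedPtr {
    public:
        TaggedPtr() : m_bits(0) { }

        void set(T* ptr)
        {
            assert(!(reinterpret_cast<uintptr_t>(ptr) & 3));   // requires >= 4-byte alignment
            m_bits = reinterpret_cast<uintptr_t>(ptr) | (m_bits & 3);
        }
        T* get() const { return reinterpret_cast<T*>(m_bits & ~uintptr_t(3)); }

        bool isFlagSet(unsigned flag) const { assert(flag < 2); return m_bits & (uintptr_t(1) << flag); }
        void setFlag(unsigned flag)         { assert(flag < 2); m_bits |= (uintptr_t(1) << flag); }
        void clearFlag(unsigned flag)       { assert(flag < 2); m_bits &= ~(uintptr_t(1) << flag); }

    private:
        uintptr_t m_bits;
    };
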
diff --git a/JavaScriptCore/wtf/gtk/GOwnPtr.cpp b/JavaScriptCore/wtf/gtk/GOwnPtr.cpp
index 1a151b9..da0d839 100644
--- a/JavaScriptCore/wtf/gtk/GOwnPtr.cpp
+++ b/JavaScriptCore/wtf/gtk/GOwnPtr.cpp
@@ -19,6 +19,7 @@
#include "config.h"
#include "GOwnPtr.h"
+#include <gio/gio.h>
#include <glib.h>
namespace WTF {
@@ -57,4 +58,10 @@ template <> void freeOwnedGPtr<GDir>(GDir* ptr)
if (ptr)
g_dir_close(ptr);
}
+
+template <> void freeOwnedGPtr<GFile>(GFile* ptr)
+{
+ if (ptr)
+ g_object_unref(ptr);
+}
} // namespace WTF
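
The GOwnPtr change adds a GFile specialization so that an owned GFile is released with g_object_unref rather than g_free. The mechanism is plain template specialization of a per-type deleter; a self-contained sketch of that pattern using a hypothetical C-style handle (FooHandle, foo_close and ScopedHandle are illustrative, not GLib API):

    #include <cstdio>
    #include <cstdlib>

    // A hypothetical C API: a handle type plus its dedicated release call.
    struct FooHandle { int fd; };
    void foo_close(FooHandle* handle) { std::printf("closing fd %d\n", handle->fd); delete handle; }

    // Default deleter: plain free(), analogous to g_free in GOwnPtr.
    template<typename T> void freeOwnedPtr(T* ptr) { std::free(ptr); }

    // Per-type specialization: FooHandle must be released through its own API,
    // just as GFile must go through g_object_unref instead of g_free.
    template<> void freeOwnedPtr<FooHandle>(FooHandle* ptr)
    {
        if (ptr)
            foo_close(ptr);
    }

    template<typename T>
    class ScopedHandle {
    public:
        explicit ScopedHandle(T* ptr = nullptr) : m_ptr(ptr) { }
        ~ScopedHandle() { freeOwnedPtr(m_ptr); }
        T* get() const { return m_ptr; }
    private:
        ScopedHandle(const ScopedHandle&);             // noncopyable, like GOwnPtr
        ScopedHandle& operator=(const ScopedHandle&);
        T* m_ptr;
    };

    int main()
    {
        ScopedHandle<FooHandle> file(new FooHandle{42});   // foo_close runs on scope exit
        return 0;
    }
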
diff --git a/JavaScriptCore/wtf/gtk/GOwnPtr.h b/JavaScriptCore/wtf/gtk/GOwnPtr.h
index ad2c30e..1fc594c 100644
--- a/JavaScriptCore/wtf/gtk/GOwnPtr.h
+++ b/JavaScriptCore/wtf/gtk/GOwnPtr.h
@@ -35,6 +35,7 @@ typedef struct _GMutex GMutex;
typedef struct _GPatternSpec GPatternSpec;
typedef struct _GDir GDir;
typedef struct _GHashTable GHashTable;
+typedef struct _GFile GFile;
extern "C" void g_free(void*);
namespace WTF {
@@ -47,6 +48,7 @@ template<> void freeOwnedGPtr<GMutex>(GMutex*);
template<> void freeOwnedGPtr<GPatternSpec>(GPatternSpec*);
template<> void freeOwnedGPtr<GDir>(GDir*);
template<> void freeOwnedGPtr<GHashTable>(GHashTable*);
+template<> void freeOwnedGPtr<GFile>(GFile*);
template <typename T> class GOwnPtr : public Noncopyable {
public: