Diffstat (limited to 'JavaScriptCore')
-rw-r--r--  JavaScriptCore/API/JSValueRef.cpp | 6
-rw-r--r--  JavaScriptCore/Android.mk | 4
-rw-r--r--  JavaScriptCore/CMakeLists.txt | 2
-rw-r--r--  JavaScriptCore/ChangeLog | 1286
-rw-r--r--  JavaScriptCore/Configurations/FeatureDefines.xcconfig | 11
-rw-r--r--  JavaScriptCore/Configurations/Version.xcconfig | 2
-rw-r--r--  JavaScriptCore/DerivedSources.pro | 15
-rw-r--r--  JavaScriptCore/GNUmakefile.am | 7
-rw-r--r--  JavaScriptCore/JavaScriptCore.exp | 230
-rw-r--r--  JavaScriptCore/JavaScriptCore.gypi | 7
-rw-r--r--  JavaScriptCore/JavaScriptCore.pro | 3
-rw-r--r--  JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def | 20
-rw-r--r--  JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj | 16
-rw-r--r--  JavaScriptCore/JavaScriptCore.vcproj/WTF/WTF.vcproj | 4
-rw-r--r--  JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj | 48
-rw-r--r--  JavaScriptCore/assembler/ARMAssembler.cpp | 2
-rw-r--r--  JavaScriptCore/assembler/ARMv7Assembler.cpp | 38
-rw-r--r--  JavaScriptCore/assembler/ARMv7Assembler.h | 217
-rw-r--r--  JavaScriptCore/assembler/AbstractMacroAssembler.h | 9
-rw-r--r--  JavaScriptCore/assembler/LinkBuffer.h | 149
-rw-r--r--  JavaScriptCore/assembler/MIPSAssembler.h | 6
-rw-r--r--  JavaScriptCore/assembler/MacroAssemblerARMv7.h | 55
-rw-r--r--  JavaScriptCore/assembler/X86Assembler.h | 4
-rw-r--r--  JavaScriptCore/bytecode/CodeBlock.cpp | 10
-rw-r--r--  JavaScriptCore/bytecode/CodeBlock.h | 20
-rw-r--r--  JavaScriptCore/bytecode/Opcode.h | 19
-rw-r--r--  JavaScriptCore/bytecode/StructureStubInfo.h | 24
-rw-r--r--  JavaScriptCore/bytecompiler/BytecodeGenerator.cpp | 14
-rw-r--r--  JavaScriptCore/bytecompiler/BytecodeGenerator.h | 3
-rw-r--r--  JavaScriptCore/create_jit_stubs | 13
-rw-r--r--  JavaScriptCore/interpreter/Interpreter.cpp | 48
-rw-r--r--  JavaScriptCore/interpreter/RegisterFile.cpp | 19
-rw-r--r--  JavaScriptCore/interpreter/RegisterFile.h | 97
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocator.cpp | 4
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocator.h | 67
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp | 32
-rw-r--r--  JavaScriptCore/jit/JIT.cpp | 10
-rw-r--r--  JavaScriptCore/jit/JIT.h | 65
-rw-r--r--  JavaScriptCore/jit/JITArithmetic32_64.cpp | 2
-rw-r--r--  JavaScriptCore/jit/JITInlineMethods.h | 2
-rw-r--r--  JavaScriptCore/jit/JITOpcodes.cpp | 54
-rw-r--r--  JavaScriptCore/jit/JITOpcodes32_64.cpp | 51
-rw-r--r--  JavaScriptCore/jit/JITPropertyAccess.cpp | 149
-rw-r--r--  JavaScriptCore/jit/JITPropertyAccess32_64.cpp | 165
-rw-r--r--  JavaScriptCore/jit/JITStubs.cpp | 410
-rw-r--r--  JavaScriptCore/jit/JITStubs.h | 4
-rw-r--r--  JavaScriptCore/jit/JSInterfaceJIT.h | 2
-rw-r--r--  JavaScriptCore/jit/SpecializedThunkJIT.h | 5
-rw-r--r--  JavaScriptCore/jsc.pro | 6
-rw-r--r--  JavaScriptCore/parser/Lexer.cpp | 129
-rw-r--r--  JavaScriptCore/parser/Lexer.h | 1
-rw-r--r--  JavaScriptCore/profiler/ProfilerServer.mm | 6
-rw-r--r--  JavaScriptCore/runtime/AlignedMemoryAllocator.h | 239
-rw-r--r--  JavaScriptCore/runtime/Collector.cpp | 148
-rw-r--r--  JavaScriptCore/runtime/Collector.h | 52
-rw-r--r--  JavaScriptCore/runtime/CollectorHeapIterator.h | 6
-rw-r--r--  JavaScriptCore/runtime/ExceptionHelpers.cpp | 5
-rw-r--r--  JavaScriptCore/runtime/ExceptionHelpers.h | 1
-rw-r--r--  JavaScriptCore/runtime/Executable.cpp | 35
-rw-r--r--  JavaScriptCore/runtime/GCActivityCallback.cpp | 50
-rw-r--r--  JavaScriptCore/runtime/GCActivityCallback.h | 70
-rw-r--r--  JavaScriptCore/runtime/GCActivityCallbackCF.cpp | 83
-rw-r--r--  JavaScriptCore/runtime/GCHandle.cpp | 91
-rw-r--r--  JavaScriptCore/runtime/GCHandle.h | 120
-rw-r--r--  JavaScriptCore/runtime/JSArray.cpp | 260
-rw-r--r--  JavaScriptCore/runtime/JSArray.h | 30
-rw-r--r--  JavaScriptCore/runtime/JSGlobalData.cpp | 2
-rw-r--r--  JavaScriptCore/runtime/JSGlobalData.h | 2
-rw-r--r--  JavaScriptCore/runtime/JSLock.cpp | 6
-rw-r--r--  JavaScriptCore/runtime/JSLock.h | 2
-rw-r--r--  JavaScriptCore/runtime/UStringImpl.h | 2
-rw-r--r--  JavaScriptCore/runtime/WeakGCPtr.h | 46
-rw-r--r--  JavaScriptCore/wscript | 4
-rw-r--r--  JavaScriptCore/wtf/Bitmap.h | 131
-rw-r--r--  JavaScriptCore/wtf/FastMalloc.cpp | 4
-rw-r--r--  JavaScriptCore/wtf/Forward.h | 12
-rw-r--r--  JavaScriptCore/wtf/PageAllocation.cpp | 197
-rw-r--r--  JavaScriptCore/wtf/PageAllocation.h | 298
-rw-r--r--  JavaScriptCore/wtf/PageReservation.h | 258
-rw-r--r--  JavaScriptCore/wtf/Platform.h | 83
-rw-r--r--  JavaScriptCore/wtf/WTFThreadData.cpp | 2
-rw-r--r--  JavaScriptCore/wtf/WTFThreadData.h | 19
-rw-r--r--  JavaScriptCore/wtf/dtoa.cpp | 1
-rw-r--r--  JavaScriptCore/wtf/gobject/GOwnPtr.h | 2
-rw-r--r--  JavaScriptCore/wtf/qt/StringQt.cpp | 2
-rw-r--r--  JavaScriptCore/wtf/symbian/BlockAllocatorSymbian.cpp | 132
-rw-r--r--  JavaScriptCore/wtf/symbian/BlockAllocatorSymbian.h | 120
-rw-r--r--  JavaScriptCore/wtf/text/AtomicString.cpp | 6
-rw-r--r--  JavaScriptCore/wtf/text/AtomicString.h | 28
-rw-r--r--  JavaScriptCore/wtf/text/AtomicStringImpl.h | 6
-rw-r--r--  JavaScriptCore/wtf/text/StringBuffer.h | 4
-rw-r--r--  JavaScriptCore/wtf/text/StringHash.h | 19
-rw-r--r--  JavaScriptCore/wtf/text/StringImpl.cpp | 13
-rw-r--r--  JavaScriptCore/wtf/text/StringImpl.h | 44
-rw-r--r--  JavaScriptCore/wtf/text/StringStatics.cpp | 2
-rw-r--r--  JavaScriptCore/wtf/text/WTFString.cpp | 13
-rw-r--r--  JavaScriptCore/wtf/text/WTFString.h | 40
-rw-r--r--  JavaScriptCore/wtf/unicode/icu/CollatorICU.cpp | 2
-rw-r--r--  JavaScriptCore/yarr/RegexJIT.cpp | 12
99 files changed, 4512 insertions, 1764 deletions
diff --git a/JavaScriptCore/API/JSValueRef.cpp b/JavaScriptCore/API/JSValueRef.cpp
index a29a207..c22e8d8 100644
--- a/JavaScriptCore/API/JSValueRef.cpp
+++ b/JavaScriptCore/API/JSValueRef.cpp
@@ -213,6 +213,12 @@ JSValueRef JSValueMakeNumber(JSContextRef ctx, double value)
ExecState* exec = toJS(ctx);
APIEntryShim entryShim(exec);
+ // Our JSValue representation relies on a standard bit pattern for NaN. NaNs
+ // generated internally to JavaScriptCore naturally have that representation,
+ // but an external NaN might not.
+ if (isnan(value))
+ value = NaN;
+
return toRef(exec, jsNumber(exec, value));
}
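
A minimal sketch of the invariant behind this check (purifyNaN is a
hypothetical helper, not JSC API), assuming IEEE-754 doubles: the JSValue
encoding reserves NaN payload bits for tags and pointers, so only one NaN
bit pattern may represent the number NaN.

    #include <cmath>
    #include <limits>

    // Hypothetical helper mirroring the fix above: rewrite any incoming
    // NaN to the single bit pattern (0x7FF8000000000000 on common
    // platforms) that the value encoding expects.
    static double purifyNaN(double value)
    {
        return std::isnan(value) ? std::numeric_limits<double>::quiet_NaN() : value;
    }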
diff --git a/JavaScriptCore/Android.mk b/JavaScriptCore/Android.mk
index a728a4a..959fbc4 100644
--- a/JavaScriptCore/Android.mk
+++ b/JavaScriptCore/Android.mk
@@ -28,6 +28,8 @@ LOCAL_SRC_FILES := \
API/JSCallbackObject.cpp \
API/OpaqueJSString.cpp \
\
+ assembler/ARMv7Assembler.cpp \
+ \
bytecode/CodeBlock.cpp \
bytecode/JumpTable.cpp \
bytecode/Opcode.cpp \
@@ -98,6 +100,8 @@ LOCAL_SRC_FILES := \
runtime/Executable.cpp \
runtime/FunctionConstructor.cpp \
runtime/FunctionPrototype.cpp \
+ runtime/GCActivityCallback.cpp \
+ runtime/GCHandle.cpp \
runtime/GetterSetter.cpp \
runtime/GlobalEvalFunction.cpp \
runtime/Identifier.cpp \
diff --git a/JavaScriptCore/CMakeLists.txt b/JavaScriptCore/CMakeLists.txt
index c33146d..a944363 100644
--- a/JavaScriptCore/CMakeLists.txt
+++ b/JavaScriptCore/CMakeLists.txt
@@ -103,6 +103,8 @@ SET(JavaScriptCore_SOURCES
runtime/Executable.cpp
runtime/FunctionConstructor.cpp
runtime/FunctionPrototype.cpp
+ runtime/GCActivityCallback.cpp
+ runtime/GCHandle.cpp
runtime/GetterSetter.cpp
runtime/GlobalEvalFunction.cpp
runtime/Identifier.cpp
diff --git a/JavaScriptCore/ChangeLog b/JavaScriptCore/ChangeLog
index aafb0aa..c7c09b0 100644
--- a/JavaScriptCore/ChangeLog
+++ b/JavaScriptCore/ChangeLog
@@ -1,3 +1,1289 @@
+2010-08-09 Oliver Hunt <oliver@apple.com>
+
+ Fix Qt/ARM again, this time including the other changed file.
+
+ * jit/JIT.h:
+
+2010-08-09 Oliver Hunt <oliver@apple.com>
+
+ Fix Qt/ARM
+
+ C++ overload resolution, I stab at thee
+
+ * jit/JITInlineMethods.h:
+ (JSC::JIT::beginUninterruptedSequence):
+ (JSC::JIT::endUninterruptedSequence):
+
+2010-08-09 Oliver Hunt <oliver@apple.com>
+
+ Reviewed by Gavin Barraclough.
+
+ Allow an assembler/macroassembler to compact branches to more concise forms when linking
+ https://bugs.webkit.org/show_bug.cgi?id=43745
+
+ This patch makes it possible for an assembler to convert jumps into a different
+ (presumably more efficient) form at link time. It is currently implemented in the
+ ARMv7 JIT, as that JIT already had logic to delay linking of jumps until the end of
+ compilation. The ARMv7 JIT chooses between either a 4-byte short jump or a full
+ 32-bit offset (and rewrites ITTT instructions as appropriate), so it does not yet
+ produce the most compact form possible. The general design of the linker should
+ make it relatively simple to introduce new branch types with little effort, as the
+ linker has no knowledge of the exact form of any of the branches. (A sketch of
+ the compaction scheme follows this entry.)
+
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * assembler/ARMv7Assembler.cpp: Added.
+ (JSC::):
+ Record jump sizes
+
+ * assembler/ARMv7Assembler.h:
+ (JSC::ARMv7Assembler::LinkRecord::LinkRecord):
+ (JSC::ARMv7Assembler::LinkRecord::from):
+ (JSC::ARMv7Assembler::LinkRecord::setFrom):
+ (JSC::ARMv7Assembler::LinkRecord::to):
+ (JSC::ARMv7Assembler::LinkRecord::type):
+ (JSC::ARMv7Assembler::LinkRecord::linkType):
+ (JSC::ARMv7Assembler::LinkRecord::setLinkType):
+ Encapsulate LinkRecord fields so we can compress the values somewhat
+
+ (JSC::ARMv7Assembler::JmpSrc::JmpSrc):
+ Need to record the jump type now
+
+ (JSC::ARMv7Assembler::b):
+ (JSC::ARMv7Assembler::blx):
+ (JSC::ARMv7Assembler::bx):
+ Need to pass the jump types
+
+ (JSC::ARMv7Assembler::executableOffsetFor):
+ (JSC::ARMv7Assembler::jumpSizeDelta):
+ (JSC::ARMv7Assembler::linkRecordSourceComparator):
+ (JSC::ARMv7Assembler::computeJumpType):
+ (JSC::ARMv7Assembler::convertJumpTo):
+ (JSC::ARMv7Assembler::recordLinkOffsets):
+ (JSC::ARMv7Assembler::jumpsToLink):
+ (JSC::ARMv7Assembler::link):
+ (JSC::ARMv7Assembler::unlinkedCode):
+ Helper functions for the linker
+
+ (JSC::ARMv7Assembler::linkJump):
+ (JSC::ARMv7Assembler::canBeShortJump):
+ (JSC::ARMv7Assembler::linkLongJump):
+ (JSC::ARMv7Assembler::linkShortJump):
+ (JSC::ARMv7Assembler::linkJumpAbsolute):
+ Moving code around for the various jump linking functions
+
+ * assembler/AbstractMacroAssembler.h:
+ (JSC::AbstractMacroAssembler::beginUninterruptedSequence):
+ (JSC::AbstractMacroAssembler::endUninterruptedSequence):
+ We have to track uninterrupted sequences in any assembler that compacts
+ branches, as compaction is not something we're allowed to do in such sequences.
+ AbstractMacroAssembler has a nop version of these functions as it makes the
+ code elsewhere nicer.
+
+ * assembler/LinkBuffer.h:
+ (JSC::LinkBuffer::LinkBuffer):
+ (JSC::LinkBuffer::link):
+ (JSC::LinkBuffer::patch):
+ (JSC::LinkBuffer::locationOf):
+ (JSC::LinkBuffer::locationOfNearCall):
+ (JSC::LinkBuffer::returnAddressOffset):
+ (JSC::LinkBuffer::trampolineAt):
+ Updated these functions to adjust for any changed offsets in the linked code
+
+ (JSC::LinkBuffer::applyOffset):
+ A helper function to deal with the now potentially moved labels
+
+ (JSC::LinkBuffer::linkCode):
+ The new and mighty linker function
+
+ * assembler/MacroAssemblerARMv7.h:
+ (JSC::MacroAssemblerARMv7::MacroAssemblerARMv7):
+ (JSC::MacroAssemblerARMv7::beginUninterruptedSequence):
+ (JSC::MacroAssemblerARMv7::endUninterruptedSequence):
+ (JSC::MacroAssemblerARMv7::jumpsToLink):
+ (JSC::MacroAssemblerARMv7::unlinkedCode):
+ (JSC::MacroAssemblerARMv7::computeJumpType):
+ (JSC::MacroAssemblerARMv7::convertJumpTo):
+ (JSC::MacroAssemblerARMv7::recordLinkOffsets):
+ (JSC::MacroAssemblerARMv7::jumpSizeDelta):
+ (JSC::MacroAssemblerARMv7::link):
+ (JSC::MacroAssemblerARMv7::jump):
+ (JSC::MacroAssemblerARMv7::branchMul32):
+ (JSC::MacroAssemblerARMv7::breakpoint):
+ (JSC::MacroAssemblerARMv7::nearCall):
+ (JSC::MacroAssemblerARMv7::call):
+ (JSC::MacroAssemblerARMv7::ret):
+ (JSC::MacroAssemblerARMv7::tailRecursiveCall):
+ (JSC::MacroAssemblerARMv7::executableOffsetFor):
+ (JSC::MacroAssemblerARMv7::inUninterruptedSequence):
+ (JSC::MacroAssemblerARMv7::makeJump):
+ (JSC::MacroAssemblerARMv7::makeBranch):
+ All branches need to pass on their type now
+
+ * jit/ExecutableAllocator.h:
+ (JSC::ExecutablePool::returnLastBytes):
+ We can't know ahead of time how much space will be necessary to
+ hold the linked code if we're compacting branches; this new
+ function allows us to return the unused bytes at the end of linking.
+
+ * jit/JIT.cpp:
+ (JSC::JIT::JIT):
+ (JSC::JIT::privateCompile):
+ * jit/JIT.h:
+ (JSC::JIT::compile):
+ The JIT class now needs to take a linker offset so that recompilation
+ can generate the same jumps when using branch compaction.
+ * jit/JITArithmetic32_64.cpp:
+ (JSC::JIT::emitSlow_op_mod):
+ * jit/JITOpcodes.cpp:
+ (JSC::JIT::privateCompileCTIMachineTrampolines):
+ * jit/JITOpcodes32_64.cpp:
+ (JSC::JIT::privateCompileCTIMachineTrampolines):
+ (JSC::JIT::privateCompileCTINativeCall):
+ Update for new trampolineAt changes
+
+ * wtf/FastMalloc.cpp:
+ (WTF::TCMallocStats::):
+ * wtf/Platform.h:
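
A rough editorial sketch of the compaction scheme this entry describes
(hypothetical names, not the LinkBuffer API): record every jump at its
widest encoding, then, once targets are known, rewrite the ones whose
offsets fit a shorter form and hand the saved tail back to the allocator.

    #include <cstddef>
    #include <vector>

    struct JumpRecord {
        size_t from;    // offset of the branch instruction
        size_t to;      // offset of its target
        bool isShort;   // encoding chosen at link time
    };

    enum { longJumpSize = 4, shortJumpSize = 2 };

    // Single-pass simplification; the real linker also shifts later code
    // left and records per-offset deltas so labels can be adjusted.
    static size_t chooseEncodings(std::vector<JumpRecord>& jumps, size_t codeSize)
    {
        size_t saved = 0;
        for (size_t i = 0; i < jumps.size(); ++i) {
            long offset = static_cast<long>(jumps[i].to) - static_cast<long>(jumps[i].from);
            jumps[i].isShort = offset >= -256 && offset < 256; // assumed range
            if (jumps[i].isShort)
                saved += longJumpSize - shortJumpSize;
        }
        return codeSize - saved; // unused bytes go back via returnLastBytes
    }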
+
+2010-08-09 Gavin Barraclough <barraclough@apple.com>
+
+ Qt build fix III.
+
+ * wtf/text/WTFString.h:
+
+2010-08-09 Gavin Barraclough <barraclough@apple.com>
+
+ Qt build fix.
+
+ * wtf/qt/StringQt.cpp:
+
+2010-08-06 Gavin Barraclough <barraclough@apple.com>
+
+ Rubber stamped by Sam Weinig
+
+ Bug 43594 - Add string forwards to Forward.h
+ This allows us to remove forward declarations for these classes from
+ WebCore/WebKit (a step in moving these classes from WebCore:: to WTF::).
+
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * wtf/Forward.h:
+
+2010-08-07 Sheriff Bot <webkit.review.bot@gmail.com>
+
+ Unreviewed, rolling out r64938.
+ http://trac.webkit.org/changeset/64938
+ https://bugs.webkit.org/show_bug.cgi?id=43685
+
+ Did not compile on several ports (Requested by abarth on
+ #webkit).
+
+ * Android.mk:
+ * CMakeLists.txt:
+ * GNUmakefile.am:
+ * JavaScriptCore.gypi:
+ * JavaScriptCore.pro:
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj:
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * assembler/AbstractMacroAssembler.h:
+ * assembler/MacroAssembler.h:
+ * assembler/MacroAssemblerX86.h:
+ (JSC::MacroAssemblerX86::load32):
+ (JSC::MacroAssemblerX86::store32):
+ * assembler/X86Assembler.h:
+ (JSC::X86Assembler::movl_rm):
+ (JSC::X86Assembler::movl_mr):
+ * bytecode/CodeBlock.cpp:
+ (JSC::CodeBlock::markAggregate):
+ * bytecode/Instruction.h:
+ (JSC::PolymorphicAccessStructureList::PolymorphicStubInfo::):
+ (JSC::PolymorphicAccessStructureList::PolymorphicStubInfo::set):
+ (JSC::PolymorphicAccessStructureList::PolymorphicAccessStructureList):
+ * bytecode/StructureStubInfo.cpp:
+ (JSC::StructureStubInfo::deref):
+ * bytecode/StructureStubInfo.h:
+ (JSC::StructureStubInfo::initGetByIdProto):
+ (JSC::StructureStubInfo::initGetByIdChain):
+ (JSC::StructureStubInfo::):
+ * jit/JIT.h:
+ * jit/JITMarkObjects.cpp: Removed.
+ * jit/JITPropertyAccess.cpp:
+ (JSC::JIT::compileGetDirectOffset):
+ (JSC::JIT::testPrototype):
+ (JSC::JIT::privateCompilePutByIdTransition):
+ (JSC::JIT::privateCompileGetByIdProto):
+ (JSC::JIT::privateCompileGetByIdProtoList):
+ (JSC::JIT::privateCompileGetByIdChainList):
+ (JSC::JIT::privateCompileGetByIdChain):
+ * jit/JITPropertyAccess32_64.cpp:
+ (JSC::JIT::compileGetDirectOffset):
+ (JSC::JIT::testPrototype):
+ (JSC::JIT::privateCompilePutByIdTransition):
+ (JSC::JIT::privateCompileGetByIdProto):
+ (JSC::JIT::privateCompileGetByIdProtoList):
+ (JSC::JIT::privateCompileGetByIdChainList):
+ (JSC::JIT::privateCompileGetByIdChain):
+ * jit/JITStubs.cpp:
+ (JSC::setupPolymorphicProtoList):
+ * wtf/Platform.h:
+
+2010-08-07 Nathan Lawrence <nlawrence@apple.com>
+
+ Reviewed by Geoffrey Garen.
+
+ The JIT code contains a number of direct references to GC'd objects.
+ When we have movable objects, these references will need to be
+ updated.
+
+ * Android.mk:
+ * CMakeLists.txt:
+ * GNUmakefile.am:
+ * JavaScriptCore.gypi:
+ * JavaScriptCore.pro:
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * assembler/AbstractMacroAssembler.h:
+ (JSC::AbstractMacroAssembler::int32AtLocation):
+ (JSC::AbstractMacroAssembler::pointerAtLocation):
+ (JSC::AbstractMacroAssembler::jumpTarget):
+ * assembler/MacroAssembler.h:
+ (JSC::MacroAssembler::loadPtrWithPatch):
+ Normally, loadPtr will optimize when the register is eax. Since
+ the slightly smaller instruction changes the offsets, it messes up
+ our ability to repatch the code. We added this new instruction
+ that guarantees a constant size.
+ * assembler/MacroAssemblerX86.h:
+ (JSC::MacroAssemblerX86::load32WithPatch):
+ Changed load32 in the same way described above.
+ (JSC::MacroAssemblerX86::load32):
+ Moved the logic to optimize load32 from movl_mr to load32
+ (JSC::MacroAssemblerX86::store32):
+ Moved the logic to optimize store32 from movl_rm to store32
+ * assembler/X86Assembler.h:
+ (JSC::X86Assembler::movl_rm):
+ (JSC::X86Assembler::movl_mr):
+ (JSC::X86Assembler::int32AtLocation):
+ (JSC::X86Assembler::pointerAtLocation):
+ (JSC::X86Assembler::jumpTarget):
+ * bytecode/CodeBlock.cpp:
+ (JSC::CodeBlock::markAggregate):
+ * bytecode/Instruction.h:
+ As described in StructureStubInfo.h, we needed to add additional
+ fields to both StructureStubInfo and
+ PolymorphicAccessStructureList so that we can determine the
+ structure of the JITed code at patch time.
+ (JSC::PolymorphicAccessStructureList::PolymorphicStubInfo::set):
+ (JSC::PolymorphicAccessStructureList::PolymorphicAccessStructureList):
+ * bytecode/StructureStubInfo.cpp:
+ (JSC::StructureStubInfo::markAggregate):
+ Added this function to mark the JITed code that corresponds to
+ this structure stub info.
+ * bytecode/StructureStubInfo.h:
+ (JSC::StructureStubInfo::initGetByIdProto):
+ (JSC::StructureStubInfo::initGetByIdChain):
+ (JSC::StructureStubInfo::):
+ * jit/JIT.h:
+ * jit/JITMarkObjects.cpp: Added.
+ (JSC::JIT::patchPrototypeStructureAddress):
+ (JSC::JIT::patchGetDirectOffset):
+ (JSC::JIT::markGetByIdProto):
+ (JSC::JIT::markGetByIdChain):
+ (JSC::JIT::markGetByIdProtoList):
+ (JSC::JIT::markPutByIdTransition):
+ (JSC::JIT::markGlobalObjectReference):
+ * jit/JITPropertyAccess.cpp:
+ Added asserts for the patch offsets.
+ (JSC::JIT::compileGetDirectOffset):
+ (JSC::JIT::testPrototype):
+ (JSC::JIT::privateCompilePutByIdTransition):
+ (JSC::JIT::privateCompileGetByIdProto):
+ (JSC::JIT::privateCompileGetByIdProtoList):
+ (JSC::JIT::privateCompileGetByIdChainList):
+ (JSC::JIT::privateCompileGetByIdChain):
+ * jit/JITPropertyAccess32_64.cpp:
+ (JSC::JIT::compileGetDirectOffset):
+ (JSC::JIT::testPrototype):
+ (JSC::JIT::privateCompilePutByIdTransition):
+ (JSC::JIT::privateCompileGetByIdProto):
+ (JSC::JIT::privateCompileGetByIdProtoList):
+ (JSC::JIT::privateCompileGetByIdChainList):
+ (JSC::JIT::privateCompileGetByIdChain):
+ * jit/JITStubs.cpp:
+ (JSC::setupPolymorphicProtoList):
+ * wtf/Platform.h:
+ Added ENABLE_MOVABLE_GC_OBJECTS flag
+
+2010-08-07 Michael Saboff <msaboff@apple.com>
+
+ Reviewed by Geoffrey Garen.
+
+ Revert JSArray to point to the beginning of the contained ArrayStorage
+ struct. This is described in
+ https://bugs.webkit.org/show_bug.cgi?id=43526.
+
+ * jit/JITPropertyAccess.cpp:
+ (JSC::JIT::emit_op_get_by_val):
+ (JSC::JIT::emit_op_put_by_val):
+ (JSC::JIT::privateCompilePatchGetArrayLength):
+ * jit/JITPropertyAccess32_64.cpp:
+ (JSC::JIT::emit_op_get_by_val):
+ (JSC::JIT::emit_op_put_by_val):
+ (JSC::JIT::privateCompilePatchGetArrayLength):
+ * runtime/JSArray.cpp:
+ (JSC::JSArray::JSArray):
+ (JSC::JSArray::~JSArray):
+ (JSC::JSArray::getOwnPropertySlot):
+ (JSC::JSArray::getOwnPropertyDescriptor):
+ (JSC::JSArray::put):
+ (JSC::JSArray::putSlowCase):
+ (JSC::JSArray::deleteProperty):
+ (JSC::JSArray::getOwnPropertyNames):
+ (JSC::JSArray::getNewVectorLength):
+ (JSC::JSArray::increaseVectorLength):
+ (JSC::JSArray::increaseVectorPrefixLength):
+ (JSC::JSArray::setLength):
+ (JSC::JSArray::pop):
+ (JSC::JSArray::push):
+ (JSC::JSArray::shiftCount):
+ (JSC::JSArray::unshiftCount):
+ (JSC::JSArray::sortNumeric):
+ (JSC::JSArray::sort):
+ (JSC::JSArray::fillArgList):
+ (JSC::JSArray::copyToRegisters):
+ (JSC::JSArray::compactForSorting):
+ (JSC::JSArray::subclassData):
+ (JSC::JSArray::setSubclassData):
+ (JSC::JSArray::checkConsistency):
+ * runtime/JSArray.h:
+ (JSC::JSArray::length):
+ (JSC::JSArray::canGetIndex):
+ (JSC::JSArray::getIndex):
+ (JSC::JSArray::setIndex):
+ (JSC::JSArray::uncheckedSetIndex):
+ (JSC::JSArray::markChildrenDirect):
+
+2010-08-07 Kwang Yul Seo <skyul@company100.net>
+
+ Reviewed by Eric Seidel.
+
+ Add ENABLE(YARR) guard around JSGlobalData::m_regexAllocator
+ https://bugs.webkit.org/show_bug.cgi?id=43399
+
+ m_regexAllocator is used only by RegExp::compile which is guarded with ENABLE(YARR).
+
+ * runtime/JSGlobalData.h:
+
+2010-08-07 Patrick Roland Gansterer <paroga@paroga.com>
+
+ Reviewed by Eric Seidel.
+
+ [Qt] Enable JIT on WinCE
+ https://bugs.webkit.org/show_bug.cgi?id=43303
+
+ Add ExtraCompiler for generating GeneratedJITStubs_MSVC.asm.
+
+ * DerivedSources.pro:
+
+2010-08-07 Dan Bernstein <mitz@apple.com>
+
+ Reviewed by Anders Carlsson.
+
+ Created a separate SimpleFontData constructor exclusively for SVG fonts and moved the CTFontRef
+ from SimpleFontData to FontPlatformData.
+ https://bugs.webkit.org/show_bug.cgi?id=43674
+
+ * wtf/Platform.h: Moved definitions of WTF_USE_CORE_TEXT and WTF_USE_ATSUI here from WebCore/config.h.
+
+2010-08-07 Zoltan Herczeg <zherczeg@webkit.org>
+
+ Reviewed by Eric Seidel.
+
+ Bitmap.h has no default constructor
+ https://bugs.webkit.org/show_bug.cgi?id=43619
+
+ Without a constructor, the initial bits of the Bitmap class
+ are undefined. If only a few bits, or none at all, start out as 0,
+ the memory area provided by AlignedMemoryAllocator can easily be exhausted.
+
+ Csaba Osztrogonác helped to find this bug.
+
+ * wtf/Bitmap.h:
+ (WTF::::Bitmap):
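
A small sketch of the fix, with assumed word-based storage: without a
constructor the words hold indeterminate values, so bits can read as
already set and the allocator concludes the arena is full.

    #include <cstddef>
    #include <stdint.h>

    template<size_t bitCount>
    class BitmapSketch {
    public:
        BitmapSketch() { clearAll(); } // the missing default constructor

        bool get(size_t n) const { return m_bits[n / 32] & (1u << (n % 32)); }
        void set(size_t n) { m_bits[n / 32] |= (1u << (n % 32)); }
        void clearAll()
        {
            for (size_t i = 0; i < wordCount; ++i)
                m_bits[i] = 0;
        }

    private:
        static const size_t wordCount = (bitCount + 31) / 32;
        uint32_t m_bits[wordCount];
    };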
+
+2010-08-06 Rafael Antognolli <antognolli@profusion.mobi>
+
+ [EFL] Build fix.
+
+ * CMakeLists.txt: add runtime/GCHandle.cpp.
+
+2010-08-06 Jessie Berlin <jberlin@apple.com>
+
+ Roll out http://trac.webkit.org/changeset/64801, which broke the Safari Windows Build.
+ Unreviewed.
+
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * wtf/Forward.h:
+
+2010-08-06 Jessie Berlin <jberlin@apple.com>
+
+ Windows Build Fix (continued). Unreviewed.
+
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def:
+
+2010-08-06 Jessie Berlin <jberlin@apple.com>
+
+ Windows Build Fix. Unreviewed.
+
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj:
+ Add GCHandle.h and GCHandle.cpp.
+
+2010-08-06 Nathan Lawrence <nlawrence@apple.com>
+
+ Reviewed by Geoffrey Garen.
+
+ https://bugs.webkit.org/show_bug.cgi?id=43207
+
+ Instead of pointing directly at the GC'd object, WeakGCPtrs should
+ point at an array of pointers that can be updated for movable
+ objects.
+
+ * Android.mk:
+ * GNUmakefile.am:
+ * JavaScriptCore.exp:
+ * JavaScriptCore.gypi:
+ * JavaScriptCore.pro:
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * runtime/Collector.cpp:
+ (JSC::Heap::destroy):
+ (JSC::Heap::allocateBlock):
+ (JSC::Heap::freeBlock):
+ (JSC::Heap::updateWeakGCHandles):
+ (JSC::WeakGCHandlePool::update):
+ (JSC::Heap::addWeakGCHandle):
+ (JSC::Heap::markRoots):
+ * runtime/Collector.h:
+ (JSC::Heap::weakGCHandlePool):
+ * runtime/GCHandle.cpp: Added.
+ (JSC::WeakGCHandle::pool):
+ (JSC::WeakGCHandlePool::WeakGCHandlePool):
+ (JSC::WeakGCHandlePool::allocate):
+ (JSC::WeakGCHandlePool::free):
+ (JSC::WeakGCHandlePool::operator new):
+ * runtime/GCHandle.h: Added.
+ (JSC::WeakGCHandle::isValidPtr):
+ (JSC::WeakGCHandle::isPtr):
+ (JSC::WeakGCHandle::isNext):
+ (JSC::WeakGCHandle::invalidate):
+ (JSC::WeakGCHandle::get):
+ (JSC::WeakGCHandle::set):
+ (JSC::WeakGCHandle::getNextInFreeList):
+ (JSC::WeakGCHandle::setNextInFreeList):
+ (JSC::WeakGCHandlePool::isFull):
+ * runtime/WeakGCPtr.h:
+ (JSC::WeakGCPtr::WeakGCPtr):
+ (JSC::WeakGCPtr::~WeakGCPtr):
+ (JSC::WeakGCPtr::get):
+ (JSC::WeakGCPtr::clear):
+ (JSC::WeakGCPtr::assign):
+ (JSC::get):
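
An editorial sketch of the indirection being introduced (types are
illustrative, not the real JSC classes): the weak pointer holds a slot in
a handle pool, and the collector rewrites that slot when the cell moves
or dies.

    struct JSCell;

    struct WeakGCHandle {
        JSCell* cell; // written by the GC: forwarded on move, zeroed on death
    };

    class WeakGCPtrSketch {
    public:
        explicit WeakGCPtrSketch(WeakGCHandle* handle) : m_handle(handle) { }

        JSCell* get() const { return m_handle ? m_handle->cell : 0; }
        void clear() { if (m_handle) m_handle->cell = 0; }

    private:
        WeakGCHandle* m_handle; // one extra hop instead of a direct cell pointer
    };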
+
+2010-08-06 Tor Arne Vestbø <tor.arne.vestbo@nokia.com>
+
+ Reviewed by Antonio Gomes.
+
+ [Qt] Fix warnings about difference in symbol visibility on Mac OS X
+
+ * jsc.pro:
+
+2010-08-06 Zoltan Herczeg <zherczeg@webkit.org>
+
+ Reviewed by Darin Adler.
+
+ Refactor identifier parsing in lexer
+ https://bugs.webkit.org/show_bug.cgi?id=41845
+
+ The code is refactored to avoid gotos. The new code
+ has the same performance as the old one.
+
+ SunSpider --parse-only: no change (from 34.0ms to 33.6ms)
+ SunSpider: no change (from 523.2ms to 523.5ms)
+
+ * parser/Lexer.cpp:
+ (JSC::Lexer::parseIdent):
+ (JSC::Lexer::lex):
+ * parser/Lexer.h:
+
+2010-08-06 Gabor Loki <loki@webkit.org>
+
+ Reviewed by Gavin Barraclough.
+
+ The ARM JIT does not support JSValue32_64 with RVCT
+ https://bugs.webkit.org/show_bug.cgi?id=43411
+
+ JSValue32_64 is enabled for RVCT by default.
+
+ * create_jit_stubs:
+ * jit/JITStubs.cpp:
+ (JSC::ctiTrampoline):
+ (JSC::ctiVMThrowTrampoline):
+ (JSC::ctiOpThrowNotCaught):
+ * wtf/Platform.h:
+
+2010-08-05 Chao-ying Fu <fu@mips.com>
+
+ Reviewed by Darin Adler.
+
+ Define WTF_USE_ARENA_ALLOC_ALIGNMENT_INTEGER for MIPS
+ https://bugs.webkit.org/show_bug.cgi?id=43514
+
+ MIPS needs to define WTF_USE_ARENA_ALLOC_ALIGNMENT_INTEGER, so that
+ RenderArena::allocate() can return 8-byte aligned memory to avoid
+ exceptions on sdc1/ldc1.
+
+ * wtf/Platform.h:
+
+2010-08-05 Gavin Barraclough <barraclough@apple.com>
+
+ Rubber stamped by Sam Weinig
+
+ Bug 43594 - Add string forwards to Forward.h
+ This allows us to remove forward declarations for these classes from
+ WebCore/WebKit (a step in moving these classes from WebCore:: to WTF::).
+
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * wtf/Forward.h:
+
+2010-08-05 Geoffrey Garen <ggaren@apple.com>
+
+ Reviewed by Mark Rowe.
+
+ Fixed leak seen on buildbot.
+
+ * runtime/GCActivityCallbackCF.cpp:
+ (JSC::DefaultGCActivityCallback::DefaultGCActivityCallback):
+ (JSC::DefaultGCActivityCallback::~DefaultGCActivityCallback):
+ (JSC::DefaultGCActivityCallback::operator()): Make our timer a RetainPtr,
+ since anything less would be uncivilized.
+
+2010-08-05 Andy Estes <aestes@apple.com>
+
+ Reviewed by David Kilzer.
+
+ Rename iOS-related OS and PLATFORM macros.
+ https://bugs.webkit.org/show_bug.cgi?id=43493
+
+ Rename WTF_OS_IPHONE_OS to WTF_OS_IOS, WTF_PLATFORM_IPHONE to
+ WTF_PLATFORM_IOS, and WTF_PLATFORM_IPHONE_SIMULATOR to
+ WTF_PLATFORM_IOS_SIMULATOR.
+
+ * jit/ExecutableAllocator.h:
+ * jit/JITStubs.cpp:
+ * profiler/ProfilerServer.mm:
+ (-[ProfilerServer init]):
+ * wtf/FastMalloc.cpp:
+ (WTF::TCMallocStats::):
+ * wtf/Platform.h:
+ * wtf/unicode/icu/CollatorICU.cpp:
+ (WTF::Collator::userDefault):
+
+2010-08-05 Nathan Lawrence <nlawrence@apple.com>
+
+ Reviewed by Darin Adler.
+
+ https://bugs.webkit.org/show_bug.cgi?id=43464
+
+ Currently, the global object is being embedded in the JavaScriptCore
+ bytecode; however, since the global object is the same for all opcodes
+ in a code block, we can have the global object just be a member of the
+ associated code block.
+
+ Additionally, I added an assert inside of emitOpcode that verifies
+ that the last generated opcode was of the correct length.
+
+ * bytecode/CodeBlock.cpp:
+ (JSC::CodeBlock::CodeBlock):
+ (JSC::CodeBlock::derefStructures):
+ (JSC::CodeBlock::markAggregate):
+ * bytecode/CodeBlock.h:
+ (JSC::CodeBlock::globalObject):
+ (JSC::GlobalCodeBlock::GlobalCodeBlock):
+ (JSC::ProgramCodeBlock::ProgramCodeBlock):
+ (JSC::EvalCodeBlock::EvalCodeBlock):
+ (JSC::FunctionCodeBlock::FunctionCodeBlock):
+ * bytecode/Opcode.h:
+ (JSC::opcodeLength):
+ * bytecompiler/BytecodeGenerator.cpp:
+ (JSC::BytecodeGenerator::BytecodeGenerator):
+ (JSC::BytecodeGenerator::emitOpcode):
+ Added an assert to check that the last generated opcode is the
+ correct length.
+ (JSC::BytecodeGenerator::rewindBinaryOp):
+ Changed the last opcode to op_end since the length will no longer
+ be correct.
+ (JSC::BytecodeGenerator::rewindUnaryOp):
+ Changed the last opcode to op_end since the length will no longer
+ be correct.
+ (JSC::BytecodeGenerator::emitResolve):
+ (JSC::BytecodeGenerator::emitGetScopedVar):
+ (JSC::BytecodeGenerator::emitPutScopedVar):
+ (JSC::BytecodeGenerator::emitResolveWithBase):
+ * bytecompiler/BytecodeGenerator.h:
+ * interpreter/Interpreter.cpp:
+ (JSC::Interpreter::resolveGlobal):
+ (JSC::Interpreter::resolveGlobalDynamic):
+ (JSC::Interpreter::privateExecute):
+ * jit/JITOpcodes.cpp:
+ (JSC::JIT::emit_op_get_global_var):
+ (JSC::JIT::emit_op_put_global_var):
+ (JSC::JIT::emit_op_resolve_global):
+ (JSC::JIT::emitSlow_op_resolve_global):
+ (JSC::JIT::emit_op_resolve_global_dynamic):
+ (JSC::JIT::emitSlow_op_resolve_global_dynamic):
+ * jit/JITOpcodes32_64.cpp:
+ (JSC::JIT::emit_op_get_global_var):
+ (JSC::JIT::emit_op_put_global_var):
+ (JSC::JIT::emit_op_resolve_global):
+ (JSC::JIT::emitSlow_op_resolve_global):
+ * jit/JITStubs.cpp:
+ (JSC::cti_op_resolve_global):
+ * runtime/Executable.cpp:
+ (JSC::FunctionExecutable::compileForCallInternal):
+ (JSC::FunctionExecutable::compileForConstructInternal):
+ (JSC::FunctionExecutable::reparseExceptionInfo):
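
A sketch of the added assertion (illustrative; names follow the entry):
after each emit, the generator can verify that the instruction stream
grew by exactly opcodeLength(opcode).

    #include <cassert>
    #include <cstddef>
    #include <vector>

    enum OpcodeID { op_end, op_add /* ... */ };
    size_t opcodeLength(OpcodeID id) { return id == op_add ? 4 : 1; } // illustrative table

    struct GeneratorSketch {
        GeneratorSketch() : lastOpcodePosition(0), lastOpcodeID(op_end) { }

        void emitOpcode(OpcodeID opcodeID)
        {
            // Check that the last opcode emitted exactly its declared
            // number of words before this one begins.
            if (!instructions.empty())
                assert(instructions.size() - lastOpcodePosition == opcodeLength(lastOpcodeID));
            lastOpcodePosition = instructions.size();
            lastOpcodeID = opcodeID;
            instructions.push_back(opcodeID); // operands appended by the caller
        }

        std::vector<int> instructions;
        size_t lastOpcodePosition;
        OpcodeID lastOpcodeID;
    };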
+
+2010-08-05 Gavin Barraclough <barraclough@apple.com>
+
+ Reviewed by Sam Weinig.
+
+ Bug 43185 - Switch RegisterFile over to use PageAllocation
+
+ Remove platform-specific memory allocation code.
+
+ * interpreter/RegisterFile.cpp:
+ (JSC::RegisterFile::~RegisterFile):
+ (JSC::RegisterFile::releaseExcessCapacity):
+ * interpreter/RegisterFile.h:
+ (JSC::RegisterFile::RegisterFile):
+ (JSC::RegisterFile::grow):
+ (JSC::RegisterFile::checkAllocatedOkay):
+ * wtf/PageAllocation.cpp:
+ (WTF::PageAllocation::lastError):
+ * wtf/PageAllocation.h:
+ (WTF::PageAllocation::allocate):
+ (WTF::PageAllocation::allocateAt):
+ (WTF::PageAllocation::allocateAligned):
+ (WTF::PageAllocation::pageSize):
+ (WTF::PageAllocation::isPageAligned):
+ (WTF::PageAllocation::isPowerOfTwo):
+ * wtf/PageReservation.h:
+ (WTF::PageReservation::commit):
+ (WTF::PageReservation::decommit):
+ (WTF::PageReservation::reserve):
+ (WTF::PageReservation::reserveAt):
+
+2010-08-05 Michael Saboff <msaboff@apple.com>
+
+ Reviewed by Darin Adler.
+
+ Fixed https://bugs.webkit.org/show_bug.cgi?id=43401 where array
+ contents aren't properly initialized as part of unshift.
+
+ * runtime/JSArray.cpp:
+ (JSC::JSArray::unshiftCount):
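
A sketch of the invariant the fix restores (illustrative code, not the
JSC implementation): every slot exposed at the front of the vector must
be written before the array is observable, or the collector can mark
stale garbage.

    #include <cstddef>

    struct ValueSlot { void* bits; };

    static void unshiftBy(ValueSlot* vector, size_t used, size_t count)
    {
        // Move the existing elements up by 'count', back to front.
        for (size_t i = used; i > 0; --i)
            vector[i - 1 + count] = vector[i - 1];
        // Initialize the newly exposed prefix; this is the missing step.
        for (size_t i = 0; i < count; ++i)
            vector[i].bits = 0;
    }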
+
+2010-08-05 Jian Li <jianli@chromium.org>
+
+ Reviewed by David Levin.
+
+ Unify blob related feature defines to ENABLE(BLOB).
+ https://bugs.webkit.org/show_bug.cgi?id=43081
+
+ * Configurations/FeatureDefines.xcconfig:
+
+2010-08-05 Gustavo Noronha Silva <gustavo.noronha@collabora.co.uk>
+
+ Rubber-stamped by Xan Lopez.
+
+ Remove GHashTable left-overs. GHashTable is ref-counted, and is
+ correctly supported by GRefPtr.
+
+ * wtf/gobject/GOwnPtr.h:
+
+2010-08-05 Gustavo Noronha Silva <gustavo.noronha@collabora.co.uk>
+
+ Unreviewed.
+
+ Typo fix that makes distcheck happy.
+
+ * GNUmakefile.am:
+
+2010-08-03 Geoffrey Garen <ggaren@apple.com>
+
+ Reviewed by Oliver Hunt and Beth Dakin.
+
+ https://bugs.webkit.org/show_bug.cgi?id=43461
+ Invalid NaN parsing
+
+ * wtf/dtoa.cpp: Turn off the dtoa feature that allows you to specify a
+ non-standard NaN representation, since our NaN encoding assumes that all
+ true NaNs have the standard bit pattern.
+
+ * API/JSValueRef.cpp:
+ (JSValueMakeNumber): Don't allow an API client to accidentally specify
+ a non-standard NaN either.
+
+2010-08-04 Gavin Barraclough <barraclough@apple.com>
+
+ Windows build fix part II.
+
+ * wtf/PageReservation.h:
+ (WTF::PageReservation::systemReserve):
+
+2010-08-04 Gavin Barraclough <barraclough@apple.com>
+
+ Windows build fix.
+
+ * wtf/PageReservation.h:
+ (WTF::PageReservation::systemReserve):
+
+2010-08-04 Gavin Barraclough <barraclough@apple.com>
+
+ Build fix - add new header to !Mac projects.
+
+ * GNUmakefile.am:
+ * JavaScriptCore.gypi:
+ * JavaScriptCore.vcproj/WTF/WTF.vcproj:
+
+2010-08-04 Gavin Barraclough <barraclough@apple.com>
+
+ Reviewed by Sam Weinig.
+
+ Bug 43515 - Fix small design issues with PageAllocation, split out PageReservation.
+
+ The PageAllocation class has a number of issues:
+ * Changes in bug #43269 accidentally switched SYMBIAN over to use malloc/free to allocate
+ blocks of memory for the GC heap, instead of allocating RChunks. Revert this change in
+ behaviour.
+ * In order for PageAllocation to work correctly on WinCE we should be decommitting memory
+ before deallocating. In order to simplify understanding the expected state at deallocate,
+ split behaviour out into PageAllocation and PageReservation classes. Require that all
+ memory be decommitted before calling deallocate on a PageReservation, add asserts to
+ enforce this.
+ * add many missing asserts.
+ * inline more functions.
+ * remove ability to create sub-PageAllocations from an existing PageAllocation object -
+ this presented an interface that would allow sub regions to be deallocated, which would
+ not have provided expected behaviour.
+ * remove writable/executable arguments to commit, this value can be cached at the point
+ the memory is reserved.
+ * remove writable/executable arguments to allocateAligned, protection other than RW is not
+ supported.
+ * add missing checks for overflow & failed allocation to mmap path through allocateAligned.
+
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * jit/ExecutableAllocator.cpp:
+ (JSC::ExecutableAllocator::intializePageSize):
+ * jit/ExecutableAllocator.h:
+ (JSC::ExecutablePool::Allocation::Allocation):
+ (JSC::ExecutablePool::Allocation::base):
+ (JSC::ExecutablePool::Allocation::size):
+ (JSC::ExecutablePool::Allocation::operator!):
+ * jit/ExecutableAllocatorFixedVMPool.cpp:
+ (JSC::FixedVMPoolAllocator::reuse):
+ (JSC::FixedVMPoolAllocator::coalesceFreeSpace):
+ (JSC::FixedVMPoolAllocator::FixedVMPoolAllocator):
+ (JSC::FixedVMPoolAllocator::alloc):
+ (JSC::FixedVMPoolAllocator::free):
+ (JSC::FixedVMPoolAllocator::allocInternal):
+ * runtime/AlignedMemoryAllocator.h:
+ (JSC::::allocate):
+ (JSC::::AlignedMemoryAllocator):
+ * runtime/Collector.cpp:
+ (JSC::Heap::allocateBlock):
+ * runtime/Collector.h:
+ * wtf/PageAllocation.cpp:
+ * wtf/PageAllocation.h:
+ (WTF::PageAllocation::operator!):
+ (WTF::PageAllocation::allocate):
+ (WTF::PageAllocation::allocateAt):
+ (WTF::PageAllocation::allocateAligned):
+ (WTF::PageAllocation::deallocate):
+ (WTF::PageAllocation::pageSize):
+ (WTF::PageAllocation::systemAllocate):
+ (WTF::PageAllocation::systemAllocateAt):
+ (WTF::PageAllocation::systemAllocateAligned):
+ (WTF::PageAllocation::systemDeallocate):
+ (WTF::PageAllocation::systemPageSize):
+ * wtf/PageReservation.h: Copied from JavaScriptCore/wtf/PageAllocation.h.
+ (WTF::PageReservation::PageReservation):
+ (WTF::PageReservation::commit):
+ (WTF::PageReservation::decommit):
+ (WTF::PageReservation::reserve):
+ (WTF::PageReservation::reserveAt):
+ (WTF::PageReservation::deallocate):
+ (WTF::PageReservation::systemCommit):
+ (WTF::PageReservation::systemDecommit):
+ (WTF::PageReservation::systemReserve):
+ (WTF::PageReservation::systemReserveAt):
+ * wtf/Platform.h:
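
A sketch of the invariant the split enforces (illustrative, simplified to
a byte counter; the real classes track platform state): all committed
memory must be decommitted before a reservation may be deallocated.

    #include <cassert>
    #include <cstddef>

    class PageReservationSketch {
    public:
        PageReservationSketch() : m_committed(0) { }

        void commit(size_t bytes)   { m_committed += bytes; }
        void decommit(size_t bytes) { assert(m_committed >= bytes); m_committed -= bytes; }

        void deallocate()
        {
            // The new contract: nothing may still be committed here.
            // (On WinCE this ordering is required for correctness.)
            assert(!m_committed);
        }

    private:
        size_t m_committed;
    };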
+
+2010-08-04 Sheriff Bot <webkit.review.bot@gmail.com>
+
+ Unreviewed, rolling out r64655.
+ http://trac.webkit.org/changeset/64655
+ https://bugs.webkit.org/show_bug.cgi?id=43496
+
+ JavaScriptCore references patch seems to have caused
+ regressions in QT and GTK builds (Requested by nlawrence on
+ #webkit).
+
+ * bytecode/CodeBlock.cpp:
+ (JSC::CodeBlock::markAggregate):
+ * runtime/Collector.cpp:
+ (JSC::Heap::markConservatively):
+ * runtime/JSCell.h:
+ (JSC::JSValue::asCell):
+ (JSC::MarkStack::append):
+ * runtime/JSGlobalObject.cpp:
+ (JSC::markIfNeeded):
+ * runtime/JSONObject.cpp:
+ (JSC::Stringifier::Holder::object):
+ * runtime/JSObject.h:
+ (JSC::JSObject::prototype):
+ * runtime/JSStaticScopeObject.cpp:
+ (JSC::JSStaticScopeObject::markChildren):
+ * runtime/JSValue.h:
+ (JSC::JSValue::):
+ (JSC::JSValue::JSValue):
+ (JSC::JSValue::asCell):
+ * runtime/MarkStack.h:
+ * runtime/NativeErrorConstructor.cpp:
+ * runtime/NativeErrorConstructor.h:
+ * runtime/Structure.h:
+ (JSC::Structure::storedPrototype):
+
+2010-08-04 Gavin Barraclough <barraclough@apple.com>
+
+ Rubber stamped by Sam Weinig.
+
+ Enable JSVALUE64 for CPU(PPC64).
+ Basic browsing seems to work.
+
+ * wtf/Platform.h:
+
+2010-08-04 Nathan Lawrence <nlawrence@apple.com>
+
+ Reviewed by Darin Adler.
+
+ Refactoring MarkStack::append to take a reference. This is in
+ preparation for movable objects when we will need to update pointers.
+ http://bugs.webkit.org/show_bug.cgi?id=41177
+
+ Unless otherwise noted, all changes are to either return by reference
+ or pass a reference to MarkStack::append.
+
+ * bytecode/CodeBlock.cpp:
+ (JSC::CodeBlock::markAggregate):
+ * runtime/Collector.cpp:
+ (JSC::Heap::markConservatively):
+ Added a temporary variable to prevent marking from changing an
+ unknown value on the stack
+ * runtime/JSCell.h:
+ (JSC::JSValue::asCell):
+ (JSC::MarkStack::append):
+ (JSC::MarkStack::appendInternal):
+ * runtime/JSGlobalObject.cpp:
+ (JSC::markIfNeeded):
+ * runtime/JSONObject.cpp:
+ (JSC::Stringifier::Holder::object):
+ * runtime/JSObject.h:
+ (JSC::JSObject::prototype):
+ * runtime/JSStaticScopeObject.cpp:
+ (JSC::JSStaticScopeObject::markChildren):
+ * runtime/JSValue.h:
+ (JSC::JSValue::JSValue):
+ (JSC::JSValue::asCell):
+ * runtime/MarkStack.h:
+ * runtime/NativeErrorConstructor.cpp:
+ (JSC::NativeErrorConstructor::createStructure):
+ Changed the structure flags to include a custom markChildren.
+ (JSC::NativeErrorConstructor::markChildren):
+ Update the prototype of the stored structure.
+ * runtime/NativeErrorConstructor.h:
+ Added structure flags.
+ * runtime/Structure.h:
+ (JSC::Structure::storedPrototype):
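
A sketch of why append now takes a reference (illustrative and greatly
simplified): with movable objects the marker must be able to write a
forwarded address back through the very slot it read, which a by-value
argument cannot express.

    struct JSCell;

    struct MarkStackSketch {
        // Taking JSCell*& rather than JSCell* means the marker can later
        // update the slot in place, e.g. slot = forwardedAddress(slot);
        void append(JSCell*& slot)
        {
            if (!slot)
                return;
            // ... push the cell so its children get marked ...
        }
    };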
+
+2010-08-03 Nathan Lawrence <nlawrence@apple.com>
+
+ Reviewed by Oliver Hunt.
+
+ Tightened up some get_by_id_chain* code generation
+ https://bugs.webkit.org/show_bug.cgi?id=40935
+
+ This is in the style of
+ https://bugs.webkit.org/show_bug.cgi?id=30539, and changes the code to
+ call accessor functions where it is not necessary to access the
+ private variables directly.
+
+ * jit/JIT.h:
+ * jit/JITPropertyAccess.cpp:
+ (JSC::JIT::compileGetDirectOffset):
+ (JSC::JIT::testPrototype):
+ (JSC::JIT::privateCompilePutByIdTransition):
+ (JSC::JIT::privateCompileGetByIdChainList):
+ (JSC::JIT::privateCompileGetByIdChain):
+ * jit/JITPropertyAccess32_64.cpp:
+ (JSC::JIT::testPrototype):
+ (JSC::JIT::privateCompilePutByIdTransition):
+ (JSC::JIT::privateCompileGetByIdChainList):
+ (JSC::JIT::privateCompileGetByIdChain):
+
+2010-08-03 Adam Roben <aroben@apple.com>
+
+ Turn on PLATFORM_STRATEGIES on Windows
+
+ Fixes <http://webkit.org/b/43431>.
+
+ Reviewed by Anders Carlsson.
+
+ * wtf/Platform.h:
+
+2010-08-04 Gabor Loki <loki@webkit.org>
+
+ Reviewed by Geoffrey Garen.
+
+ Enable JSValue32_64 for GCC on ARM by default
+ https://bugs.webkit.org/show_bug.cgi?id=43410
+
+ * wtf/Platform.h:
+
+2010-08-03 Gavin Barraclough <barraclough@apple.com>
+
+ Speculative windows build fix.
+
+ * wtf/Bitmap.h:
+
+2010-08-03 Gavin Barraclough <barraclough@apple.com>
+
+ Build fix following r64624.
+
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * wtf/PageAllocation.h:
+
+2010-08-03 Nathan Lawrence <nlawrence@apple.com>
+
+ Reviewed by Gavin Barraclough.
+
+ https://bugs.webkit.org/show_bug.cgi?id=43269
+
+ Added new allocateAligned methods to PageAllocation. In order to
+ prevent a regression in performance, the function needs to be inlined.
+
+ Additionally, I ported the Symbian block allocator to use
+ PageAllocation and added a new WTF::Bitmap class to support this.
+
+ * GNUmakefile.am:
+ * JavaScriptCore.gypi:
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * runtime/AlignedMemoryAllocator.h: Added.
+ (JSC::AlignedMemory::deallocate):
+ (JSC::AlignedMemory::base):
+ (JSC::AlignedMemory::AlignedMemory):
+ (JSC::AlignedMemoryAllocator::destroy):
+ (JSC::AlignedMemoryAllocator::allocate):
+ (JSC::AlignedMemoryAllocator::AlignedMemoryAllocator):
+ (JSC::AlignedMemoryAllocator::~AlignedMemoryAllocator):
+ (JSC::AlignedMemoryAllocator::free):
+ * runtime/Collector.cpp:
+ (JSC::Heap::Heap):
+ (JSC::Heap::destroy):
+ (JSC::Heap::allocateBlock):
+ (JSC::Heap::freeBlock):
+ (JSC::Heap::freeBlocks):
+ (JSC::Heap::allocate):
+ (JSC::Heap::shrinkBlocks):
+ (JSC::Heap::markConservatively):
+ (JSC::Heap::clearMarkBits):
+ (JSC::Heap::markedCells):
+ * runtime/Collector.h:
+ (JSC::CollectorHeap::collectorBlock):
+ * runtime/CollectorHeapIterator.h:
+ (JSC::CollectorHeapIterator::operator*):
+ (JSC::LiveObjectIterator::operator++):
+ (JSC::DeadObjectIterator::operator++):
+ * wtf/Bitmap.h: Added.
+ (WTF::Bitmap::get):
+ (WTF::Bitmap::set):
+ (WTF::Bitmap::clear):
+ (WTF::Bitmap::clearAll):
+ (WTF::Bitmap::advanceToNextFreeBit):
+ (WTF::Bitmap::count):
+ (WTF::Bitmap::isEmpty):
+ (WTF::Bitmap::isFull):
+ * wtf/PageAllocation.h:
+ (WTF::PageAllocation::operator UnspecifiedBoolType):
+ (WTF::PageAllocation::allocateAligned):
+ (WTF::PageAllocation::reserveAligned):
+ * wtf/Platform.h:
+ * wtf/symbian: Removed.
+ * wtf/symbian/BlockAllocatorSymbian.cpp: Removed.
+ * wtf/symbian/BlockAllocatorSymbian.h: Removed.
+
+2010-08-03 Michael Saboff <msaboff@apple.com>
+
+ Reviewed by Gavin Barraclough.
+
+ Fix for https://bugs.webkit.org/show_bug.cgi?id=43314. The prior code
+ was using the wrong "length" value to move array contents when adding
+ space to the beginning of an array for an unshift() or similar
+ operation. Instead of using m_vectorLength, the length of the
+ allocated JSValue array, the code was using m_length, the declared
+ length of the array. These two values do not need to match.
+
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * runtime/JSArray.cpp:
+ (JSC::JSArray::increaseVectorPrefixLength):
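
A sketch of the distinction at the heart of this fix (names follow the
entry; the code is illustrative): m_length is the declared array length,
m_vectorLength is what is actually allocated, and only the latter bounds
the relocation.

    #include <cstddef>
    #include <cstring>

    // Relocate the allocated elements when growing space before index 0.
    static void moveForUnshift(char* newBase, const char* oldBase,
                               size_t elementSize, size_t vectorLength)
    {
        // Pass m_vectorLength here, never m_length: a sparse array's
        // declared length need not match the storage that exists, so the
        // declared length would copy the wrong number of elements.
        std::memcpy(newBase, oldBase, elementSize * vectorLength);
    }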
+
+2010-08-03 Chao-ying Fu <fu@mips.com>
+
+ Reviewed by Gavin Barraclough.
+
+ Fix following https://bugs.webkit.org/show_bug.cgi?id=43089
+ (accidentally inverted a compiler version check).
+
+ * jit/ExecutableAllocator.h:
+ (JSC::ExecutableAllocator::cacheFlush):
+
+2010-08-03 Patrick Gansterer <paroga@paroga.com>
+
+ Reviewed by Gavin Barraclough.
+
+ Implement DEFINE_STUB_FUNCTION for WinCE.
+ https://bugs.webkit.org/show_bug.cgi?id=34953
+
+ * jit/JITStubs.cpp:
+ (JSC::):
+ (JSC::DEFINE_STUB_FUNCTION):
+
+2010-08-02 Gavin Barraclough <barraclough@apple.com>
+
+ Reviewed by Oliver Hunt.
+
+ Bug 43390 - Do not CRASH if we run out of room for jit code.
+
+ Change the ExecutableAllocator implementations not to crash, and to return 0 if memory cannot be allocated.
+ The assemblers should pass this through without trying to use it in executableCopy.
+ Change the LinkBuffer to handle this, and to provide an allocationSuccessful() method to test for this.
+
+ Change the JIT to throw an exception if allocation fails.
+ Make JIT optimizations fail gracefully if memory cannot be allocated (use non-optimized path).
+ Change the YARR JIT to fall back to PCRE.
+
+ * assembler/ARMAssembler.cpp:
+ (JSC::ARMAssembler::executableCopy):
+ * assembler/ARMv7Assembler.h:
+ (JSC::ARMv7Assembler::executableCopy):
+ * assembler/LinkBuffer.h:
+ (JSC::LinkBuffer::allocationSuccessful):
+ * assembler/MIPSAssembler.h:
+ (JSC::MIPSAssembler::executableCopy):
+ * assembler/X86Assembler.h:
+ (JSC::X86Assembler::executableCopy):
+ * bytecode/StructureStubInfo.h:
+ (JSC::StructureStubInfo::initGetByIdProto):
+ (JSC::StructureStubInfo::initGetByIdChain):
+ (JSC::StructureStubInfo::initGetByIdSelfList):
+ (JSC::StructureStubInfo::initGetByIdProtoList):
+ (JSC::StructureStubInfo::initPutByIdTransition):
+ * jit/ExecutableAllocator.cpp:
+ (JSC::ExecutablePool::systemAlloc):
+ * jit/ExecutableAllocatorFixedVMPool.cpp:
+ (JSC::FixedVMPoolAllocator::allocInternal):
+ * jit/JIT.cpp:
+ (JSC::JIT::privateCompile):
+ * jit/JIT.h:
+ (JSC::JIT::compileGetByIdProto):
+ (JSC::JIT::compileGetByIdSelfList):
+ (JSC::JIT::compileGetByIdProtoList):
+ (JSC::JIT::compileGetByIdChainList):
+ (JSC::JIT::compileGetByIdChain):
+ (JSC::JIT::compilePutByIdTransition):
+ (JSC::JIT::compilePatchGetArrayLength):
+ * jit/JITOpcodes.cpp:
+ (JSC::JIT::privateCompileCTIMachineTrampolines):
+ * jit/JITOpcodes32_64.cpp:
+ (JSC::JIT::privateCompileCTIMachineTrampolines):
+ (JSC::JIT::privateCompileCTINativeCall):
+ * jit/JITPropertyAccess.cpp:
+ (JSC::JIT::stringGetByValStubGenerator):
+ (JSC::JIT::privateCompilePutByIdTransition):
+ (JSC::JIT::privateCompilePatchGetArrayLength):
+ (JSC::JIT::privateCompileGetByIdProto):
+ (JSC::JIT::privateCompileGetByIdSelfList):
+ (JSC::JIT::privateCompileGetByIdProtoList):
+ (JSC::JIT::privateCompileGetByIdChainList):
+ (JSC::JIT::privateCompileGetByIdChain):
+ * jit/JITPropertyAccess32_64.cpp:
+ (JSC::JIT::stringGetByValStubGenerator):
+ (JSC::JIT::privateCompilePutByIdTransition):
+ (JSC::JIT::privateCompilePatchGetArrayLength):
+ (JSC::JIT::privateCompileGetByIdProto):
+ (JSC::JIT::privateCompileGetByIdSelfList):
+ (JSC::JIT::privateCompileGetByIdProtoList):
+ (JSC::JIT::privateCompileGetByIdChainList):
+ (JSC::JIT::privateCompileGetByIdChain):
+ * jit/JITStubs.cpp:
+ (JSC::JITThunks::tryCachePutByID):
+ (JSC::JITThunks::tryCacheGetByID):
+ (JSC::DEFINE_STUB_FUNCTION):
+ (JSC::setupPolymorphicProtoList):
+ * jit/JITStubs.h:
+ * jit/SpecializedThunkJIT.h:
+ (JSC::SpecializedThunkJIT::finalize):
+ * runtime/ExceptionHelpers.cpp:
+ (JSC::createOutOfMemoryError):
+ * runtime/ExceptionHelpers.h:
+ * runtime/Executable.cpp:
+ (JSC::EvalExecutable::compileInternal):
+ (JSC::ProgramExecutable::compileInternal):
+ (JSC::FunctionExecutable::compileForCallInternal):
+ (JSC::FunctionExecutable::compileForConstructInternal):
+ (JSC::FunctionExecutable::reparseExceptionInfo):
+ (JSC::EvalExecutable::reparseExceptionInfo):
+ * yarr/RegexJIT.cpp:
+ (JSC::Yarr::RegexGenerator::compile):
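
An editorial sketch of the error-propagation pattern described above
(names are illustrative): the allocator returns 0 instead of crashing,
the link buffer records that, and each compiler checks before committing
to the generated code.

    struct LinkBufferSketch {
        void* m_code; // 0 when the executable allocator was out of memory

        bool allocationSuccessful() const { return m_code != 0; }
    };

    enum CompileOutcome { CompiledOk, CompileOutOfMemory };

    static CompileOutcome finalize(const LinkBufferSketch& buffer)
    {
        if (!buffer.allocationSuccessful())
            return CompileOutOfMemory; // caller throws, takes the non-optimized
                                       // path, or falls back from YARR to PCRE
        return CompiledOk;
    }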
+
+2010-08-03 Geoffrey Garen <ggaren@apple.com>
+
+ Reviewed by Oliver Hunt.
+
+ Fixed a crash seen on the GTK 64bit buildbot.
+
+ When JSArray is allocated for the vptr stealing hack, it's not allocated
+ in the heap, so the JSArray constructor can't safely call Heap::heap().
+
+ Since this was subtle enough to confuse smart people, I've changed JSArray
+ to have an explicit vptr stealing constructor.
+
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * runtime/JSArray.cpp:
+ (JSC::JSArray::JSArray):
+ * runtime/JSArray.h:
+ (JSC::JSArray::):
+ * runtime/JSGlobalData.cpp:
+ (JSC::JSGlobalData::storeVPtrs):
+
+2010-08-03 Alex Milowski <alex@milowski.com>
+
+ Reviewed by Beth Dakin.
+
+ Changed the ENABLE_MATHML value to enable MathML by default.
+
+ * Configurations/FeatureDefines.xcconfig:
+
+2010-08-03 Michael Saboff <msaboff@apple.com>
+
+ Reviewed by Gavin Barraclough.
+
+ Change to keep the pointer returned by malloc-family functions, to
+ quiet memory leak detection. The pointer is saved in the new m_allocBase
+ member of the ArrayStorage structure. This fixes the issue found in
+ https://bugs.webkit.org/show_bug.cgi?id=43229.
+
+ As part of this change, we use m_allocBase when reallocating and
+ freeing the memory associated with ArrayStorage.
+
+ * runtime/JSArray.cpp:
+ (JSC::JSArray::JSArray):
+ (JSC::JSArray::~JSArray):
+ (JSC::JSArray::putSlowCase):
+ (JSC::JSArray::increaseVectorLength):
+ (JSC::JSArray::increaseVectorPrefixLength):
+ * runtime/JSArray.h:
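
A sketch of the bookkeeping this entry adds (illustrative struct, not the
real ArrayStorage): the pointer actually returned by the allocator is
kept, so realloc and free operate on the true block and leak checkers can
still see it.

    #include <cstdlib>

    struct ArrayStorageSketch {
        void* m_allocBase; // exactly what malloc/realloc returned
        char* m_elements;  // may point into the middle of the block

        void grow(size_t newSize)
        {
            void* newBase = std::realloc(m_allocBase, newSize);
            if (!newBase)
                return; // out-of-memory handling elided
            m_allocBase = newBase;
            // m_elements must be recomputed from m_allocBase here.
        }

        void destroy() { std::free(m_allocBase); } // never free(m_elements)
    };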
+
+2010-08-03 Geoffrey Garen <ggaren@apple.com>
+
+ Reviewed by Mark Rowe.
+
+ https://bugs.webkit.org/show_bug.cgi?id=43444
+ PLATFORM(CF) is false on Windows in JavaScriptCore
+
+ Moved some PLATFORM(WIN) #defines down into JavaScriptCore.
+
+ * wtf/Platform.h: Added WTF_PLATFORM_CF 1 and WTF_USE_PTHREADS 0, inherited
+ from WebCore/config.h. Removed WTF_USE_WININET 1 since WebCore/config.h
+ just #undefined that later.
+
+2010-08-03 Geoffrey Garen <ggaren@apple.com>
+
+ Try to fix Windows build: Don't use GCActivityCallbackCF on Windows, since
+ PLATFORM(CF) is not defined on Windows.
+
+ We'll need to enable the GC activity callback some other way, but this
+ change should get the build back to normal.
+
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj:
+
+ * runtime/GCActivityCallbackCF.cpp: Make it easier to detect this error
+ in the future with an explicit error message.
+
+2010-08-03 Geoffrey Garen <ggaren@apple.com>
+
+ Try to fix Windows build: update .def file.
+
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def:
+
+2010-08-03 Nathan Lawrence <nlawrence@apple.com>
+
+ Reviewed by Oliver Hunt.
+
+ https://bugs.webkit.org/show_bug.cgi?id=41318
+ GC should reclaim garbage even when new objects are not being allocated rapidly
+
+ Added a callback in JavaScriptCore that gets triggered after an
+ allocation causes the heap to reset. This is useful for adding a
+ timer that will trigger garbage collection after the "last" allocation.
+
+ We also needed to add lock and unlock methods to JSLock that require
+ only a JSGlobalData object rather than an ExecState object.
+
+ * CMakeLists.txt:
+ * GNUmakefile.am:
+ * JavaScriptCore.exp:
+ * JavaScriptCore.gypi:
+ * JavaScriptCore.pro:
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj:
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * jit/JITPropertyAccess.cpp:
+ (JSC::JIT::emit_op_put_by_val):
+ * runtime/Collector.cpp:
+ (JSC::Heap::Heap):
+ (JSC::Heap::reset):
+ (JSC::Heap::setActivityCallback):
+ * runtime/Collector.h:
+ * runtime/GCActivityCallback.cpp: Added.
+ (JSC::DefaultGCActivityCallback::DefaultGCActivityCallback):
+ (JSC::DefaultGCActivityCallback::~DefaultGCActivityCallback):
+ (JSC::DefaultGCActivityCallback::operator()):
+ * runtime/GCActivityCallback.h: Added.
+ (JSC::GCActivityCallback::~GCActivityCallback):
+ (JSC::GCActivityCallback::operator()):
+ (JSC::GCActivityCallback::GCActivityCallback):
+ (JSC::DefaultGCActivityCallback::create):
+ * runtime/GCActivityCallbackCF.cpp: Added.
+ (JSC::DefaultGCActivityCallbackPlatformData::trigger):
+ (JSC::DefaultGCActivityCallback::DefaultGCActivityCallback):
+ (JSC::DefaultGCActivityCallback::~DefaultGCActivityCallback):
+ (JSC::DefaultGCActivityCallback::operator()):
+ * runtime/JSLock.cpp:
+ (JSC::JSLock::JSLock):
+ * runtime/JSLock.h:
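
A sketch of the hook's shape (illustrative; the real default
implementation lives in GCActivityCallbackCF.cpp and uses a CF timer):
the heap invokes the callback whenever an allocation resets it, and the
callback defers collection until allocation goes quiet.

    class GCActivityCallbackSketch {
    public:
        virtual ~GCActivityCallbackSketch() { }
        virtual void operator()() = 0; // invoked from Heap::reset()
    };

    class DeferredCollectCallback : public GCActivityCallbackSketch {
    public:
        virtual void operator()()
        {
            // Re-arm a short one-shot timer here. If no further allocation
            // re-arms it, the timer fires and runs a full collection, so
            // garbage is reclaimed even after allocation stops.
        }
    };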
+
+2010-08-02 Kevin Ollivier <kevino@theolliviers.com>
+
+ [wx] Build fix after removal of need to compile ExecutableAllocatorPosix.cpp
+
+ * wscript:
+
2010-08-02 Mahesh Kulkarni <mahesh.kulkarni@nokia.com>
Reviewed by Simon Hausmann.
diff --git a/JavaScriptCore/Configurations/FeatureDefines.xcconfig b/JavaScriptCore/Configurations/FeatureDefines.xcconfig
index 9810cf7..f2b4c09 100644
--- a/JavaScriptCore/Configurations/FeatureDefines.xcconfig
+++ b/JavaScriptCore/Configurations/FeatureDefines.xcconfig
@@ -46,8 +46,8 @@ ENABLE_3D_RENDERING_macosx_1050 = ENABLE_3D_RENDERING;
ENABLE_3D_RENDERING_macosx_1060 = ENABLE_3D_RENDERING;
ENABLE_3D_RENDERING_macosx_1070 = ENABLE_3D_RENDERING;
-ENABLE_BLOB_SLICE = $(ENABLE_BLOB_SLICE_$(REAL_PLATFORM_NAME));
-ENABLE_BLOB_SLICE_macosx = ENABLE_BLOB_SLICE;
+ENABLE_BLOB = $(ENABLE_BLOB_$(REAL_PLATFORM_NAME));
+ENABLE_BLOB_macosx = ENABLE_BLOB;
ENABLE_CHANNEL_MESSAGING = $(ENABLE_CHANNEL_MESSAGING_$(REAL_PLATFORM_NAME));
ENABLE_CHANNEL_MESSAGING_macosx = ENABLE_CHANNEL_MESSAGING;
@@ -69,9 +69,6 @@ ENABLE_EVENTSOURCE = ENABLE_EVENTSOURCE;
ENABLE_FILTERS = $(ENABLE_FILTERS_$(REAL_PLATFORM_NAME));
ENABLE_FILTERS_macosx = ENABLE_FILTERS;
-ENABLE_FILE_READER = $(ENABLE_FILE_READER_$(REAL_PLATFORM_NAME));
-ENABLE_FILE_READER_macosx = ENABLE_FILE_READER;
-
ENABLE_FILE_WRITER = ;
ENABLE_FILE_SYSTEM = ;
ENABLE_GEOLOCATION = ENABLE_GEOLOCATION;
@@ -83,7 +80,7 @@ ENABLE_IMAGE_RESIZER = ;
ENABLE_INDEXED_DATABASE = ;
ENABLE_INPUT_SPEECH = ;
ENABLE_JAVASCRIPT_DEBUGGER = ENABLE_JAVASCRIPT_DEBUGGER;
-ENABLE_MATHML = ;
+ENABLE_MATHML = ENABLE_MATHML;
ENABLE_METER_TAG = ENABLE_METER_TAG;
ENABLE_NOTIFICATIONS = ;
ENABLE_OFFLINE_WEB_APPLICATIONS = ENABLE_OFFLINE_WEB_APPLICATIONS;
@@ -123,4 +120,4 @@ ENABLE_XHTMLMP = ;
ENABLE_XPATH = ENABLE_XPATH;
ENABLE_XSLT = ENABLE_XSLT;
-FEATURE_DEFINES = $(ENABLE_LINK_PREFETCH) $(ENABLE_3D_CANVAS) $(ENABLE_3D_RENDERING) $(ENABLE_BLOB_SLICE) $(ENABLE_CHANNEL_MESSAGING) $(ENABLE_CLIENT_BASED_GEOLOCATION) $(ENABLE_DATABASE) $(ENABLE_DATAGRID) $(ENABLE_DATALIST) $(ENABLE_DEVICE_ORIENTATION) $(ENABLE_DIRECTORY_UPLOAD) $(ENABLE_DOM_STORAGE) $(ENABLE_EVENTSOURCE) $(ENABLE_FILTERS) $(ENABLE_FILE_READER) $(ENABLE_FILE_WRITER) $(ENABLE_FILE_SYSTEM) $(ENABLE_GEOLOCATION) $(ENABLE_ICONDATABASE) $(ENABLE_IMAGE_RESIZER) $(ENABLE_INDEXED_DATABASE) $(ENABLE_INPUT_SPEECH) $(ENABLE_JAVASCRIPT_DEBUGGER) $(ENABLE_MATHML) $(ENABLE_METER_TAG) $(ENABLE_NOTIFICATIONS) $(ENABLE_OFFLINE_WEB_APPLICATIONS) $(ENABLE_PROGRESS_TAG) $(ENABLE_RUBY) $(ENABLE_SANDBOX) $(ENABLE_SHARED_WORKERS) $(ENABLE_SVG) $(ENABLE_SVG_ANIMATION) $(ENABLE_SVG_AS_IMAGE) $(ENABLE_SVG_DOM_OBJC_BINDINGS) $(ENABLE_SVG_FONTS) $(ENABLE_SVG_FOREIGN_OBJECT) $(ENABLE_SVG_USE) $(ENABLE_VIDEO) $(ENABLE_WEB_SOCKETS) $(ENABLE_WEB_TIMING) $(ENABLE_WML) $(ENABLE_WORKERS) $(ENABLE_XHTMLMP) $(ENABLE_XPATH) $(ENABLE_XSLT);
+FEATURE_DEFINES = $(ENABLE_LINK_PREFETCH) $(ENABLE_3D_CANVAS) $(ENABLE_3D_RENDERING) $(ENABLE_BLOB) $(ENABLE_CHANNEL_MESSAGING) $(ENABLE_CLIENT_BASED_GEOLOCATION) $(ENABLE_DATABASE) $(ENABLE_DATAGRID) $(ENABLE_DATALIST) $(ENABLE_DEVICE_ORIENTATION) $(ENABLE_DIRECTORY_UPLOAD) $(ENABLE_DOM_STORAGE) $(ENABLE_EVENTSOURCE) $(ENABLE_FILTERS) $(ENABLE_FILE_WRITER) $(ENABLE_FILE_SYSTEM) $(ENABLE_GEOLOCATION) $(ENABLE_ICONDATABASE) $(ENABLE_IMAGE_RESIZER) $(ENABLE_INDEXED_DATABASE) $(ENABLE_INPUT_SPEECH) $(ENABLE_JAVASCRIPT_DEBUGGER) $(ENABLE_MATHML) $(ENABLE_METER_TAG) $(ENABLE_NOTIFICATIONS) $(ENABLE_OFFLINE_WEB_APPLICATIONS) $(ENABLE_PROGRESS_TAG) $(ENABLE_RUBY) $(ENABLE_SANDBOX) $(ENABLE_SHARED_WORKERS) $(ENABLE_SVG) $(ENABLE_SVG_ANIMATION) $(ENABLE_SVG_AS_IMAGE) $(ENABLE_SVG_DOM_OBJC_BINDINGS) $(ENABLE_SVG_FONTS) $(ENABLE_SVG_FOREIGN_OBJECT) $(ENABLE_SVG_USE) $(ENABLE_VIDEO) $(ENABLE_WEB_SOCKETS) $(ENABLE_WEB_TIMING) $(ENABLE_WML) $(ENABLE_WORKERS) $(ENABLE_XHTMLMP) $(ENABLE_XPATH) $(ENABLE_XSLT);
diff --git a/JavaScriptCore/Configurations/Version.xcconfig b/JavaScriptCore/Configurations/Version.xcconfig
index 2893a3c..811d9c4 100644
--- a/JavaScriptCore/Configurations/Version.xcconfig
+++ b/JavaScriptCore/Configurations/Version.xcconfig
@@ -22,7 +22,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
MAJOR_VERSION = 534;
-MINOR_VERSION = 5;
+MINOR_VERSION = 6;
TINY_VERSION = 0;
FULL_VERSION = $(MAJOR_VERSION).$(MINOR_VERSION);
diff --git a/JavaScriptCore/DerivedSources.pro b/JavaScriptCore/DerivedSources.pro
index f358c8b..4d724be 100644
--- a/JavaScriptCore/DerivedSources.pro
+++ b/JavaScriptCore/DerivedSources.pro
@@ -26,7 +26,7 @@ LUT_FILES += \
KEYWORDLUT_FILES += \
parser/Keywords.table
-RVCT_STUB_FILES += \
+JIT_STUB_FILES += \
jit/JITStubs.cpp
defineTest(addExtraCompiler) {
@@ -65,15 +65,24 @@ keywordlut.commands = perl $$keywordlut.wkScript ${QMAKE_FILE_NAME} -i > ${QMAKE
keywordlut.depends = ${QMAKE_FILE_NAME}
addExtraCompiler(keywordlut)
-# GENERATOR 3: JIT Stub functions for RVCT
+# GENERATOR 2-A: JIT Stub functions for RVCT
rvctstubs.output = $${JSC_GENERATED_SOURCES_DIR}$${QMAKE_DIR_SEP}Generated${QMAKE_FILE_BASE}_RVCT.h
rvctstubs.wkScript = $$PWD/create_jit_stubs
rvctstubs.commands = perl -i $$rvctstubs.wkScript --prefix RVCT ${QMAKE_FILE_NAME} > ${QMAKE_FILE_OUT}
rvctstubs.depends = ${QMAKE_FILE_NAME}
-rvctstubs.input = RVCT_STUB_FILES
+rvctstubs.input = JIT_STUB_FILES
rvctstubs.CONFIG += no_link
addExtraCompiler(rvctstubs)
+# GENERATOR 2-B: JIT Stub functions for MSVC
+msvcstubs.output = $${JSC_GENERATED_SOURCES_DIR}$${QMAKE_DIR_SEP}Generated${QMAKE_FILE_BASE}_MSVC.asm
+msvcstubs.wkScript = $$PWD/create_jit_stubs
+msvcstubs.commands = perl -i $$msvcstubs.wkScript --prefix MSVC ${QMAKE_FILE_NAME} > ${QMAKE_FILE_OUT}
+msvcstubs.depends = ${QMAKE_FILE_NAME}
+msvcstubs.input = JIT_STUB_FILES
+msvcstubs.CONFIG += no_link
+addExtraCompiler(msvcstubs)
+
# GENERATOR: "chartables.c": compile and execute the chartables generator (and add it to sources)
win32-msvc*|wince*: PREPROCESSOR = "--preprocessor=\"$$QMAKE_CC /E\""
ctgen.output = $$JSC_GENERATED_SOURCES_DIR/chartables.c
diff --git a/JavaScriptCore/GNUmakefile.am b/JavaScriptCore/GNUmakefile.am
index 1dd15e4..2ead8e0 100644
--- a/JavaScriptCore/GNUmakefile.am
+++ b/JavaScriptCore/GNUmakefile.am
@@ -212,6 +212,7 @@ javascriptcore_sources += \
JavaScriptCore/profiler/ProfileNode.h \
JavaScriptCore/profiler/Profiler.cpp \
JavaScriptCore/profiler/Profiler.h \
+ JavaScriptCore/runtime/AlignedMemoryAllocator.h \
JavaScriptCore/runtime/ArgList.cpp \
JavaScriptCore/runtime/ArgList.h \
JavaScriptCore/runtime/Arguments.cpp \
@@ -265,6 +266,10 @@ javascriptcore_sources += \
JavaScriptCore/runtime/FunctionConstructor.h \
JavaScriptCore/runtime/FunctionPrototype.cpp \
JavaScriptCore/runtime/FunctionPrototype.h \
+ JavaScriptCore/runtime/GCActivityCallback.cpp \
+ JavaScriptCore/runtime/GCActivityCallback.h \
+ JavaScriptCore/runtime/GCHandle.cpp \
+ JavaScriptCore/runtime/GCHandle.h \
JavaScriptCore/runtime/GetterSetter.cpp \
JavaScriptCore/runtime/GetterSetter.h \
JavaScriptCore/runtime/GlobalEvalFunction.cpp \
@@ -409,6 +414,7 @@ javascriptcore_sources += \
JavaScriptCore/wtf/Assertions.h \
JavaScriptCore/wtf/Atomics.h \
JavaScriptCore/wtf/AVLTree.h \
+ JavaScriptCore/wtf/Bitmap.h \
JavaScriptCore/wtf/BumpPointerAllocator.h \
JavaScriptCore/wtf/ByteArray.cpp \
JavaScriptCore/wtf/ByteArray.h \
@@ -459,6 +465,7 @@ javascriptcore_sources += \
JavaScriptCore/wtf/OwnPtr.h \
JavaScriptCore/wtf/PageAllocation.cpp \
JavaScriptCore/wtf/PageAllocation.h \
+ JavaScriptCore/wtf/PageReservation.h \
JavaScriptCore/wtf/PassOwnPtr.h \
JavaScriptCore/wtf/PassRefPtr.h \
JavaScriptCore/wtf/Platform.h \
diff --git a/JavaScriptCore/JavaScriptCore.exp b/JavaScriptCore/JavaScriptCore.exp
index b66d8df..241ed5c 100644
--- a/JavaScriptCore/JavaScriptCore.exp
+++ b/JavaScriptCore/JavaScriptCore.exp
@@ -99,14 +99,14 @@ __Z12jsRegExpFreeP8JSRegExp
__Z15jsRegExpCompilePKti24JSRegExpIgnoreCaseOption23JSRegExpMultilineOptionPjPPKc
__Z15jsRegExpExecutePK8JSRegExpPKtiiPii
__ZN14OpaqueJSString6createERKN3JSC7UStringE
-__ZN3JSC10Identifier11addSlowCaseEPNS_12JSGlobalDataEPN7WebCore10StringImplE
-__ZN3JSC10Identifier11addSlowCaseEPNS_9ExecStateEPN7WebCore10StringImplE
+__ZN3JSC10Identifier11addSlowCaseEPNS_12JSGlobalDataEPN3WTF10StringImplE
+__ZN3JSC10Identifier11addSlowCaseEPNS_9ExecStateEPN3WTF10StringImplE
__ZN3JSC10Identifier27checkCurrentIdentifierTableEPNS_12JSGlobalDataE
__ZN3JSC10Identifier27checkCurrentIdentifierTableEPNS_9ExecStateE
__ZN3JSC10Identifier3addEPNS_9ExecStateEPKc
__ZN3JSC10Identifier4fromEPNS_9ExecStateEi
__ZN3JSC10Identifier4fromEPNS_9ExecStateEj
-__ZN3JSC10Identifier5equalEPKN7WebCore10StringImplEPKc
+__ZN3JSC10Identifier5equalEPKN3WTF10StringImplEPKc
__ZN3JSC10JSFunction4infoE
__ZN3JSC10JSFunction4nameEPNS_9ExecStateE
__ZN3JSC10throwErrorEPNS_9ExecStateENS_7JSValueE
@@ -141,6 +141,7 @@ __ZN3JSC12StringObject24getOwnPropertyDescriptorEPNS_9ExecStateERKNS_10Identifie
__ZN3JSC12StringObject3putEPNS_9ExecStateERKNS_10IdentifierENS_7JSValueERNS_15PutPropertySlotE
__ZN3JSC12StringObject4infoE
__ZN3JSC12StringObjectC2EPNS_9ExecStateEN3WTF17NonNullPassRefPtrINS_9StructureEEERKNS_7UStringE
+__ZN3JSC12WeakGCHandle4poolEv
__ZN3JSC12jsNumberCellEPNS_9ExecStateEd
__ZN3JSC12nonInlineNaNEv
__ZN3JSC13SamplingFlags4stopEv
@@ -171,11 +172,12 @@ __ZN3JSC16InternalFunctionC2EPNS_12JSGlobalDataEPNS_14JSGlobalObjectEN3WTF17NonN
__ZN3JSC16JSVariableObject14deletePropertyEPNS_9ExecStateERKNS_10IdentifierE
__ZN3JSC16JSVariableObject14symbolTableGetERKNS_10IdentifierERNS_18PropertyDescriptorE
__ZN3JSC16JSVariableObject19getOwnPropertyNamesEPNS_9ExecStateERNS_17PropertyNameArrayENS_15EnumerationModeE
+__ZN3JSC16WeakGCHandlePool4freeEPNS_12WeakGCHandleE
__ZN3JSC16createRangeErrorEPNS_9ExecStateERKNS_7UStringE
__ZN3JSC16throwSyntaxErrorEPNS_9ExecStateE
__ZN3JSC16toUInt32SlowCaseEdRb
__ZN3JSC17BytecodeGenerator21setDumpsGeneratedCodeEb
-__ZN3JSC17PropertyNameArray3addEPN7WebCore10StringImplE
+__ZN3JSC17PropertyNameArray3addEPN3WTF10StringImplE
__ZN3JSC17constructFunctionEPNS_9ExecStateERKNS_7ArgListERKNS_10IdentifierERKNS_7UStringEi
__ZN3JSC17createSyntaxErrorEPNS_9ExecStateERKNS_7UStringE
__ZN3JSC18DebuggerActivationC1EPNS_8JSObjectE
@@ -201,11 +203,13 @@ __ZN3JSC25evaluateInGlobalCallFrameERKNS_7UStringERNS_7JSValueEPNS_14JSGlobalObj
__ZN3JSC35createInterruptedExecutionExceptionEPNS_12JSGlobalDataE
__ZN3JSC3NaNE
__ZN3JSC4Heap14primaryHeapEndEv
+__ZN3JSC4Heap15addWeakGCHandleEPNS_6JSCellE
__ZN3JSC4Heap15recordExtraCostEm
__ZN3JSC4Heap16objectTypeCountsEv
__ZN3JSC4Heap16primaryHeapBeginEv
__ZN3JSC4Heap17collectAllGarbageEv
__ZN3JSC4Heap17globalObjectCountEv
+__ZN3JSC4Heap19setActivityCallbackEN3WTF10PassOwnPtrINS_18GCActivityCallbackEEE
__ZN3JSC4Heap20protectedObjectCountEv
__ZN3JSC4Heap25protectedObjectTypeCountsEv
__ZN3JSC4Heap26protectedGlobalObjectCountEv
@@ -296,7 +300,7 @@ __ZN3JSC9MarkStack10s_pageSizeE
__ZN3JSC9MarkStack12releaseStackEPvm
__ZN3JSC9MarkStack13allocateStackEm
__ZN3JSC9MarkStack18initializePagesizeEv
-__ZN3JSC9Structure13hasTransitionEPN7WebCore10StringImplEj
+__ZN3JSC9Structure13hasTransitionEPN3WTF10StringImplEj
__ZN3JSC9Structure17stopIgnoringLeaksEv
__ZN3JSC9Structure18startIgnoringLeaksEv
__ZN3JSC9Structure21addPropertyTransitionEPS0_RKNS_10IdentifierEjPNS_6JSCellERm
@@ -305,7 +309,7 @@ __ZN3JSC9Structure25changePrototypeTransitionEPS0_NS_7JSValueE
__ZN3JSC9Structure27despecifyDictionaryFunctionERKNS_10IdentifierE
__ZN3JSC9Structure27despecifyFunctionTransitionEPS0_RKNS_10IdentifierE
__ZN3JSC9Structure28addPropertyWithoutTransitionERKNS_10IdentifierEjPNS_6JSCellE
-__ZN3JSC9Structure3getEPKN7WebCore10StringImplERjRPNS_6JSCellE
+__ZN3JSC9Structure3getEPKN3WTF10StringImplERjRPNS_6JSCellE
__ZN3JSC9Structure40addPropertyTransitionToExistingStructureEPS0_RKNS_10IdentifierEjPNS_6JSCellERm
__ZN3JSC9StructureC1ENS_7JSValueERKNS_8TypeInfoEj
__ZN3JSC9StructureD1Ev
@@ -313,11 +317,49 @@ __ZN3JSC9constructEPNS_9ExecStateENS_7JSValueENS_13ConstructTypeERKNS_13Construc
__ZN3JSCeqERKNS_7UStringEPKc
__ZN3JSCgtERKNS_7UStringES2_
__ZN3JSCltERKNS_7UStringES2_
+__ZN3WTF10StringImpl11reverseFindEPS0_ib
+__ZN3WTF10StringImpl11reverseFindEti
+__ZN3WTF10StringImpl12sharedBufferEv
+__ZN3WTF10StringImpl18simplifyWhiteSpaceEv
+__ZN3WTF10StringImpl19characterStartingAtEj
+__ZN3WTF10StringImpl19createUninitializedEjRPt
+__ZN3WTF10StringImpl22containsOnlyWhitespaceEv
+__ZN3WTF10StringImpl23defaultWritingDirectionEv
+__ZN3WTF10StringImpl37createStrippingNullCharactersSlowCaseEPKtj
+__ZN3WTF10StringImpl4findEPFbtEi
+__ZN3WTF10StringImpl4findEPKcib
+__ZN3WTF10StringImpl4findEPS0_ib
+__ZN3WTF10StringImpl4findEti
+__ZN3WTF10StringImpl5adoptERNS_12StringBufferE
+__ZN3WTF10StringImpl5asciiEv
+__ZN3WTF10StringImpl5emptyEv
+__ZN3WTF10StringImpl5lowerEv
+__ZN3WTF10StringImpl5toIntEPb
+__ZN3WTF10StringImpl5upperEv
+__ZN3WTF10StringImpl6createEPKc
+__ZN3WTF10StringImpl6createEPKcj
+__ZN3WTF10StringImpl6createEPKtj
+__ZN3WTF10StringImpl6secureEt
+__ZN3WTF10StringImpl7replaceEPS0_S1_
+__ZN3WTF10StringImpl7replaceEjjPS0_
+__ZN3WTF10StringImpl7replaceEtPS0_
+__ZN3WTF10StringImpl7replaceEtt
+__ZN3WTF10StringImpl8endsWithEPS0_b
+__ZN3WTF10StringImpl9substringEjj
+__ZN3WTF10StringImplD1Ev
__ZN3WTF10fastCallocEmm
__ZN3WTF10fastMallocEm
__ZN3WTF10fastStrDupEPKc
+__ZN3WTF11commentAtomE
__ZN3WTF11currentTimeEv
__ZN3WTF11fastReallocEPvm
+__ZN3WTF12AtomicString11addSlowCaseEPNS_10StringImplE
+__ZN3WTF12AtomicString3addEPKc
+__ZN3WTF12AtomicString3addEPKt
+__ZN3WTF12AtomicString3addEPKtj
+__ZN3WTF12AtomicString3addEPKtjj
+__ZN3WTF12AtomicString4findEPKtjj
+__ZN3WTF12AtomicString4initEv
__ZN3WTF12createThreadEPFPvS0_ES0_
__ZN3WTF12createThreadEPFPvS0_ES0_PKc
__ZN3WTF12detachThreadEj
@@ -336,11 +378,19 @@ __ZN3WTF15ThreadCondition9broadcastEv
__ZN3WTF15ThreadCondition9timedWaitERNS_5MutexEd
__ZN3WTF15ThreadConditionC1Ev
__ZN3WTF15ThreadConditionD1Ev
+__ZN3WTF15charactersToIntEPKtmPb
__ZN3WTF16callOnMainThreadEPFvPvES0_
+__ZN3WTF16codePointCompareERKNS_6StringES2_
__ZN3WTF16fastZeroedMallocEm
+__ZN3WTF17charactersToFloatEPKtmPb
+__ZN3WTF17equalIgnoringCaseEPKtPKcj
+__ZN3WTF17equalIgnoringCaseEPNS_10StringImplEPKc
+__ZN3WTF17equalIgnoringCaseEPNS_10StringImplES1_
+__ZN3WTF18charactersToDoubleEPKtmPb
__ZN3WTF18dateToDaysFrom1970Eiii
__ZN3WTF18monthFromDayInYearEib
__ZN3WTF19initializeThreadingEv
+__ZN3WTF20equalIgnoringNullityEPNS_10StringImplES1_
__ZN3WTF20fastMallocStatisticsEv
__ZN3WTF20initializeMainThreadEv
__ZN3WTF21RefCountedLeakCounter16suppressMessagesEPKc
@@ -349,6 +399,8 @@ __ZN3WTF21RefCountedLeakCounter9decrementEv
__ZN3WTF21RefCountedLeakCounter9incrementEv
__ZN3WTF21RefCountedLeakCounterC1EPKc
__ZN3WTF21RefCountedLeakCounterD1Ev
+__ZN3WTF21charactersToIntStrictEPKtmPbi
+__ZN3WTF22charactersToUIntStrictEPKtmPbi
__ZN3WTF23callOnMainThreadAndWaitEPFvPvES0_
__ZN3WTF23dayInMonthFromDayInYearEib
__ZN3WTF23waitForThreadCompletionEjPPv
@@ -367,7 +419,30 @@ __ZN3WTF5Mutex6unlockEv
__ZN3WTF5Mutex7tryLockEv
__ZN3WTF5MutexC1Ev
__ZN3WTF5MutexD1Ev
+__ZN3WTF5equalEPKNS_10StringImplEPKc
+__ZN3WTF5equalEPKNS_10StringImplES2_
__ZN3WTF5yieldEv
+__ZN3WTF6String26fromUTF8WithLatin1FallbackEPKcm
+__ZN3WTF6String29charactersWithNullTerminationEv
+__ZN3WTF6String6appendEPKtj
+__ZN3WTF6String6appendERKS0_
+__ZN3WTF6String6appendEc
+__ZN3WTF6String6appendEt
+__ZN3WTF6String6formatEPKcz
+__ZN3WTF6String6insertERKS0_j
+__ZN3WTF6String6numberEd
+__ZN3WTF6String6numberEi
+__ZN3WTF6String6numberEj
+__ZN3WTF6String6numberEl
+__ZN3WTF6String6numberEm
+__ZN3WTF6String6numberEt
+__ZN3WTF6String6numberEx
+__ZN3WTF6String6numberEy
+__ZN3WTF6String6removeEji
+__ZN3WTF6String8fromUTF8EPKc
+__ZN3WTF6String8fromUTF8EPKcm
+__ZN3WTF6String8truncateEj
+__ZN3WTF6StringC1EPKt
__ZN3WTF6strtodEPKcPPc
__ZN3WTF7CString11mutableDataEv
__ZN3WTF7CString16newUninitializedEmRPc
@@ -375,96 +450,24 @@ __ZN3WTF7CStringC1EPKc
__ZN3WTF7CStringC1EPKcj
__ZN3WTF7Unicode18convertUTF16ToUTF8EPPKtS2_PPcS4_b
__ZN3WTF7Unicode18convertUTF8ToUTF16EPPKcS2_PPtS4_b
+__ZN3WTF7xmlAtomE
__ZN3WTF8Collator18setOrderLowerFirstEb
__ZN3WTF8CollatorC1EPKc
__ZN3WTF8CollatorD1Ev
__ZN3WTF8fastFreeEPv
__ZN3WTF8msToYearEd
+__ZN3WTF8nullAtomE
+__ZN3WTF8starAtomE
+__ZN3WTF8textAtomE
__ZN3WTF9ByteArray6createEm
__ZN3WTF9dayInYearEdi
+__ZN3WTF9emptyAtomE
+__ZN3WTF9xmlnsAtomE
+__ZN3WTFeqERKNS_12AtomicStringEPKc
__ZN3WTFeqERKNS_7CStringES2_
-__ZN7WebCore10StringImpl11reverseFindEPS0_ib
-__ZN7WebCore10StringImpl11reverseFindEti
-__ZN7WebCore10StringImpl12sharedBufferEv
-__ZN7WebCore10StringImpl18simplifyWhiteSpaceEv
-__ZN7WebCore10StringImpl19characterStartingAtEj
-__ZN7WebCore10StringImpl19createUninitializedEjRPt
-__ZN7WebCore10StringImpl22containsOnlyWhitespaceEv
-__ZN7WebCore10StringImpl23defaultWritingDirectionEv
-__ZN7WebCore10StringImpl37createStrippingNullCharactersSlowCaseEPKtj
-__ZN7WebCore10StringImpl4findEPFbtEi
-__ZN7WebCore10StringImpl4findEPKcib
-__ZN7WebCore10StringImpl4findEPS0_ib
-__ZN7WebCore10StringImpl4findEti
-__ZN7WebCore10StringImpl5adoptERNS_12StringBufferE
-__ZN7WebCore10StringImpl5asciiEv
-__ZN7WebCore10StringImpl5emptyEv
-__ZN7WebCore10StringImpl5lowerEv
-__ZN7WebCore10StringImpl5toIntEPb
-__ZN7WebCore10StringImpl5upperEv
-__ZN7WebCore10StringImpl6createEPKc
-__ZN7WebCore10StringImpl6createEPKcj
-__ZN7WebCore10StringImpl6createEPKtj
-__ZN7WebCore10StringImpl6createEPKtjN3WTF10PassRefPtrINS3_21CrossThreadRefCountedINS3_16OwnFastMallocPtrIS1_EEEEEE
-__ZN7WebCore10StringImpl6secureEt
-__ZN7WebCore10StringImpl7replaceEPS0_S1_
-__ZN7WebCore10StringImpl7replaceEjjPS0_
-__ZN7WebCore10StringImpl7replaceEtPS0_
-__ZN7WebCore10StringImpl7replaceEtt
-__ZN7WebCore10StringImpl8endsWithEPS0_b
-__ZN7WebCore10StringImpl9substringEjj
-__ZN7WebCore10StringImplD1Ev
-__ZN7WebCore11commentAtomE
-__ZN7WebCore12AtomicString11addSlowCaseEPNS_10StringImplE
-__ZN7WebCore12AtomicString3addEPKc
-__ZN7WebCore12AtomicString3addEPKt
-__ZN7WebCore12AtomicString3addEPKtj
-__ZN7WebCore12AtomicString3addEPKtjj
-__ZN7WebCore12AtomicString4findEPKtjj
-__ZN7WebCore12AtomicString4initEv
-__ZN7WebCore15charactersToIntEPKtmPb
-__ZN7WebCore16codePointCompareERKNS_6StringES2_
-__ZN7WebCore17charactersToFloatEPKtmPb
-__ZN7WebCore17equalIgnoringCaseEPKtPKcj
-__ZN7WebCore17equalIgnoringCaseEPNS_10StringImplEPKc
-__ZN7WebCore17equalIgnoringCaseEPNS_10StringImplES1_
-__ZN7WebCore18charactersToDoubleEPKtmPb
-__ZN7WebCore20equalIgnoringNullityEPNS_10StringImplES1_
-__ZN7WebCore21charactersToIntStrictEPKtmPbi
-__ZN7WebCore22charactersToUIntStrictEPKtmPbi
-__ZN7WebCore5equalEPKNS_10StringImplEPKc
-__ZN7WebCore5equalEPKNS_10StringImplES2_
-__ZN7WebCore6String26fromUTF8WithLatin1FallbackEPKcm
-__ZN7WebCore6String29charactersWithNullTerminationEv
-__ZN7WebCore6String6appendEPKtj
-__ZN7WebCore6String6appendERKS0_
-__ZN7WebCore6String6appendEc
-__ZN7WebCore6String6appendEt
-__ZN7WebCore6String6formatEPKcz
-__ZN7WebCore6String6insertERKS0_j
-__ZN7WebCore6String6numberEd
-__ZN7WebCore6String6numberEi
-__ZN7WebCore6String6numberEj
-__ZN7WebCore6String6numberEl
-__ZN7WebCore6String6numberEm
-__ZN7WebCore6String6numberEt
-__ZN7WebCore6String6numberEx
-__ZN7WebCore6String6numberEy
-__ZN7WebCore6String6removeEji
-__ZN7WebCore6String8fromUTF8EPKc
-__ZN7WebCore6String8fromUTF8EPKcm
-__ZN7WebCore6String8truncateEj
-__ZN7WebCore6StringC1EPKt
-__ZN7WebCore7xmlAtomE
-__ZN7WebCore8nullAtomE
-__ZN7WebCore8starAtomE
-__ZN7WebCore8textAtomE
-__ZN7WebCore9emptyAtomE
-__ZN7WebCore9xmlnsAtomE
-__ZN7WebCoreeqERKNS_12AtomicStringEPKc
-__ZN7WebCoreplEPKcRKNS_6StringE
-__ZN7WebCoreplERKNS_6StringEPKc
-__ZN7WebCoreplERKNS_6StringES2_
+__ZN3WTFplEPKcRKNS_6StringE
+__ZN3WTFplERKNS_6StringEPKc
+__ZN3WTFplERKNS_6StringES2_
__ZNK3JSC10JSFunction23isHostFunctionNonInlineEv
__ZNK3JSC11Interpreter14retrieveCallerEPNS_9ExecStateEPNS_10JSFunctionE
__ZNK3JSC11Interpreter18retrieveLastCallerEPNS_9ExecStateERiRlRNS_7UStringERNS_7JSValueE
@@ -524,33 +527,32 @@ __ZNK3JSC8JSObject9toBooleanEPNS_9ExecStateE
__ZNK3JSC8JSString11resolveRopeEPNS_9ExecStateE
__ZNK3JSC9HashTable11createTableEPNS_12JSGlobalDataE
__ZNK3JSC9HashTable11deleteTableEv
+__ZNK3WTF12AtomicString5lowerEv
+__ZNK3WTF6String11toIntStrictEPbi
+__ZNK3WTF6String12toUIntStrictEPbi
+__ZNK3WTF6String14threadsafeCopyEv
+__ZNK3WTF6String15stripWhiteSpaceEv
+__ZNK3WTF6String16removeCharactersEPFbtE
+__ZNK3WTF6String17crossThreadStringEv
+__ZNK3WTF6String18simplifyWhiteSpaceEv
+__ZNK3WTF6String19characterStartingAtEj
+__ZNK3WTF6String4utf8Ev
+__ZNK3WTF6String5asciiEv
+__ZNK3WTF6String5lowerEv
+__ZNK3WTF6String5splitERKS0_bRNS_6VectorIS0_Lm0EEE
+__ZNK3WTF6String5splitEtRNS_6VectorIS0_Lm0EEE
+__ZNK3WTF6String5splitEtbRNS_6VectorIS0_Lm0EEE
+__ZNK3WTF6String5toIntEPb
+__ZNK3WTF6String5upperEv
+__ZNK3WTF6String6latin1Ev
+__ZNK3WTF6String6toUIntEPb
+__ZNK3WTF6String7toFloatEPb
+__ZNK3WTF6String8foldCaseEv
+__ZNK3WTF6String8toDoubleEPb
+__ZNK3WTF6String8toIntPtrEPb
+__ZNK3WTF6String8toUInt64EPb
+__ZNK3WTF6String9substringEjj
__ZNK3WTF8Collator7collateEPKtmS2_m
-__ZNK7WebCore12AtomicString5lowerEv
-__ZNK7WebCore6String11toIntStrictEPbi
-__ZNK7WebCore6String12toUIntStrictEPbi
-__ZNK7WebCore6String14threadsafeCopyEv
-__ZNK7WebCore6String15stripWhiteSpaceEv
-__ZNK7WebCore6String16removeCharactersEPFbtE
-__ZNK7WebCore6String17crossThreadStringEv
-__ZNK7WebCore6String18simplifyWhiteSpaceEv
-__ZNK7WebCore6String19characterStartingAtEj
-__ZNK7WebCore6String4utf8Ev
-__ZNK7WebCore6String5asciiEv
-__ZNK7WebCore6String5lowerEv
-__ZNK7WebCore6String5splitERKS0_RN3WTF6VectorIS0_Lm0EEE
-__ZNK7WebCore6String5splitERKS0_bRN3WTF6VectorIS0_Lm0EEE
-__ZNK7WebCore6String5splitEtRN3WTF6VectorIS0_Lm0EEE
-__ZNK7WebCore6String5splitEtbRN3WTF6VectorIS0_Lm0EEE
-__ZNK7WebCore6String5toIntEPb
-__ZNK7WebCore6String5upperEv
-__ZNK7WebCore6String6latin1Ev
-__ZNK7WebCore6String6toUIntEPb
-__ZNK7WebCore6String7toFloatEPb
-__ZNK7WebCore6String8foldCaseEv
-__ZNK7WebCore6String8toDoubleEPb
-__ZNK7WebCore6String8toIntPtrEPb
-__ZNK7WebCore6String8toUInt64EPb
-__ZNK7WebCore6String9substringEjj
__ZTVN3JSC12StringObjectE
__ZTVN3JSC14JSGlobalObjectE
__ZTVN3JSC15JSWrapperObjectE
diff --git a/JavaScriptCore/JavaScriptCore.gypi b/JavaScriptCore/JavaScriptCore.gypi
index 9281576..a85d11c 100644
--- a/JavaScriptCore/JavaScriptCore.gypi
+++ b/JavaScriptCore/JavaScriptCore.gypi
@@ -164,6 +164,7 @@
'profiler/Profiler.cpp',
'profiler/Profiler.h',
'profiler/ProfilerServer.h',
+ 'runtime/AlignedMemoryAllocator.h',
'runtime/ArgList.cpp',
'runtime/ArgList.h',
'runtime/Arguments.cpp',
@@ -214,6 +215,10 @@
'runtime/FunctionConstructor.h',
'runtime/FunctionPrototype.cpp',
'runtime/FunctionPrototype.h',
+ 'runtime/GCActivityCallback.cpp',
+ 'runtime/GCActivityCallback.h',
+ 'runtime/GCHandle.cpp',
+ 'runtime/GCHandle.h',
'runtime/GetterSetter.cpp',
'runtime/GetterSetter.h',
'runtime/GlobalEvalFunction.cpp',
@@ -348,6 +353,7 @@
'wtf/Assertions.h',
'wtf/Atomics.h',
'wtf/AVLTree.h',
+ 'wtf/Bitmap.h',
'wtf/ByteArray.cpp',
'wtf/ByteArray.h',
'wtf/chromium/ChromiumThreading.h',
@@ -396,6 +402,7 @@
'wtf/OwnPtrCommon.h',
'wtf/PageAllocation.cpp',
'wtf/PageAllocation.h',
+            'wtf/PageReservation.h',
'wtf/PassOwnPtr.h',
'wtf/PassRefPtr.h',
'wtf/Platform.h',
diff --git a/JavaScriptCore/JavaScriptCore.pro b/JavaScriptCore/JavaScriptCore.pro
index 2489580..d6c4420 100644
--- a/JavaScriptCore/JavaScriptCore.pro
+++ b/JavaScriptCore/JavaScriptCore.pro
@@ -139,6 +139,8 @@ SOURCES += \
runtime/Executable.cpp \
runtime/FunctionConstructor.cpp \
runtime/FunctionPrototype.cpp \
+ runtime/GCActivityCallback.cpp \
+ runtime/GCHandle.cpp \
runtime/GetterSetter.cpp \
runtime/GlobalEvalFunction.cpp \
runtime/Identifier.cpp \
@@ -215,7 +217,6 @@ SOURCES += \
wtf/PageAllocation.cpp \
wtf/RandomNumber.cpp \
wtf/RefCountedLeakCounter.cpp \
- wtf/symbian/BlockAllocatorSymbian.cpp \
wtf/ThreadingNone.cpp \
wtf/Threading.cpp \
wtf/TypeTraits.cpp \
diff --git a/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def b/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def
index a478725..90fd3ad 100644
--- a/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def
+++ b/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def
@@ -5,6 +5,7 @@ EXPORTS
??0Collator@WTF@@QAE@PBD@Z
??0DateInstance@JSC@@QAE@PAVExecState@1@N@Z
??0DateInstance@JSC@@QAE@PAVExecState@1@V?$NonNullPassRefPtr@VStructure@JSC@@@WTF@@N@Z
+ ??0DefaultGCActivityCallback@JSC@@QAE@PAVHeap@1@@Z
??0DropAllLocks@JSLock@JSC@@QAE@W4JSLockBehavior@2@@Z
??0InternalFunction@JSC@@IAE@PAVJSGlobalData@1@PAVJSGlobalObject@1@V?$NonNullPassRefPtr@VStructure@JSC@@@WTF@@ABVIdentifier@1@@Z
??0JSArray@JSC@@QAE@V?$NonNullPassRefPtr@VStructure@JSC@@@WTF@@@Z
@@ -38,13 +39,14 @@ EXPORTS
??8WTF@@YA_NABVCString@0@0@Z
?NaN@JSC@@3NB
?UTF8String@UString@JSC@@QBE?AVCString@WTF@@_N@Z
- ?add@Identifier@JSC@@SA?AV?$PassRefPtr@VStringImpl@WebCore@@@WTF@@PAVExecState@2@PBD@Z
- ?add@PropertyNameArray@JSC@@QAEXPAVStringImpl@WebCore@@@Z
+ ?add@Identifier@JSC@@SA?AV?$PassRefPtr@VStringImpl@WTF@@@WTF@@PAVExecState@2@PBD@Z
+ ?add@PropertyNameArray@JSC@@QAEXPAVStringImpl@WTF@@@Z
?addBytes@MD5@WTF@@QAEXPBEI@Z
?addPropertyTransition@Structure@JSC@@SA?AV?$PassRefPtr@VStructure@JSC@@@WTF@@PAV12@ABVIdentifier@2@IPAVJSCell@2@AAI@Z
?addPropertyTransitionToExistingStructure@Structure@JSC@@SA?AV?$PassRefPtr@VStructure@JSC@@@WTF@@PAV12@ABVIdentifier@2@IPAVJSCell@2@AAI@Z
?addPropertyWithoutTransition@Structure@JSC@@QAEIABVIdentifier@2@IPAVJSCell@2@@Z
- ?addSlowCase@Identifier@JSC@@CA?AV?$PassRefPtr@VStringImpl@WebCore@@@WTF@@PAVExecState@2@PAVStringImpl@WebCore@@@Z
+ ?addSlowCase@Identifier@JSC@@CA?AV?$PassRefPtr@VStringImpl@WTF@@@WTF@@PAVExecState@2@PAVStringImpl@4@@Z
+ ?addWeakGCHandle@Heap@JSC@@QAEPAVWeakGCHandle@2@PAVJSCell@2@@Z
?allocate@Heap@JSC@@QAEPAXI@Z
?allocatePropertyStorage@JSObject@JSC@@QAEXII@Z
?allocateStack@MarkStack@JSC@@CAPAXI@Z
@@ -126,9 +128,9 @@ EXPORTS
?didTimeOut@TimeoutChecker@JSC@@QAE_NPAVExecState@2@@Z
?doubleToStringInJavaScriptFormat@WTF@@YAXNQADPAI@Z
?dumpSampleData@JSGlobalData@JSC@@QAEXPAVExecState@2@@Z
- ?empty@StringImpl@WebCore@@SAPAV12@XZ
+ ?empty@StringImpl@WTF@@SAPAV12@XZ
?enumerable@PropertyDescriptor@JSC@@QBE_NXZ
- ?equal@Identifier@JSC@@SA_NPBVStringImpl@WebCore@@PBD@Z
+ ?equal@Identifier@JSC@@SA_NPBVStringImpl@WTF@@PBD@Z
?evaluate@DebuggerCallFrame@JSC@@QBE?AVJSValue@2@ABVUString@2@AAV32@@Z
?evaluate@JSC@@YA?AVCompletion@1@PAVExecState@1@AAVScopeChain@1@ABVSourceCode@1@VJSValue@1@@Z
?exclude@Profile@JSC@@QAEXPBVProfileNode@2@@Z
@@ -141,6 +143,7 @@ EXPORTS
?fastZeroedMalloc@WTF@@YAPAXI@Z
?fillGetterPropertySlot@JSObject@JSC@@QAEXAAVPropertySlot@2@PAVJSValue@2@@Z
?focus@Profile@JSC@@QAEXPBVProfileNode@2@@Z
+ ?free@WeakGCHandlePool@JSC@@QAEXPAVWeakGCHandle@2@@Z
?from@Identifier@JSC@@SA?AV12@PAVExecState@2@H@Z
?from@Identifier@JSC@@SA?AV12@PAVExecState@2@I@Z
?from@UString@JSC@@SA?AV12@H@Z
@@ -148,7 +151,7 @@ EXPORTS
?from@UString@JSC@@SA?AV12@N@Z
?functionGetter@PropertySlot@JSC@@ABE?AVJSValue@2@PAVExecState@2@@Z
?functionName@DebuggerCallFrame@JSC@@QBEPBVUString@2@XZ
- ?get@Structure@JSC@@QAEIPBVStringImpl@WebCore@@AAIAAPAVJSCell@2@@Z
+ ?get@Structure@JSC@@QAEIPBVStringImpl@WTF@@AAIAAPAVJSCell@2@@Z
?getCallData@JSCell@JSC@@UAE?AW4CallType@2@AATCallData@2@@Z
?getConstructData@JSCell@JSC@@UAE?AW4ConstructType@2@AATConstructData@2@@Z
?getJSNumber@JSCell@JSC@@UAE?AVJSValue@2@XZ
@@ -182,10 +185,10 @@ EXPORTS
?hasInstance@JSObject@JSC@@UAE_NPAVExecState@2@VJSValue@2@1@Z
?hasProperty@JSObject@JSC@@QBE_NPAVExecState@2@ABVIdentifier@2@@Z
?hasProperty@JSObject@JSC@@QBE_NPAVExecState@2@I@Z
- ?hasTransition@Structure@JSC@@QAE_NPAVStringImpl@WebCore@@I@Z
+ ?hasTransition@Structure@JSC@@QAE_NPAVStringImpl@WTF@@I@Z
?heap@Heap@JSC@@SAPAV12@VJSValue@2@@Z
?increment@RefCountedLeakCounter@WTF@@QAEXXZ
- ?init@AtomicString@WebCore@@SAXXZ
+ ?init@AtomicString@WTF@@SAXXZ
?init@JSGlobalObject@JSC@@AAEXPAVJSObject@2@@Z
?initializeMainThread@WTF@@YAXXZ
?initializeThreading@JSC@@YAXXZ
@@ -226,6 +229,7 @@ EXPORTS
?objectProtoFuncToString@JSC@@YI_JPAVExecState@1@@Z
?parse@Parser@JSC@@AAEXPAVJSGlobalData@2@PAHPAVUString@2@@Z
?parseDateFromNullTerminatedCharacters@WTF@@YANPBD@Z
+ ?pool@WeakGCHandle@JSC@@QAEPAVWeakGCHandlePool@2@XZ
?profiler@Profiler@JSC@@SAPAV12@XZ
?protect@Heap@JSC@@QAEXVJSValue@2@@Z
?protectedGlobalObjectCount@Heap@JSC@@QAEIXZ
diff --git a/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj b/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj
index 7819f99..83d34e3 100644
--- a/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj
+++ b/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj
@@ -709,6 +709,22 @@
>
</File>
<File
+ RelativePath="..\..\runtime\GCActivityCallback.cpp"
+ >
+ </File>
+ <File
+ RelativePath="..\..\runtime\GCActivityCallback.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\runtime\GCHandle.cpp"
+ >
+ </File>
+ <File
+ RelativePath="..\..\runtime\GCHandle.h"
+ >
+ </File>
+ <File
RelativePath="..\..\runtime\GetterSetter.cpp"
>
</File>
diff --git a/JavaScriptCore/JavaScriptCore.vcproj/WTF/WTF.vcproj b/JavaScriptCore/JavaScriptCore.vcproj/WTF/WTF.vcproj
index 7f18f7e..9d551d0 100644
--- a/JavaScriptCore/JavaScriptCore.vcproj/WTF/WTF.vcproj
+++ b/JavaScriptCore/JavaScriptCore.vcproj/WTF/WTF.vcproj
@@ -437,6 +437,10 @@
>
</File>
<File
+ RelativePath="..\..\wtf\PageReservation.h"
+ >
+ </File>
+ <File
RelativePath="..\..\wtf\PassOwnPtr.h"
>
</File>
diff --git a/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj b/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
index 9e86120..de3afaa 100644
--- a/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
+++ b/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
@@ -214,6 +214,7 @@
868BFA17117CF19900B908B1 /* WTFString.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 868BFA15117CF19900B908B1 /* WTFString.cpp */; };
868BFA18117CF19900B908B1 /* WTFString.h in Headers */ = {isa = PBXBuildFile; fileRef = 868BFA16117CF19900B908B1 /* WTFString.h */; settings = {ATTRIBUTES = (Private, ); }; };
868BFA60117D048200B908B1 /* StaticConstructors.h in Headers */ = {isa = PBXBuildFile; fileRef = 868BFA5F117D048200B908B1 /* StaticConstructors.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 8690231512092D5C00630AF9 /* PageReservation.h in Headers */ = {isa = PBXBuildFile; fileRef = 8690231412092D5C00630AF9 /* PageReservation.h */; settings = {ATTRIBUTES = (Private, ); }; };
8698B86910D44D9400D8D01B /* StringBuilder.h in Headers */ = {isa = PBXBuildFile; fileRef = 8698B86810D44D9400D8D01B /* StringBuilder.h */; settings = {ATTRIBUTES = (Private, ); }; };
8698BB3910D86BAF00D8D01B /* UStringImpl.h in Headers */ = {isa = PBXBuildFile; fileRef = 8698BB3710D86BAF00D8D01B /* UStringImpl.h */; settings = {ATTRIBUTES = (Private, ); }; };
869D04AF1193B54D00803475 /* CachedTranscendentalFunction.h in Headers */ = {isa = PBXBuildFile; fileRef = 869D04AE1193B54D00803475 /* CachedTranscendentalFunction.h */; settings = {ATTRIBUTES = (Private, ); }; };
@@ -310,6 +311,7 @@
A7482B9411671147003B0712 /* JSWeakObjectMapRefPrivate.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A7482B7A1166CDEA003B0712 /* JSWeakObjectMapRefPrivate.cpp */; };
A7482E93116A7CAD003B0712 /* JSWeakObjectMapRefInternal.h in Headers */ = {isa = PBXBuildFile; fileRef = A7482E37116A697B003B0712 /* JSWeakObjectMapRefInternal.h */; settings = {ATTRIBUTES = (Private, ); }; };
A74B3499102A5F8E0032AB98 /* MarkStack.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A74B3498102A5F8E0032AB98 /* MarkStack.cpp */; };
+ A74DE1D0120B875600D40D5B /* ARMv7Assembler.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A74DE1CB120B86D600D40D5B /* ARMv7Assembler.cpp */; };
A75706DE118A2BCF0057F88F /* JITArithmetic32_64.cpp in Sources */ = {isa = PBXBuildFile; fileRef = A75706DD118A2BCF0057F88F /* JITArithmetic32_64.cpp */; };
A766B44F0EE8DCD1009518CA /* ExecutableAllocator.h in Headers */ = {isa = PBXBuildFile; fileRef = A7B48DB50EE74CFC00DCBDB6 /* ExecutableAllocator.h */; settings = {ATTRIBUTES = (Private, ); }; };
A76C51761182748D00715B05 /* JSInterfaceJIT.h in Headers */ = {isa = PBXBuildFile; fileRef = A76C51741182748D00715B05 /* JSInterfaceJIT.h */; };
@@ -508,6 +510,17 @@
BCFD8C920EEB2EE700283848 /* JumpTable.cpp in Sources */ = {isa = PBXBuildFile; fileRef = BCFD8C900EEB2EE700283848 /* JumpTable.cpp */; };
BCFD8C930EEB2EE700283848 /* JumpTable.h in Headers */ = {isa = PBXBuildFile; fileRef = BCFD8C910EEB2EE700283848 /* JumpTable.h */; };
C0A272630E50A06300E96E15 /* NotFound.h in Headers */ = {isa = PBXBuildFile; fileRef = C0A2723F0E509F1E00E96E15 /* NotFound.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ DD2724681208D1FF00F9ABE7 /* AlignedMemoryAllocator.h in Headers */ = {isa = PBXBuildFile; fileRef = DD2724671208D1FF00F9ABE7 /* AlignedMemoryAllocator.h */; };
+ DD2724691208D1FF00F9ABE7 /* AlignedMemoryAllocator.h in Headers */ = {isa = PBXBuildFile; fileRef = DD2724671208D1FF00F9ABE7 /* AlignedMemoryAllocator.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ DD377CBC12072C18006A2517 /* Bitmap.h in Headers */ = {isa = PBXBuildFile; fileRef = DD377CBB12072C18006A2517 /* Bitmap.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ DDE82AD31209D955005C1756 /* GCHandle.cpp in Sources */ = {isa = PBXBuildFile; fileRef = DDE82AD11209D955005C1756 /* GCHandle.cpp */; };
+ DDE82AD41209D955005C1756 /* GCHandle.cpp in Sources */ = {isa = PBXBuildFile; fileRef = DDE82AD11209D955005C1756 /* GCHandle.cpp */; };
+ DDE82AD51209D955005C1756 /* GCHandle.cpp in Sources */ = {isa = PBXBuildFile; fileRef = DDE82AD11209D955005C1756 /* GCHandle.cpp */; };
+ DDE82AD61209D955005C1756 /* GCHandle.h in Headers */ = {isa = PBXBuildFile; fileRef = DDE82AD21209D955005C1756 /* GCHandle.h */; };
+ DDE82AD71209D955005C1756 /* GCHandle.cpp in Sources */ = {isa = PBXBuildFile; fileRef = DDE82AD11209D955005C1756 /* GCHandle.cpp */; };
+ DDE82AD81209D955005C1756 /* GCHandle.h in Headers */ = {isa = PBXBuildFile; fileRef = DDE82AD21209D955005C1756 /* GCHandle.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ DDF7ABD411F60ED200108E36 /* GCActivityCallback.h in Headers */ = {isa = PBXBuildFile; fileRef = DDF7ABD211F60ED200108E36 /* GCActivityCallback.h */; };
+ DDF7ABD511F60ED200108E36 /* GCActivityCallbackCF.cpp in Sources */ = {isa = PBXBuildFile; fileRef = DDF7ABD311F60ED200108E36 /* GCActivityCallbackCF.cpp */; };
E124A8F70E555775003091F1 /* OpaqueJSString.h in Headers */ = {isa = PBXBuildFile; fileRef = E124A8F50E555775003091F1 /* OpaqueJSString.h */; settings = {ATTRIBUTES = (Private, ); }; };
E124A8F80E555775003091F1 /* OpaqueJSString.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E124A8F60E555775003091F1 /* OpaqueJSString.cpp */; };
E178636D0D9BEEC300D74E75 /* InitializeThreading.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E178636C0D9BEEC300D74E75 /* InitializeThreading.cpp */; };
@@ -788,6 +801,7 @@
868BFA15117CF19900B908B1 /* WTFString.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = WTFString.cpp; path = text/WTFString.cpp; sourceTree = "<group>"; };
868BFA16117CF19900B908B1 /* WTFString.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = WTFString.h; path = text/WTFString.h; sourceTree = "<group>"; };
868BFA5F117D048200B908B1 /* StaticConstructors.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = StaticConstructors.h; sourceTree = "<group>"; };
+ 8690231412092D5C00630AF9 /* PageReservation.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PageReservation.h; sourceTree = "<group>"; };
8698B86810D44D9400D8D01B /* StringBuilder.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = StringBuilder.h; sourceTree = "<group>"; };
8698BB3710D86BAF00D8D01B /* UStringImpl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = UStringImpl.h; sourceTree = "<group>"; };
869D04AE1193B54D00803475 /* CachedTranscendentalFunction.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CachedTranscendentalFunction.h; sourceTree = "<group>"; };
@@ -914,6 +928,7 @@
A7482B7A1166CDEA003B0712 /* JSWeakObjectMapRefPrivate.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JSWeakObjectMapRefPrivate.cpp; sourceTree = "<group>"; };
A7482E37116A697B003B0712 /* JSWeakObjectMapRefInternal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSWeakObjectMapRefInternal.h; sourceTree = "<group>"; };
A74B3498102A5F8E0032AB98 /* MarkStack.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MarkStack.cpp; sourceTree = "<group>"; };
+ A74DE1CB120B86D600D40D5B /* ARMv7Assembler.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ARMv7Assembler.cpp; sourceTree = "<group>"; };
A75706DD118A2BCF0057F88F /* JITArithmetic32_64.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JITArithmetic32_64.cpp; sourceTree = "<group>"; };
A76C51741182748D00715B05 /* JSInterfaceJIT.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSInterfaceJIT.h; sourceTree = "<group>"; };
A76EE6580FAE59D5003F069A /* NativeFunctionWrapper.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = NativeFunctionWrapper.h; sourceTree = "<group>"; };
@@ -1049,6 +1064,12 @@
C0A2723F0E509F1E00E96E15 /* NotFound.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = NotFound.h; sourceTree = "<group>"; };
D21202280AD4310C00ED79B6 /* DateConversion.cpp */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.cpp.cpp; path = DateConversion.cpp; sourceTree = "<group>"; };
D21202290AD4310C00ED79B6 /* DateConversion.h */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.c.h; path = DateConversion.h; sourceTree = "<group>"; };
+ DD2724671208D1FF00F9ABE7 /* AlignedMemoryAllocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AlignedMemoryAllocator.h; sourceTree = "<group>"; };
+ DD377CBB12072C18006A2517 /* Bitmap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Bitmap.h; sourceTree = "<group>"; };
+ DDE82AD11209D955005C1756 /* GCHandle.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = GCHandle.cpp; sourceTree = "<group>"; };
+ DDE82AD21209D955005C1756 /* GCHandle.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GCHandle.h; sourceTree = "<group>"; };
+ DDF7ABD211F60ED200108E36 /* GCActivityCallback.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GCActivityCallback.h; sourceTree = "<group>"; };
+ DDF7ABD311F60ED200108E36 /* GCActivityCallbackCF.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = GCActivityCallbackCF.cpp; sourceTree = "<group>"; };
E11D51750B2E798D0056C188 /* StringExtras.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = StringExtras.h; sourceTree = "<group>"; };
E124A8F50E555775003091F1 /* OpaqueJSString.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = OpaqueJSString.h; sourceTree = "<group>"; };
E124A8F60E555775003091F1 /* OpaqueJSString.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = OpaqueJSString.cpp; sourceTree = "<group>"; };
@@ -1406,6 +1427,7 @@
65E217B708E7EECC0023E5F6 /* Assertions.h */,
BC5F7BBB11823B590052C02C /* Atomics.h */,
E1A596370DE3E1C300C17E37 /* AVLTree.h */,
+ DD377CBB12072C18006A2517 /* Bitmap.h */,
86676D4D11FED55D004B6863 /* BumpPointerAllocator.h */,
A7A1F7AA0F252B3C00E184E2 /* ByteArray.cpp */,
A7A1F7AB0F252B3C00E184E2 /* ByteArray.h */,
@@ -1450,6 +1472,7 @@
440B7AED0FAF7FCB0073323E /* OwnPtrCommon.h */,
8627E5E911F1281900A313B5 /* PageAllocation.cpp */,
8627E5EA11F1281900A313B5 /* PageAllocation.h */,
+ 8690231412092D5C00630AF9 /* PageReservation.h */,
44DD48520FAEA85000D6B4EB /* PassOwnPtr.h */,
6580F795094070560082C219 /* PassRefPtr.h */,
65D6D87E09B5A32E0002E4D7 /* Platform.h */,
@@ -1555,6 +1578,9 @@
7EF6E0BB0EB7A1EC0079AFAF /* runtime */ = {
isa = PBXGroup;
children = (
+ DD2724671208D1FF00F9ABE7 /* AlignedMemoryAllocator.h */,
+ DDF7ABD211F60ED200108E36 /* GCActivityCallback.h */,
+ DDF7ABD311F60ED200108E36 /* GCActivityCallbackCF.cpp */,
BCF605110E203EF800B9A64D /* ArgList.cpp */,
BCF605120E203EF800B9A64D /* ArgList.h */,
BC257DE50E1F51C50016B6C9 /* Arguments.cpp */,
@@ -1608,6 +1634,8 @@
BC2680C10E16D4E900A06E92 /* FunctionConstructor.h */,
F692A85C0255597D01FF60F7 /* FunctionPrototype.cpp */,
F692A85D0255597D01FF60F7 /* FunctionPrototype.h */,
+ DDE82AD11209D955005C1756 /* GCHandle.cpp */,
+ DDE82AD21209D955005C1756 /* GCHandle.h */,
BC02E9B80E184545000F9297 /* GetterSetter.cpp */,
BC337BDE0E1AF0B80076918A /* GetterSetter.h */,
BC257DED0E1F52ED0016B6C9 /* GlobalEvalFunction.cpp */,
@@ -1822,6 +1850,7 @@
86D3B2BF10156BDE002865E7 /* ARMAssembler.cpp */,
86D3B2C010156BDE002865E7 /* ARMAssembler.h */,
86ADD1430FDDEA980006EEC2 /* ARMv7Assembler.h */,
+ A74DE1CB120B86D600D40D5B /* ARMv7Assembler.cpp */,
9688CB130ED12B4E001D649F /* AssemblerBuffer.h */,
86D3B2C110156BDE002865E7 /* AssemblerBufferWithConstantPool.h */,
86E116B00FE75AC800B512BC /* CodeLocation.h */,
@@ -1895,6 +1924,8 @@
144007570A5370D20005F061 /* JSNodeList.h in Headers */,
144005CC0A5338F80005F061 /* Node.h in Headers */,
1440074A0A536CC20005F061 /* NodeList.h in Headers */,
+ DD2724681208D1FF00F9ABE7 /* AlignedMemoryAllocator.h in Headers */,
+ DDE82AD61209D955005C1756 /* GCHandle.h in Headers */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -1923,6 +1954,7 @@
868BFA0A117CEFD100B908B1 /* AtomicStringImpl.h in Headers */,
BC18C3EB0E16F5CD00B34460 /* AVLTree.h in Headers */,
147B83AC0E6DB8C9004775A4 /* BatchedTransitionOptimizer.h in Headers */,
+ DD377CBC12072C18006A2517 /* Bitmap.h in Headers */,
BC18C3EC0E16F5CD00B34460 /* BooleanObject.h in Headers */,
86676D5211FED9BC004B6863 /* BumpPointerAllocator.h in Headers */,
A7A1F7AD0F252B3C00E184E2 /* ByteArray.h in Headers */,
@@ -2088,6 +2120,7 @@
BC18C44A0E16F5CD00B34460 /* OwnPtr.h in Headers */,
4409D8470FAF80A200523B87 /* OwnPtrCommon.h in Headers */,
8627E5EC11F1281900A313B5 /* PageAllocation.h in Headers */,
+ 8690231512092D5C00630AF9 /* PageReservation.h in Headers */,
BC18C44B0E16F5CD00B34460 /* Parser.h in Headers */,
93052C350FB792190048FDC3 /* ParserArena.h in Headers */,
44DD48530FAEA85000D6B4EB /* PassOwnPtr.h in Headers */,
@@ -2193,6 +2226,9 @@
868BFA18117CF19900B908B1 /* WTFString.h in Headers */,
86D08D5411793613006E5ED0 /* WTFThreadData.h in Headers */,
9688CB160ED12B4E001D649F /* X86Assembler.h in Headers */,
+ DDF7ABD411F60ED200108E36 /* GCActivityCallback.h in Headers */,
+ DD2724691208D1FF00F9ABE7 /* AlignedMemoryAllocator.h in Headers */,
+ DDE82AD81209D955005C1756 /* GCHandle.h in Headers */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -2287,6 +2323,12 @@
buildConfigurationList = 149C277108902AFE008A9EFC /* Build configuration list for PBXProject "JavaScriptCore" */;
compatibilityVersion = "Xcode 2.4";
hasScannedForEncodings = 1;
+ knownRegions = (
+ English,
+ Japanese,
+ French,
+ German,
+ );
mainGroup = 0867D691FE84028FC02AAC07 /* JavaScriptCore */;
productRefGroup = 034768DFFF38A50411DB9C8B /* Products */;
projectDirPath = "";
@@ -2447,6 +2489,7 @@
141211340A48795800480255 /* minidom.c in Sources */,
1440063F0A53598A0005F061 /* Node.c in Sources */,
1440074B0A536CC20005F061 /* NodeList.c in Sources */,
+ DDE82AD51209D955005C1756 /* GCHandle.cpp in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -2455,6 +2498,7 @@
buildActionMask = 2147483647;
files = (
1440F6100A4F85670005F061 /* testapi.c in Sources */,
+ DDE82AD41209D955005C1756 /* GCHandle.cpp in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -2632,7 +2676,10 @@
E1EF79AA0CE97BA60088D500 /* UTF8.cpp in Sources */,
868BFA17117CF19900B908B1 /* WTFString.cpp in Sources */,
86D08D5311793613006E5ED0 /* WTFThreadData.cpp in Sources */,
+ DDF7ABD511F60ED200108E36 /* GCActivityCallbackCF.cpp in Sources */,
8627E5EB11F1281900A313B5 /* PageAllocation.cpp in Sources */,
+ DDE82AD71209D955005C1756 /* GCHandle.cpp in Sources */,
+ A74DE1D0120B875600D40D5B /* ARMv7Assembler.cpp in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -2641,6 +2688,7 @@
buildActionMask = 2147483647;
files = (
932F5BDD0822A1C700736975 /* jsc.cpp in Sources */,
+ DDE82AD31209D955005C1756 /* GCHandle.cpp in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
diff --git a/JavaScriptCore/assembler/ARMAssembler.cpp b/JavaScriptCore/assembler/ARMAssembler.cpp
index 0016540..9442b4b 100644
--- a/JavaScriptCore/assembler/ARMAssembler.cpp
+++ b/JavaScriptCore/assembler/ARMAssembler.cpp
@@ -351,6 +351,8 @@ void* ARMAssembler::executableCopy(ExecutablePool* allocator)
bkpt(0);
char* data = reinterpret_cast<char*>(m_buffer.executableCopy(allocator));
+ if (!data)
+ return 0;
for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
// The last bit is set if the constant must be placed on constant pool.
diff --git a/JavaScriptCore/assembler/ARMv7Assembler.cpp b/JavaScriptCore/assembler/ARMv7Assembler.cpp
new file mode 100644
index 0000000..233a6f1
--- /dev/null
+++ b/JavaScriptCore/assembler/ARMv7Assembler.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
+
+#include "ARMv7Assembler.h"
+
+namespace JSC {
+
+const int ARMv7Assembler::JumpSizes[] = { 0xffffffff, 2 * sizeof(uint16_t), 2 * sizeof(uint16_t), 5 * sizeof(uint16_t) };
+
+}
+
+#endif
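
[Annotation, not part of the patch] The JumpSizes table above gives the per-type code footprint used by branch compaction: the LinkInvalid slot holds a 0xffffffff sentinel, short and conditional short jumps occupy two 16-bit Thumb halfwords (4 bytes), and a full-size jump occupies five (10 bytes, the same as JumpPaddingSize declared in ARMv7Assembler.h below, since every jump is emitted at full size first). A standalone back-of-envelope check of the bytes reclaimed per jump:

    #include <cstdio>

    int main()
    {
        // Mirrors JumpSizes[]: LinkInvalid sentinel, short, conditional short, long.
        const unsigned jumpSizes[] = { 0xffffffffu, 4, 4, 10 };
        const unsigned jumpPaddingSize = 10; // emitted size before compaction
        for (unsigned type = 1; type < 4; ++type)
            std::printf("link type %u reclaims %u bytes\n", type, jumpPaddingSize - jumpSizes[type]);
        return 0;
    }

So compaction wins back 6 bytes per short jump and nothing for a jump that must stay long.
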
diff --git a/JavaScriptCore/assembler/ARMv7Assembler.h b/JavaScriptCore/assembler/ARMv7Assembler.h
index 48eef53..f1b57b8 100644
--- a/JavaScriptCore/assembler/ARMv7Assembler.h
+++ b/JavaScriptCore/assembler/ARMv7Assembler.h
@@ -381,8 +381,8 @@ public:
u.d = d;
- int sign = (u.i >> 63);
- int exponent = (u.i >> 52) & 0x7ff;
+ int sign = static_cast<int>(u.i >> 63);
+ int exponent = static_cast<int>(u.i >> 52) & 0x7ff;
uint64_t mantissa = u.i & 0x000fffffffffffffull;
if ((exponent >= 0x3fc) && (exponent <= 0x403) && !(mantissa & 0x0000ffffffffffffull))
@@ -445,7 +445,6 @@ private:
} m_u;
};
-
class ARMv7Assembler {
public:
~ARMv7Assembler()
@@ -476,14 +475,44 @@ public:
ConditionGT,
ConditionLE,
ConditionAL,
-
+
ConditionCS = ConditionHS,
ConditionCC = ConditionLO,
} Condition;
+ enum JumpType { JumpNoCondition, JumpCondition, JumpFullSize };
+ enum JumpLinkType { LinkInvalid, LinkShortJump, LinkConditionalShortJump, LinkLongJump, JumpTypeCount };
+ static const int JumpSizes[JumpTypeCount];
+ enum { JumpPaddingSize = 5 * sizeof(uint16_t) };
+ class LinkRecord {
+ public:
+ LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition)
+ : m_from(from)
+ , m_to(to)
+ , m_type(type)
+ , m_linkType(LinkInvalid)
+ , m_condition(condition)
+ {
+ }
+ intptr_t from() const { return m_from; }
+ void setFrom(intptr_t from) { m_from = from; }
+ intptr_t to() const { return m_to; }
+ JumpType type() const { return m_type; }
+ JumpLinkType linkType() const { return m_linkType; }
+ void setLinkType(JumpLinkType linkType) { ASSERT(m_linkType == LinkInvalid); m_linkType = linkType; }
+ Condition condition() const { return m_condition; }
+ private:
+ intptr_t m_from : 31;
+ intptr_t m_to : 31;
+ JumpType m_type : 2;
+ JumpLinkType m_linkType : 3;
+ Condition m_condition : 16;
+ };
+
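
[Annotation, not part of the patch] LinkRecord packs its fields into bitfields (31-bit from/to offsets, 2-bit type, 3-bit link type, 16-bit condition) so the per-jump bookkeeping kept in m_jumpsToLink stays compact while compaction runs. A standalone sketch of the packing win, using hypothetical stand-in structs whose field widths mirror LinkRecord (exact sizes are ABI- and compiler-dependent; typically 12 vs. 20 bytes with a 32-bit intptr_t):

    #include <cstdint>
    #include <cstdio>

    struct Packed {            // field widths mirror LinkRecord above
        intptr_t from : 31;
        intptr_t to : 31;
        unsigned type : 2;
        unsigned linkType : 3;
        unsigned condition : 16;
    };

    struct Unpacked {          // the naive layout it avoids
        intptr_t from;
        intptr_t to;
        unsigned type;
        unsigned linkType;
        unsigned condition;
    };

    int main()
    {
        std::printf("%zu vs %zu bytes per record\n", sizeof(Packed), sizeof(Unpacked));
        return 0;
    }
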
class JmpSrc {
friend class ARMv7Assembler;
friend class ARMInstructionFormatter;
+ friend class LinkBuffer;
public:
JmpSrc()
: m_offset(-1)
@@ -491,17 +520,32 @@ public:
}
private:
- JmpSrc(int offset)
+ JmpSrc(int offset, JumpType type)
: m_offset(offset)
+ , m_condition(0xffff)
+ , m_type(type)
{
+ ASSERT(m_type != JumpCondition);
+ }
+
+ JmpSrc(int offset, JumpType type, Condition condition)
+ : m_offset(offset)
+ , m_condition(condition)
+ , m_type(type)
+ {
+ ASSERT(m_type == JumpCondition || m_type == JumpFullSize);
}
int m_offset;
+ Condition m_condition : 16;
+ JumpType m_type : 16;
+
};
class JmpDst {
friend class ARMv7Assembler;
friend class ARMInstructionFormatter;
+ friend class LinkBuffer;
public:
JmpDst()
: m_offset(-1)
@@ -525,17 +569,6 @@ public:
private:
- struct LinkRecord {
- LinkRecord(intptr_t from, intptr_t to)
- : from(from)
- , to(to)
- {
- }
-
- intptr_t from;
- intptr_t to;
- };
-
// ARMv7, Appx-A.6.3
bool BadReg(RegisterID reg)
{
@@ -739,7 +772,7 @@ private:
}
public:
-
+
void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
{
// Rd can only be SP if Rn is also SP.
@@ -878,27 +911,33 @@ public:
ASSERT(!BadReg(rm));
m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
}
-
+
// Only allowed in IT (if then) block if last instruction.
- JmpSrc b()
+ JmpSrc b(JumpType type)
{
m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b);
- return JmpSrc(m_formatter.size());
+ return JmpSrc(m_formatter.size(), type);
}
// Only allowed in IT (if then) block if last instruction.
- JmpSrc blx(RegisterID rm)
+ JmpSrc blx(RegisterID rm, JumpType type)
{
ASSERT(rm != ARMRegisters::pc);
m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8);
- return JmpSrc(m_formatter.size());
+ return JmpSrc(m_formatter.size(), type);
}
// Only allowed in IT (if then) block if last instruction.
- JmpSrc bx(RegisterID rm)
+ JmpSrc bx(RegisterID rm, JumpType type, Condition condition)
+ {
+ m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
+ return JmpSrc(m_formatter.size(), type, condition);
+ }
+
+ JmpSrc bx(RegisterID rm, JumpType type)
{
m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0);
- return JmpSrc(m_formatter.size());
+ return JmpSrc(m_formatter.size(), type);
}
void bkpt(uint8_t imm=0)
@@ -1617,6 +1656,15 @@ public:
{
return dst.m_offset - src.m_offset;
}
+
+ int executableOffsetFor(int location)
+ {
+ if (!location)
+ return 0;
+ return static_cast<int32_t*>(m_formatter.data())[location / sizeof(int32_t) - 1];
+ }
+
+ int jumpSizeDelta(JumpLinkType jumpLinkType) { return JumpPaddingSize - JumpSizes[jumpLinkType]; }
// Assembler admin methods:
@@ -1625,22 +1673,66 @@ public:
return m_formatter.size();
}
- void* executableCopy(ExecutablePool* allocator)
+ static bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
{
- void* copy = m_formatter.executableCopy(allocator);
+ return a.from() < b.from();
+ }
- unsigned jumpCount = m_jumpsToLink.size();
- for (unsigned i = 0; i < jumpCount; ++i) {
- uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(copy) + m_jumpsToLink[i].from);
- uint16_t* target = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(copy) + m_jumpsToLink[i].to);
- linkJumpAbsolute(location, target);
+ JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
+ {
+ if (record.type() >= JumpFullSize) {
+ record.setLinkType(LinkLongJump);
+ return LinkLongJump;
+ }
+ bool mayTriggerErrata = false;
+ const uint16_t* shortJumpLocation = reinterpret_cast<const uint16_t*>(from - (JumpPaddingSize - JumpSizes[LinkShortJump]));
+ if (!canBeShortJump(shortJumpLocation, to, mayTriggerErrata)) {
+ record.setLinkType(LinkLongJump);
+ return LinkLongJump;
}
- m_jumpsToLink.clear();
+ if (mayTriggerErrata) {
+ record.setLinkType(LinkLongJump);
+ return LinkLongJump;
+ }
+ if (record.type() == JumpCondition) {
+ record.setLinkType(LinkConditionalShortJump);
+ return LinkConditionalShortJump;
+ }
+ record.setLinkType(LinkShortJump);
+ return LinkShortJump;
+ }
- ASSERT(copy);
- return copy;
+ void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset)
+ {
+ int32_t ptr = regionStart / sizeof(int32_t);
+ const int32_t end = regionEnd / sizeof(int32_t);
+ int32_t* offsets = static_cast<int32_t*>(m_formatter.data());
+ while (ptr < end)
+ offsets[ptr++] = offset;
+ }
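
[Annotation, not part of the patch] recordLinkOffsets() and executableOffsetFor() (above) together implement the compaction side table: as LinkBuffer copies code and shrinks jumps, it writes the cumulative byte delta for each region into the int32 slots of the already-consumed unlinked buffer, and every later offset query subtracts that delta. For example, if the first jump shrank by 6 bytes, a pre-compaction label at offset 150 lands at 150 - 6 = 144 in the emitted code. A minimal standalone sketch of the idea (hypothetical names; it uses a separate vector rather than reusing the input buffer as the patch does):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct OffsetTable {
        std::vector<int32_t> slots; // one delta per 4 bytes of unlinked code

        explicit OffsetTable(size_t codeSize) : slots(codeSize / 4) { }

        void record(int32_t regionStart, int32_t regionEnd, int32_t delta)
        {
            for (int32_t p = regionStart / 4; p < regionEnd / 4; ++p)
                slots[p] = delta;
        }

        int32_t apply(int32_t offset) const // unlinked offset -> emitted offset
        {
            return offset ? offset - slots[offset / 4 - 1] : 0;
        }
    };

    int main()
    {
        OffsetTable table(256);
        table.record(0, 100, 0);   // nothing collapsed before the first jump
        table.record(100, 256, 6); // first jump shrank by 6 bytes
        assert(table.apply(150) == 144);
        return 0;
    }
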
+
+ Vector<LinkRecord>& jumpsToLink()
+ {
+ std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
+ return m_jumpsToLink;
+ }
+
+ void link(LinkRecord& record, uint8_t* from, uint8_t* to)
+ {
+ uint16_t* itttLocation;
+ if (record.linkType() == LinkConditionalShortJump) {
+ itttLocation = reinterpret_cast<uint16_t*>(from - JumpSizes[LinkConditionalShortJump] - 2);
+ itttLocation[0] = ifThenElse(record.condition()) | OP_IT;
+ }
+ ASSERT(record.linkType() != LinkInvalid);
+ if (record.linkType() != LinkLongJump)
+ linkShortJump(reinterpret_cast<uint16_t*>(from), to);
+ else
+ linkLongJump(reinterpret_cast<uint16_t*>(from), to);
}
+ void* unlinkedCode() { return m_formatter.data(); }
+
static unsigned getCallReturnOffset(JmpSrc call)
{
ASSERT(call.m_offset >= 0);
@@ -1659,7 +1751,7 @@ public:
{
ASSERT(to.m_offset != -1);
ASSERT(from.m_offset != -1);
- m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset));
+ m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, from.m_type, from.m_condition));
}
static void linkJump(void* code, JmpSrc from, void* to)
@@ -1862,19 +1954,12 @@ private:
return (instruction[0] == OP_NOP_T2a) && (instruction[1] == OP_NOP_T2b);
}
- static void linkJumpAbsolute(uint16_t* instruction, void* target)
+ static bool canBeShortJump(const uint16_t* instruction, const void* target, bool& mayTriggerErrata)
{
- // FIMXE: this should be up in the MacroAssembler layer. :-(
- const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
-
ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
-
- ASSERT( (isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
- || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)) );
-
+
intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
-
// From Cortex-A8 errata:
// If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and
// the target of the branch falls within the first region it is
@@ -1883,11 +1968,50 @@ private:
// to enter a deadlock state.
// The instruction is spanning two pages if it ends at an address ending 0x002
bool spansTwo4K = ((reinterpret_cast<intptr_t>(instruction) & 0xfff) == 0x002);
+ mayTriggerErrata = spansTwo4K;
// The target is in the first page if the jump branch back by [3..0x1002] bytes
bool targetInFirstPage = (relative >= -0x1002) && (relative < -2);
bool wouldTriggerA8Errata = spansTwo4K && targetInFirstPage;
+ return ((relative << 7) >> 7) == relative && !wouldTriggerA8Errata;
+ }
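
[Annotation, not part of the patch] canBeShortJump() folds two constraints into one test: the displacement must fit the 25-bit signed immediate of a Thumb-2 B.W (roughly +/-16MB), which is what the shift pair ((relative << 7) >> 7) == relative checks on a 32-bit intptr_t, and the branch must not hit the Cortex-A8 erratum described above (a 32-bit branch ending at a 0x002 page offset whose target lies 3 to 0x1002 bytes behind it). A standalone sketch of just the range test (hypothetical helper; it keeps the source's shift idiom, which assumes a 32-bit type and two's-complement arithmetic):

    #include <cassert>
    #include <cstdint>

    // True iff 'relative' survives truncation to a 25-bit signed value,
    // i.e. the short-jump encoding can represent the displacement.
    static bool fitsInShortJump(int32_t relative)
    {
        return ((relative << 7) >> 7) == relative;
    }

    int main()
    {
        assert(fitsInShortJump(0xFFFFFE));     // just under +16MB: short jump ok
        assert(!fitsInShortJump(0x1000000));   // +16MB: needs a long jump
        assert(fitsInShortJump(-0x1000000));   // -16MB is still representable
        assert(!fitsInShortJump(-0x1000002));  // beyond -16MB: long jump
        return 0;
    }
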
- if (((relative << 7) >> 7) == relative && !wouldTriggerA8Errata) {
+ static void linkLongJump(uint16_t* instruction, void* target)
+ {
+ linkJumpAbsolute(instruction, target);
+ }
+
+ static void linkShortJump(uint16_t* instruction, void* target)
+ {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ bool scratch;
+ UNUSED_PARAM(scratch);
+ ASSERT(canBeShortJump(instruction, target, scratch));
+ // ARM encoding for the top two bits below the sign bit is 'peculiar'.
+ if (relative >= 0)
+ relative ^= 0xC00000;
+
+ // All branch offsets should be an even distance.
+ ASSERT(!(relative & 1));
+ instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
+ instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
+ }
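
[Annotation, not part of the patch] The 'peculiar' step in linkShortJump() comes from the T4 branch encoding, which stores I1 = NOT(J1 XOR S) and I2 = NOT(J2 XOR S) rather than raw displacement bits 23:22; pre-flipping those two bits when the displacement is non-negative (S == 0) lets the mask-and-shift packing below treat both signs uniformly. A standalone round-trip check of that trick:

    #include <cassert>
    #include <cstdint>
    #include <initializer_list>

    int main()
    {
        for (int32_t relative : { 0x12344, -0x12344 }) {
            uint32_t enc = static_cast<uint32_t>(relative);
            if (relative >= 0)
                enc ^= 0xC00000; // bits 23:22 now hold J1:J2 instead of I1:I2
            // Decode as the CPU would: S, then I1 = !(J1 ^ S), I2 = !(J2 ^ S).
            uint32_t s  = (enc >> 24) & 1;
            uint32_t i1 = (~(((enc >> 23) & 1) ^ s)) & 1;
            uint32_t i2 = (~(((enc >> 22) & 1) ^ s)) & 1;
            uint32_t imm25 = (s << 24) | (i1 << 23) | (i2 << 22) | (enc & 0x3fffff);
            int32_t decoded = static_cast<int32_t>(imm25 << 7) >> 7; // sign-extend 25 bits
            assert(decoded == relative);
        }
        return 0;
    }
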
+
+ static void linkJumpAbsolute(uint16_t* instruction, void* target)
+ {
+        // FIXME: this should be up in the MacroAssembler layer. :-(
+ ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
+ ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
+
+ ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
+ || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)));
+
+ intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
+ bool scratch;
+ if (canBeShortJump(instruction, target, scratch)) {
// ARM encoding for the top two bits below the sign bit is 'peculiar'.
if (relative >= 0)
relative ^= 0xC00000;
@@ -1905,6 +2029,7 @@ private:
instruction[-2] = OP_B_T4a | ((relative & 0x1000000) >> 14) | ((relative & 0x3ff000) >> 12);
instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1);
} else {
+ const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) + 1));
ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(reinterpret_cast<uint32_t>(target) >> 16));
instruction[-5] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16);
@@ -1919,6 +2044,7 @@ private:
{
return op | (imm.m_value.i << 10) | imm.m_value.imm4;
}
+
static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm)
{
return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8;
@@ -2035,6 +2161,7 @@ private:
} m_formatter;
Vector<LinkRecord> m_jumpsToLink;
+ Vector<int32_t> m_offsets;
};
} // namespace JSC
diff --git a/JavaScriptCore/assembler/AbstractMacroAssembler.h b/JavaScriptCore/assembler/AbstractMacroAssembler.h
index aab9089..5db2cb9 100644
--- a/JavaScriptCore/assembler/AbstractMacroAssembler.h
+++ b/JavaScriptCore/assembler/AbstractMacroAssembler.h
@@ -418,12 +418,6 @@ public:
// Section 3: Misc admin methods
-
- static CodePtr trampolineAt(CodeRef ref, Label label)
- {
- return CodePtr(AssemblerType::getRelocatedAddress(ref.m_code.dataLocation(), label.m_label));
- }
-
size_t size()
{
return m_assembler.size();
@@ -479,6 +473,9 @@ public:
{
return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
}
+
+ void beginUninterruptedSequence() { }
+ void endUninterruptedSequence() { }
protected:
AssemblerType m_assembler;
diff --git a/JavaScriptCore/assembler/LinkBuffer.h b/JavaScriptCore/assembler/LinkBuffer.h
index 47cac5a..624d1cc 100644
--- a/JavaScriptCore/assembler/LinkBuffer.h
+++ b/JavaScriptCore/assembler/LinkBuffer.h
@@ -49,30 +49,56 @@ namespace JSC {
//
class LinkBuffer : public Noncopyable {
typedef MacroAssemblerCodeRef CodeRef;
+ typedef MacroAssemblerCodePtr CodePtr;
typedef MacroAssembler::Label Label;
typedef MacroAssembler::Jump Jump;
typedef MacroAssembler::JumpList JumpList;
typedef MacroAssembler::Call Call;
typedef MacroAssembler::DataLabel32 DataLabel32;
typedef MacroAssembler::DataLabelPtr DataLabelPtr;
+ typedef MacroAssembler::JmpDst JmpDst;
+#if ENABLE(BRANCH_COMPACTION)
+ typedef MacroAssembler::LinkRecord LinkRecord;
+ typedef MacroAssembler::JumpLinkType JumpLinkType;
+#endif
+
+ enum LinkBufferState {
+ StateInit,
+ StateChecked,
+ StateFinalized,
+ };
public:
// Note: Initialization sequence is significant, since executablePool is a PassRefPtr.
// First, executablePool is copied into m_executablePool, then the initialization of
// m_code uses m_executablePool, *not* executablePool, since this is no longer valid.
- LinkBuffer(MacroAssembler* masm, PassRefPtr<ExecutablePool> executablePool)
+ // The linkOffset parameter should only be non-null when recompiling for exception info
+ LinkBuffer(MacroAssembler* masm, PassRefPtr<ExecutablePool> executablePool, void* linkOffset)
: m_executablePool(executablePool)
- , m_code(masm->m_assembler.executableCopy(m_executablePool.get()))
- , m_size(masm->m_assembler.size())
+ , m_size(0)
+ , m_code(0)
+ , m_assembler(masm)
#ifndef NDEBUG
- , m_completed(false)
+ , m_state(StateInit)
#endif
{
+ linkCode(linkOffset);
}
~LinkBuffer()
{
- ASSERT(m_completed);
+ ASSERT(m_state == StateFinalized);
+ }
+
+    // After constructing a link buffer, a client must call allocationSuccessful() to check that allocation did not fail.
+ bool allocationSuccessful()
+ {
+#ifndef NDEBUG
+ ASSERT(m_state == StateInit);
+ m_state = StateChecked;
+#endif
+
+ return m_code;
}
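
[Annotation, not part of the patch] The constructor change plus allocationSuccessful() establish a new client contract: the LinkBuffer now performs the executable copy (and, with branch compaction enabled, the jump-shrinking pass) up front, so an allocation failure surfaces as a null m_code instead of a crash during linking. A minimal usage sketch under that contract (masm, executablePool, someCall and someStub are assumed to exist for illustration):

    // Hedged sketch of the intended call pattern after this change.
    LinkBuffer patchBuffer(&masm, executablePool, /* linkOffset */ 0);
    if (!patchBuffer.allocationSuccessful())
        return false;                        // propagate OOM to the caller
    patchBuffer.link(someCall, FunctionPtr(someStub));
    MacroAssemblerCodeRef code = patchBuffer.finalizeCode();
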
// These methods are used to link or set values at code generation time.
@@ -80,28 +106,32 @@ public:
void link(Call call, FunctionPtr function)
{
ASSERT(call.isFlagSet(Call::Linkable));
+ call.m_jmp = applyOffset(call.m_jmp);
MacroAssembler::linkCall(code(), call, function);
}
void link(Jump jump, CodeLocationLabel label)
{
+ jump.m_jmp = applyOffset(jump.m_jmp);
MacroAssembler::linkJump(code(), jump, label);
}
void link(JumpList list, CodeLocationLabel label)
{
for (unsigned i = 0; i < list.m_jumps.size(); ++i)
- MacroAssembler::linkJump(code(), list.m_jumps[i], label);
+ link(list.m_jumps[i], label);
}
void patch(DataLabelPtr label, void* value)
{
- MacroAssembler::linkPointer(code(), label.m_label, value);
+ JmpDst target = applyOffset(label.m_label);
+ MacroAssembler::linkPointer(code(), target, value);
}
void patch(DataLabelPtr label, CodeLocationLabel value)
{
- MacroAssembler::linkPointer(code(), label.m_label, value.executableAddress());
+ JmpDst target = applyOffset(label.m_label);
+ MacroAssembler::linkPointer(code(), target, value.executableAddress());
}
// These methods are used to obtain handles to allow the code to be relinked / repatched later.
@@ -110,35 +140,36 @@ public:
{
ASSERT(call.isFlagSet(Call::Linkable));
ASSERT(!call.isFlagSet(Call::Near));
- return CodeLocationCall(MacroAssembler::getLinkerAddress(code(), call.m_jmp));
+ return CodeLocationCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_jmp)));
}
CodeLocationNearCall locationOfNearCall(Call call)
{
ASSERT(call.isFlagSet(Call::Linkable));
ASSERT(call.isFlagSet(Call::Near));
- return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), call.m_jmp));
+ return CodeLocationNearCall(MacroAssembler::getLinkerAddress(code(), applyOffset(call.m_jmp)));
}
CodeLocationLabel locationOf(Label label)
{
- return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), label.m_label));
+ return CodeLocationLabel(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
}
CodeLocationDataLabelPtr locationOf(DataLabelPtr label)
{
- return CodeLocationDataLabelPtr(MacroAssembler::getLinkerAddress(code(), label.m_label));
+ return CodeLocationDataLabelPtr(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
}
CodeLocationDataLabel32 locationOf(DataLabel32 label)
{
- return CodeLocationDataLabel32(MacroAssembler::getLinkerAddress(code(), label.m_label));
+ return CodeLocationDataLabel32(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label)));
}
// This method obtains the return address of the call, given as an offset from
// the start of the code.
unsigned returnAddressOffset(Call call)
{
+ call.m_jmp = applyOffset(call.m_jmp);
return MacroAssembler::getLinkerCallReturnOffset(call);
}
@@ -152,6 +183,7 @@ public:
return CodeRef(m_code, m_executablePool, m_size);
}
+
CodeLocationLabel finalizeCodeAddendum()
{
performFinalization();
@@ -159,7 +191,20 @@ public:
return CodeLocationLabel(code());
}
+ CodePtr trampolineAt(Label label)
+ {
+ return CodePtr(MacroAssembler::AssemblerType_T::getRelocatedAddress(code(), applyOffset(label.m_label)));
+ }
+
private:
+ template <typename T> T applyOffset(T src)
+ {
+#if ENABLE(BRANCH_COMPACTION)
+ src.m_offset -= m_assembler->executableOffsetFor(src.m_offset);
+#endif
+ return src;
+ }
+
// Keep this private! - the underlying code should only be obtained externally via
// finalizeCode() or finalizeCodeAddendum().
void* code()
@@ -167,11 +212,80 @@ private:
return m_code;
}
+ void linkCode(void* linkOffset)
+ {
+ UNUSED_PARAM(linkOffset);
+ ASSERT(!m_code);
+#if !ENABLE(BRANCH_COMPACTION)
+ m_code = m_assembler->m_assembler.executableCopy(m_executablePool.get());
+ m_size = m_assembler->size();
+#else
+ size_t initialSize = m_assembler->size();
+ m_code = (uint8_t*)m_executablePool->alloc(initialSize);
+ if (!m_code)
+ return;
+ ExecutableAllocator::makeWritable(m_code, m_assembler->size());
+ uint8_t* inData = (uint8_t*)m_assembler->unlinkedCode();
+ uint8_t* outData = reinterpret_cast<uint8_t*>(m_code);
+ const uint8_t* linkBase = linkOffset ? reinterpret_cast<uint8_t*>(linkOffset) : outData;
+ int readPtr = 0;
+ int writePtr = 0;
+ Vector<LinkRecord>& jumpsToLink = m_assembler->jumpsToLink();
+ unsigned jumpCount = jumpsToLink.size();
+ for (unsigned i = 0; i < jumpCount; ++i) {
+ int offset = readPtr - writePtr;
+ ASSERT(!(offset & 1));
+
+ // Copy the instructions from the last jump to the current one.
+ size_t regionSize = jumpsToLink[i].from() - readPtr;
+ memcpy(outData + writePtr, inData + readPtr, regionSize);
+ m_assembler->recordLinkOffsets(readPtr, jumpsToLink[i].from(), offset);
+ readPtr += regionSize;
+ writePtr += regionSize;
+
+ // Calculate the absolute address of the jump target; for backwards
+ // branches we need to be precise, for forward branches we are pessimistic.
+ const uint8_t* target;
+ if (jumpsToLink[i].to() >= jumpsToLink[i].from())
+ target = linkBase + jumpsToLink[i].to() - offset; // Compensate for what we have collapsed so far
+ else
+ target = linkBase + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
+
+ JumpLinkType jumpLinkType = m_assembler->computeJumpType(jumpsToLink[i], linkBase + writePtr, target);
+
+ // Step back in the write stream
+ int32_t delta = m_assembler->jumpSizeDelta(jumpLinkType);
+ if (delta) {
+ writePtr -= delta;
+ m_assembler->recordLinkOffsets(jumpsToLink[i].from() - delta, readPtr, readPtr - writePtr);
+ }
+ jumpsToLink[i].setFrom(writePtr);
+ }
+ // Copy everything after the last jump
+ memcpy(outData + writePtr, inData + readPtr, m_assembler->size() - readPtr);
+ m_assembler->recordLinkOffsets(readPtr, m_assembler->size(), readPtr - writePtr);
+
+ // Actually link everything (don't link if we've been given a linkOffset, as it's a
+ // waste of time: linkOffset is only used when recompiling to collect exception info)
+ if (!linkOffset) {
+ for (unsigned i = 0; i < jumpCount; ++i) {
+ uint8_t* location = outData + jumpsToLink[i].from();
+ uint8_t* target = outData + jumpsToLink[i].to() - m_assembler->executableOffsetFor(jumpsToLink[i].to());
+ m_assembler->link(jumpsToLink[i], location, target);
+ }
+ }
+
+ jumpsToLink.clear();
+ m_size = writePtr + m_assembler->size() - readPtr;
+ m_executablePool->returnLastBytes(initialSize - m_size);
+#endif
+ }
+
void performFinalization()
{
#ifndef NDEBUG
- ASSERT(!m_completed);
- m_completed = true;
+ ASSERT(m_state == StateChecked);
+ m_state = StateFinalized;
#endif
ExecutableAllocator::makeExecutable(code(), m_size);
@@ -179,10 +293,11 @@ private:
}
RefPtr<ExecutablePool> m_executablePool;
- void* m_code;
size_t m_size;
+ void* m_code;
+ MacroAssembler* m_assembler;
#ifndef NDEBUG
- bool m_completed;
+ LinkBufferState m_state;
#endif
};
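Taken together, the LinkBuffer changes above split construction from allocation checking: the constructor copies (and, under ENABLE(BRANCH_COMPACTION), compacts) the code via linkCode(), and a client must then probe allocationSuccessful() before linking and finalizing. A minimal sketch of the new calling convention, mirroring the JIT::privateCompile() call site later in this patch (masm, pool, someCall, and someStub are illustrative names, not part of the patch):

    LinkBuffer patchBuffer(&masm, pool.release(), 0);  // null linkOffset: link for real
    if (!patchBuffer.allocationSuccessful())
        return JITCode();                              // propagate failure instead of CRASH()ing
    patchBuffer.link(someCall, FunctionPtr(someStub)); // linking is only legal in StateChecked
    CodeRef code = patchBuffer.finalizeCode();         // transitions to StateFinalized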
diff --git a/JavaScriptCore/assembler/MIPSAssembler.h b/JavaScriptCore/assembler/MIPSAssembler.h
index 076d220..a19c7a6 100644
--- a/JavaScriptCore/assembler/MIPSAssembler.h
+++ b/JavaScriptCore/assembler/MIPSAssembler.h
@@ -689,10 +689,8 @@ public:
void* executableCopy(ExecutablePool* allocator)
{
void *result = m_buffer.executableCopy(allocator);
- if (!result)
- return 0;
-
- relocateJumps(m_buffer.data(), result);
+ if (result)
+ relocateJumps(m_buffer.data(), result);
return result;
}
diff --git a/JavaScriptCore/assembler/MacroAssemblerARMv7.h b/JavaScriptCore/assembler/MacroAssemblerARMv7.h
index 64513fd..a1539f2 100644
--- a/JavaScriptCore/assembler/MacroAssemblerARMv7.h
+++ b/JavaScriptCore/assembler/MacroAssemblerARMv7.h
@@ -45,6 +45,23 @@ class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> {
inline ARMRegisters::FPSingleRegisterID fpTempRegisterAsSingle() { return ARMRegisters::asSingle(fpTempRegister); }
public:
+ typedef ARMv7Assembler::LinkRecord LinkRecord;
+ typedef ARMv7Assembler::JumpLinkType JumpLinkType;
+
+ MacroAssemblerARMv7()
+ : m_inUninterruptedSequence(false)
+ {
+ }
+
+ void beginUninterruptedSequence() { m_inUninterruptedSequence = true; }
+ void endUninterruptedSequence() { m_inUninterruptedSequence = false; }
+ Vector<LinkRecord>& jumpsToLink() { return m_assembler.jumpsToLink(); }
+ void* unlinkedCode() { return m_assembler.unlinkedCode(); }
+ JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); }
+ void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) { return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
+ int jumpSizeDelta(JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpLinkType); }
+ void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); }
+
struct ArmAddress {
enum AddressType {
HasOffset,
@@ -969,14 +986,14 @@ public:
void jump(RegisterID target)
{
- m_assembler.bx(target);
+ m_assembler.bx(target, inUninterruptedSequence() ? ARMv7Assembler::JumpFullSize : ARMv7Assembler::JumpNoCondition);
}
// Address is a memory location containing the address to jump to
void jump(Address address)
{
load32(address, dataTempRegister);
- m_assembler.bx(dataTempRegister);
+ m_assembler.bx(dataTempRegister, inUninterruptedSequence() ? ARMv7Assembler::JumpFullSize : ARMv7Assembler::JumpNoCondition);
}
@@ -1012,7 +1029,7 @@ public:
Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
{
- ASSERT(cond == Overflow);
+ ASSERT_UNUSED(cond, cond == Overflow);
m_assembler.smull(dest, dataTempRegister, dest, src);
m_assembler.asr(addressTempRegister, dest, 31);
return branch32(NotEqual, addressTempRegister, dataTempRegister);
@@ -1020,7 +1037,7 @@ public:
Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
{
- ASSERT(cond == Overflow);
+ ASSERT_UNUSED(cond, cond == Overflow);
move(imm, dataTempRegister);
m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
m_assembler.asr(addressTempRegister, dest, 31);
@@ -1059,35 +1076,35 @@ public:
void breakpoint()
{
- m_assembler.bkpt();
+ m_assembler.bkpt(0);
}
Call nearCall()
{
moveFixedWidthEncoding(Imm32(0), dataTempRegister);
- return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
+ return Call(m_assembler.blx(dataTempRegister, ARMv7Assembler::JumpFullSize), Call::LinkableNear);
}
Call call()
{
moveFixedWidthEncoding(Imm32(0), dataTempRegister);
- return Call(m_assembler.blx(dataTempRegister), Call::Linkable);
+ return Call(m_assembler.blx(dataTempRegister, ARMv7Assembler::JumpFullSize), Call::Linkable);
}
Call call(RegisterID target)
{
- return Call(m_assembler.blx(target), Call::None);
+ return Call(m_assembler.blx(target, ARMv7Assembler::JumpFullSize), Call::None);
}
Call call(Address address)
{
load32(address, dataTempRegister);
- return Call(m_assembler.blx(dataTempRegister), Call::None);
+ return Call(m_assembler.blx(dataTempRegister, ARMv7Assembler::JumpFullSize), Call::None);
}
void ret()
{
- m_assembler.bx(linkRegister);
+ m_assembler.bx(linkRegister, ARMv7Assembler::JumpFullSize);
}
void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
@@ -1187,7 +1204,7 @@ public:
{
// Like a normal call, but don't link.
moveFixedWidthEncoding(Imm32(0), dataTempRegister);
- return Call(m_assembler.bx(dataTempRegister), Call::Linkable);
+ return Call(m_assembler.bx(dataTempRegister, ARMv7Assembler::JumpFullSize), Call::Linkable);
}
Call makeTailRecursiveCall(Jump oldJump)
@@ -1196,19 +1213,29 @@ public:
return tailRecursiveCall();
}
+
+ int executableOffsetFor(int location)
+ {
+ return m_assembler.executableOffsetFor(location);
+ }
protected:
+ bool inUninterruptedSequence()
+ {
+ return m_inUninterruptedSequence;
+ }
+
ARMv7Assembler::JmpSrc makeJump()
{
moveFixedWidthEncoding(Imm32(0), dataTempRegister);
- return m_assembler.bx(dataTempRegister);
+ return m_assembler.bx(dataTempRegister, inUninterruptedSequence() ? ARMv7Assembler::JumpFullSize : ARMv7Assembler::JumpNoCondition);
}
ARMv7Assembler::JmpSrc makeBranch(ARMv7Assembler::Condition cond)
{
m_assembler.it(cond, true, true);
moveFixedWidthEncoding(Imm32(0), dataTempRegister);
- return m_assembler.bx(dataTempRegister);
+ return m_assembler.bx(dataTempRegister, inUninterruptedSequence() ? ARMv7Assembler::JumpFullSize : ARMv7Assembler::JumpCondition, cond);
}
ARMv7Assembler::JmpSrc makeBranch(Condition cond) { return makeBranch(armV7Condition(cond)); }
ARMv7Assembler::JmpSrc makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }
@@ -1298,6 +1325,8 @@ private:
{
ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
}
+
+ bool m_inUninterruptedSequence;
};
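The m_inUninterruptedSequence flag exists so that branch compaction never shrinks a jump inside a region that will later be repatched in place: while the flag is set, makeJump(), makeBranch(), and jump() emit JumpFullSize encodings. A hedged sketch of bracketing such a region (register and condition choices are illustrative):

    MacroAssemblerARMv7 masm;
    masm.beginUninterruptedSequence();  // jumps below keep their full-size encoding
    MacroAssemblerARMv7::Jump slowCase =
        masm.branch32(MacroAssemblerARMv7::NotEqual, ARMRegisters::r0, ARMRegisters::r1);
    masm.endUninterruptedSequence();    // subsequent jumps may be compacted again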
} // namespace JSC
diff --git a/JavaScriptCore/assembler/X86Assembler.h b/JavaScriptCore/assembler/X86Assembler.h
index 20d72f5..a1fae0c 100644
--- a/JavaScriptCore/assembler/X86Assembler.h
+++ b/JavaScriptCore/assembler/X86Assembler.h
@@ -1626,9 +1626,7 @@ public:
void* executableCopy(ExecutablePool* allocator)
{
- void* copy = m_formatter.executableCopy(allocator);
- ASSERT(copy);
- return copy;
+ return m_formatter.executableCopy(allocator);
}
private:
diff --git a/JavaScriptCore/bytecode/CodeBlock.cpp b/JavaScriptCore/bytecode/CodeBlock.cpp
index 0e55d6a..9a8c332 100644
--- a/JavaScriptCore/bytecode/CodeBlock.cpp
+++ b/JavaScriptCore/bytecode/CodeBlock.cpp
@@ -1344,8 +1344,9 @@ void CodeBlock::dumpStatistics()
#endif
}
-CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, SymbolTable* symTab, bool isConstructor)
- : m_numCalleeRegisters(0)
+CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, JSGlobalObject *globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, SymbolTable* symTab, bool isConstructor)
+ : m_globalObject(globalObject)
+ , m_numCalleeRegisters(0)
, m_numVars(0)
, m_numParameters(0)
, m_isConstructor(isConstructor)
@@ -1457,8 +1458,8 @@ void CodeBlock::derefStructures(Instruction* vPC) const
return;
}
if (vPC[0].u.opcode == interpreter->getOpcode(op_resolve_global) || vPC[0].u.opcode == interpreter->getOpcode(op_resolve_global_dynamic)) {
- if(vPC[4].u.structure)
- vPC[4].u.structure->deref();
+ if (vPC[3].u.structure)
+ vPC[3].u.structure->deref();
return;
}
if ((vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_proto_list))
@@ -1518,6 +1519,7 @@ void CodeBlock::markAggregate(MarkStack& markStack)
m_functionExprs[i]->markAggregate(markStack);
for (size_t i = 0; i < m_functionDecls.size(); ++i)
m_functionDecls[i]->markAggregate(markStack);
+ markStack.append(m_globalObject);
}
bool CodeBlock::reparseForExceptionInfoIfNecessary(CallFrame* callFrame)
diff --git a/JavaScriptCore/bytecode/CodeBlock.h b/JavaScriptCore/bytecode/CodeBlock.h
index 2f22dd0..be12254 100644
--- a/JavaScriptCore/bytecode/CodeBlock.h
+++ b/JavaScriptCore/bytecode/CodeBlock.h
@@ -275,7 +275,10 @@ namespace JSC {
class CodeBlock : public FastAllocBase {
friend class JIT;
protected:
- CodeBlock(ScriptExecutable* ownerExecutable, CodeType, PassRefPtr<SourceProvider>, unsigned sourceOffset, SymbolTable* symbolTable, bool isConstructor);
+ CodeBlock(ScriptExecutable* ownerExecutable, CodeType, JSGlobalObject*, PassRefPtr<SourceProvider>, unsigned sourceOffset, SymbolTable* symbolTable, bool isConstructor);
+
+ JSGlobalObject* m_globalObject;
+
public:
virtual ~CodeBlock();
@@ -483,6 +486,7 @@ namespace JSC {
unsigned addRegExp(RegExp* r) { createRareDataIfNecessary(); unsigned size = m_rareData->m_regexps.size(); m_rareData->m_regexps.append(r); return size; }
RegExp* regexp(int index) const { ASSERT(m_rareData); return m_rareData->m_regexps[index].get(); }
+ JSGlobalObject* globalObject() { return m_globalObject; }
// Jump Tables
@@ -602,9 +606,8 @@ namespace JSC {
class GlobalCodeBlock : public CodeBlock {
public:
- GlobalCodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, JSGlobalObject* globalObject)
- : CodeBlock(ownerExecutable, codeType, sourceProvider, sourceOffset, &m_unsharedSymbolTable, false)
- , m_globalObject(globalObject)
+ GlobalCodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset)
+ : CodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, sourceOffset, &m_unsharedSymbolTable, false)
{
m_globalObject->codeBlocks().add(this);
}
@@ -618,14 +621,13 @@ namespace JSC {
void clearGlobalObject() { m_globalObject = 0; }
private:
- JSGlobalObject* m_globalObject; // For program and eval nodes, the global object that marks the constant pool.
SymbolTable m_unsharedSymbolTable;
};
class ProgramCodeBlock : public GlobalCodeBlock {
public:
ProgramCodeBlock(ProgramExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider)
- : GlobalCodeBlock(ownerExecutable, codeType, sourceProvider, 0, globalObject)
+ : GlobalCodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, 0)
{
}
};
@@ -633,7 +635,7 @@ namespace JSC {
class EvalCodeBlock : public GlobalCodeBlock {
public:
EvalCodeBlock(EvalExecutable* ownerExecutable, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, int baseScopeDepth)
- : GlobalCodeBlock(ownerExecutable, EvalCode, sourceProvider, 0, globalObject)
+ : GlobalCodeBlock(ownerExecutable, EvalCode, globalObject, sourceProvider, 0)
, m_baseScopeDepth(baseScopeDepth)
{
}
@@ -659,8 +661,8 @@ namespace JSC {
// as we need to initialise the CodeBlock before we can initialise any RefPtr to hold the shared
// symbol table, so we just pass it as a raw pointer with a ref count of 1. We then manually deref
// in the destructor.
- FunctionCodeBlock(FunctionExecutable* ownerExecutable, CodeType codeType, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, bool isConstructor)
- : CodeBlock(ownerExecutable, codeType, sourceProvider, sourceOffset, SharedSymbolTable::create().releaseRef(), isConstructor)
+ FunctionCodeBlock(FunctionExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, bool isConstructor)
+ : CodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, sourceOffset, SharedSymbolTable::create().releaseRef(), isConstructor)
{
}
~FunctionCodeBlock()
diff --git a/JavaScriptCore/bytecode/Opcode.h b/JavaScriptCore/bytecode/Opcode.h
index ca5feeb..4563ebe 100644
--- a/JavaScriptCore/bytecode/Opcode.h
+++ b/JavaScriptCore/bytecode/Opcode.h
@@ -93,12 +93,12 @@ namespace JSC {
\
macro(op_resolve, 3) \
macro(op_resolve_skip, 4) \
- macro(op_resolve_global, 6) \
- macro(op_resolve_global_dynamic, 7) \
+ macro(op_resolve_global, 5) \
+ macro(op_resolve_global_dynamic, 6) \
macro(op_get_scoped_var, 4) \
macro(op_put_scoped_var, 4) \
- macro(op_get_global_var, 4) \
- macro(op_put_global_var, 4) \
+ macro(op_get_global_var, 3) \
+ macro(op_put_global_var, 3) \
macro(op_resolve_base, 3) \
macro(op_resolve_with_base, 4) \
macro(op_get_by_id, 8) \
@@ -254,6 +254,17 @@ namespace JSC {
#endif
+ inline size_t opcodeLength(OpcodeID opcode)
+ {
+ switch (opcode) {
+#define OPCODE_ID_LENGTHS(id, length) case id: return OPCODE_LENGTH(id);
+ FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTHS)
+#undef OPCODE_ID_LENGTHS
+ }
+ ASSERT_NOT_REACHED();
+ return 0;
+ }
+
} // namespace JSC
#endif // Opcode_h
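opcodeLength() is a runtime counterpart to the compile-time OPCODE_LENGTH() macro; the bytecode generator changes below use it in debug builds to assert that each emitted instruction advanced the stream by exactly the declared operand count. A small illustration against the shrunken lengths above:

    ASSERT(opcodeLength(op_resolve_global) == 5);  // was 6 before the inline global-object cell was dropped
    ASSERT(opcodeLength(op_get_global_var) == 3);
    ASSERT(opcodeLength(op_put_global_var) == 3);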
diff --git a/JavaScriptCore/bytecode/StructureStubInfo.h b/JavaScriptCore/bytecode/StructureStubInfo.h
index 8e2c489..8578171 100644
--- a/JavaScriptCore/bytecode/StructureStubInfo.h
+++ b/JavaScriptCore/bytecode/StructureStubInfo.h
@@ -66,7 +66,7 @@ namespace JSC {
baseObjectStructure->ref();
}
- void initGetByIdProto(Structure* baseObjectStructure, Structure* prototypeStructure)
+ void initGetByIdProto(Structure* baseObjectStructure, Structure* prototypeStructure, CodeLocationLabel routine)
{
accessType = access_get_by_id_proto;
@@ -75,9 +75,11 @@ namespace JSC {
u.getByIdProto.prototypeStructure = prototypeStructure;
prototypeStructure->ref();
+
+ stubRoutine = routine;
}
- void initGetByIdChain(Structure* baseObjectStructure, StructureChain* chain)
+ void initGetByIdChain(Structure* baseObjectStructure, StructureChain* chain, CodeLocationLabel routine)
{
accessType = access_get_by_id_chain;
@@ -86,27 +88,33 @@ namespace JSC {
u.getByIdChain.chain = chain;
chain->ref();
+
+ stubRoutine = routine;
}
- void initGetByIdSelfList(PolymorphicAccessStructureList* structureList, int listSize)
+ void initGetByIdSelfList(PolymorphicAccessStructureList* structureList)
{
accessType = access_get_by_id_self_list;
u.getByIdProtoList.structureList = structureList;
- u.getByIdProtoList.listSize = listSize;
+ u.getByIdProtoList.listSize = 1;
+
+ stubRoutine = CodeLocationLabel();
}
- void initGetByIdProtoList(PolymorphicAccessStructureList* structureList, int listSize)
+ void initGetByIdProtoList(PolymorphicAccessStructureList* structureList)
{
accessType = access_get_by_id_proto_list;
u.getByIdProtoList.structureList = structureList;
- u.getByIdProtoList.listSize = listSize;
+ u.getByIdProtoList.listSize = 1;
+
+ stubRoutine = CodeLocationLabel();
}
// PutById*
- void initPutByIdTransition(Structure* previousStructure, Structure* structure, StructureChain* chain)
+ void initPutByIdTransition(Structure* previousStructure, Structure* structure, StructureChain* chain, CodeLocationLabel routine)
{
accessType = access_put_by_id_transition;
@@ -118,6 +126,8 @@ namespace JSC {
u.putByIdTransition.chain = chain;
chain->ref();
+
+ stubRoutine = routine;
}
void initPutByIdReplace(Structure* baseObjectStructure)
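The init* methods now take ownership of the generated stub's entry label, so later cache transitions (for example, growing a self list into a proto list) can find the previous routine through the stub info rather than through the patched call site. A rough sketch of the intended flow, assuming stubRoutine remains a directly accessible CodeLocationLabel member as the assignments above suggest:

    // After generating a proto-chain stub (patchBuffer, baseStructure, chain in scope):
    CodeLocationLabel entry = patchBuffer.finalizeCodeAddendum();
    stubInfo->initGetByIdChain(baseStructure, chain, entry);  // records entry in stubRoutine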
diff --git a/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp b/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
index a3fa937..34011c1 100644
--- a/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
+++ b/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
@@ -216,6 +216,9 @@ BytecodeGenerator::BytecodeGenerator(ProgramNode* programNode, const Debugger* d
, m_globalConstantIndex(0)
, m_globalData(&scopeChain.globalObject()->globalExec()->globalData())
, m_lastOpcodeID(op_end)
+#ifndef NDEBUG
+ , m_lastOpcodePosition(0)
+#endif
, m_emitNodeDepth(0)
, m_usesExceptions(false)
, m_regeneratingForExceptionInfo(false)
@@ -588,6 +591,11 @@ PassRefPtr<Label> BytecodeGenerator::emitLabel(Label* l0)
void BytecodeGenerator::emitOpcode(OpcodeID opcodeID)
{
+#ifndef NDEBUG
+ size_t opcodePosition = instructions().size();
+ ASSERT(opcodePosition - m_lastOpcodePosition == opcodeLength(m_lastOpcodeID) || m_lastOpcodeID == op_end);
+ m_lastOpcodePosition = opcodePosition;
+#endif
instructions().append(globalData()->interpreter->getOpcode(opcodeID));
m_lastOpcodeID = opcodeID;
}
@@ -613,12 +621,14 @@ void ALWAYS_INLINE BytecodeGenerator::rewindBinaryOp()
{
ASSERT(instructions().size() >= 4);
instructions().shrink(instructions().size() - 4);
+ m_lastOpcodeID = op_end;
}
void ALWAYS_INLINE BytecodeGenerator::rewindUnaryOp()
{
ASSERT(instructions().size() >= 3);
instructions().shrink(instructions().size() - 3);
+ m_lastOpcodeID = op_end;
}
PassRefPtr<Label> BytecodeGenerator::emitJump(Label* target)
@@ -1106,7 +1116,6 @@ RegisterID* BytecodeGenerator::emitResolve(RegisterID* dst, const Identifier& pr
#endif
emitOpcode(requiresDynamicChecks ? op_resolve_global_dynamic : op_resolve_global);
instructions().append(dst->index());
- instructions().append(globalObject);
instructions().append(addConstant(property));
instructions().append(0);
instructions().append(0);
@@ -1142,7 +1151,6 @@ RegisterID* BytecodeGenerator::emitGetScopedVar(RegisterID* dst, size_t depth, i
if (globalObject) {
emitOpcode(op_get_global_var);
instructions().append(dst->index());
- instructions().append(asCell(globalObject));
instructions().append(index);
return dst;
}
@@ -1158,7 +1166,6 @@ RegisterID* BytecodeGenerator::emitPutScopedVar(size_t depth, int index, Registe
{
if (globalObject) {
emitOpcode(op_put_global_var);
- instructions().append(asCell(globalObject));
instructions().append(index);
instructions().append(value->index());
return value;
@@ -1229,7 +1236,6 @@ RegisterID* BytecodeGenerator::emitResolveWithBase(RegisterID* baseDst, Register
#endif
emitOpcode(requiresDynamicChecks ? op_resolve_global_dynamic : op_resolve_global);
instructions().append(propDst->index());
- instructions().append(globalObject);
instructions().append(addConstant(property));
instructions().append(0);
instructions().append(0);
diff --git a/JavaScriptCore/bytecompiler/BytecodeGenerator.h b/JavaScriptCore/bytecompiler/BytecodeGenerator.h
index ad0ae4e..f855d12 100644
--- a/JavaScriptCore/bytecompiler/BytecodeGenerator.h
+++ b/JavaScriptCore/bytecompiler/BytecodeGenerator.h
@@ -560,6 +560,9 @@ namespace JSC {
JSGlobalData* m_globalData;
OpcodeID m_lastOpcodeID;
+#ifndef NDEBUG
+ size_t m_lastOpcodePosition;
+#endif
unsigned m_emitNodeDepth;
diff --git a/JavaScriptCore/create_jit_stubs b/JavaScriptCore/create_jit_stubs
index 9bb9ee5..4950bfc 100644
--- a/JavaScriptCore/create_jit_stubs
+++ b/JavaScriptCore/create_jit_stubs
@@ -25,16 +25,17 @@ use Getopt::Long;
my $usage = basename($0) . " --prefix prefix [--offset offset] file";
my $rtype_template = quotemeta("#rtype#");
-my $offset_template = quotemeta("#offset#");
+my $offset_template = quotemeta(" THUNK_RETURN_ADDRESS_OFFSET");
my $op_template = quotemeta("#op#");
my $prefix;
+my $offset_is_set = 0;
my $offset = 32;
my $file;
my $getOptionsResult = GetOptions(
'prefix=s' => \$prefix,
- 'offset=i' => \$offset
+ 'offset=i' => sub { $offset_is_set = 1; $offset = $_[1]; }
);
$file = $ARGV[0];
@@ -54,7 +55,9 @@ open(IN, $file) or die "No such file $file";
while ( $_ = <IN> ) {
if ( /^$prefix\_BEGIN\((.*)\)/ ) {
$stub = $1;
- $stub =~ s/$offset_template/$offset/g;
+ if ($offset_is_set) {
+ $stub =~ s/$offset_template/$offset/g;
+ }
print $stub . "\n";
}
if ( /^$prefix\((.*)\)/ ) {
@@ -67,7 +70,9 @@ while ( $_ = <IN> ) {
$stub = $stub_template;
$rtype = quotemeta($1);
$op = quotemeta($2);
- $stub =~ s/$offset_template/$offset/g;
+ if ($offset_is_set) {
+ $stub =~ s/$offset_template/$offset/g;
+ }
$stub =~ s/$rtype_template/$rtype/g;
$stub =~ s/$op_template/$op/g;
$stub =~ s/\\\*/\*/g;
diff --git a/JavaScriptCore/interpreter/Interpreter.cpp b/JavaScriptCore/interpreter/Interpreter.cpp
index 2342ed6..e7ae540 100644
--- a/JavaScriptCore/interpreter/Interpreter.cpp
+++ b/JavaScriptCore/interpreter/Interpreter.cpp
@@ -144,28 +144,28 @@ NEVER_INLINE bool Interpreter::resolveSkip(CallFrame* callFrame, Instruction* vP
NEVER_INLINE bool Interpreter::resolveGlobal(CallFrame* callFrame, Instruction* vPC, JSValue& exceptionValue)
{
int dst = vPC[1].u.operand;
- JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(vPC[2].u.jsCell);
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ JSGlobalObject* globalObject = codeBlock->globalObject();
ASSERT(globalObject->isGlobalObject());
- int property = vPC[3].u.operand;
- Structure* structure = vPC[4].u.structure;
- int offset = vPC[5].u.operand;
+ int property = vPC[2].u.operand;
+ Structure* structure = vPC[3].u.structure;
+ int offset = vPC[4].u.operand;
if (structure == globalObject->structure()) {
callFrame->r(dst) = JSValue(globalObject->getDirectOffset(offset));
return true;
}
- CodeBlock* codeBlock = callFrame->codeBlock();
Identifier& ident = codeBlock->identifier(property);
PropertySlot slot(globalObject);
if (globalObject->getPropertySlot(callFrame, ident, slot)) {
JSValue result = slot.getValue(callFrame, ident);
if (slot.isCacheableValue() && !globalObject->structure()->isUncacheableDictionary() && slot.slotBase() == globalObject) {
- if (vPC[4].u.structure)
- vPC[4].u.structure->deref();
+ if (vPC[3].u.structure)
+ vPC[3].u.structure->deref();
globalObject->structure()->ref();
- vPC[4] = globalObject->structure();
- vPC[5] = slot.cachedOffset();
+ vPC[3] = globalObject->structure();
+ vPC[4] = slot.cachedOffset();
callFrame->r(dst) = JSValue(result);
return true;
}
@@ -184,13 +184,13 @@ NEVER_INLINE bool Interpreter::resolveGlobal(CallFrame* callFrame, Instruction*
NEVER_INLINE bool Interpreter::resolveGlobalDynamic(CallFrame* callFrame, Instruction* vPC, JSValue& exceptionValue)
{
int dst = vPC[1].u.operand;
- JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(vPC[2].u.jsCell);
- ASSERT(globalObject->isGlobalObject());
- int property = vPC[3].u.operand;
- Structure* structure = vPC[4].u.structure;
- int offset = vPC[5].u.operand;
CodeBlock* codeBlock = callFrame->codeBlock();
- int skip = vPC[6].u.operand;
+ JSGlobalObject* globalObject = codeBlock->globalObject();
+ ASSERT(globalObject->isGlobalObject());
+ int property = vPC[2].u.operand;
+ Structure* structure = vPC[3].u.structure;
+ int offset = vPC[4].u.operand;
+ int skip = vPC[5].u.operand;
ScopeChainNode* scopeChain = callFrame->scopeChain();
ScopeChainIterator iter = scopeChain->begin();
@@ -231,11 +231,11 @@ NEVER_INLINE bool Interpreter::resolveGlobalDynamic(CallFrame* callFrame, Instru
if (globalObject->getPropertySlot(callFrame, ident, slot)) {
JSValue result = slot.getValue(callFrame, ident);
if (slot.isCacheableValue() && !globalObject->structure()->isUncacheableDictionary() && slot.slotBase() == globalObject) {
- if (vPC[4].u.structure)
- vPC[4].u.structure->deref();
+ if (vPC[3].u.structure)
+ vPC[3].u.structure->deref();
globalObject->structure()->ref();
- vPC[4] = globalObject->structure();
- vPC[5] = slot.cachedOffset();
+ vPC[3] = globalObject->structure();
+ vPC[4] = slot.cachedOffset();
callFrame->r(dst) = JSValue(result);
return true;
}
@@ -2284,9 +2284,9 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
Gets the global var at global slot index and places it in register dst.
*/
int dst = vPC[1].u.operand;
- JSGlobalObject* scope = static_cast<JSGlobalObject*>(vPC[2].u.jsCell);
+ JSGlobalObject* scope = codeBlock->globalObject();
ASSERT(scope->isGlobalObject());
- int index = vPC[3].u.operand;
+ int index = vPC[2].u.operand;
callFrame->r(dst) = scope->registerAt(index);
vPC += OPCODE_LENGTH(op_get_global_var);
@@ -2297,10 +2297,10 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
Puts value into global slot index.
*/
- JSGlobalObject* scope = static_cast<JSGlobalObject*>(vPC[1].u.jsCell);
+ JSGlobalObject* scope = codeBlock->globalObject();
ASSERT(scope->isGlobalObject());
- int index = vPC[2].u.operand;
- int value = vPC[3].u.operand;
+ int index = vPC[1].u.operand;
+ int value = vPC[2].u.operand;
scope->registerAt(index) = JSValue(callFrame->r(value).jsValue());
vPC += OPCODE_LENGTH(op_put_global_var);
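With the global object now recovered from codeBlock->globalObject(), the inline JSGlobalObject* cell disappears from these instructions and every later operand shifts down one slot. For reference, the new layouts, consistent with the Opcode.h lengths above:

    // op_resolve_global:  [opcode][dst][property][structure][offset]   (length 5)
    // op_get_global_var:  [opcode][dst][index]                         (length 3)
    // op_put_global_var:  [opcode][index][value]                       (length 3)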
diff --git a/JavaScriptCore/interpreter/RegisterFile.cpp b/JavaScriptCore/interpreter/RegisterFile.cpp
index 63ea5b3..4f5d1d5 100644
--- a/JavaScriptCore/interpreter/RegisterFile.cpp
+++ b/JavaScriptCore/interpreter/RegisterFile.cpp
@@ -35,26 +35,15 @@ namespace JSC {
RegisterFile::~RegisterFile()
{
-#if HAVE(MMAP)
- munmap(m_buffer, ((m_max - m_start) + m_maxGlobals) * sizeof(Register));
-#elif HAVE(VIRTUALALLOC)
-#if OS(WINCE)
- VirtualFree(m_buffer, DWORD(m_commitEnd) - DWORD(m_buffer), MEM_DECOMMIT);
-#endif
- VirtualFree(m_buffer, 0, MEM_RELEASE);
-#else
- fastFree(m_buffer);
-#endif
+ void* base = m_reservation.base();
+ m_reservation.decommit(base, reinterpret_cast<intptr_t>(m_commitEnd) - reinterpret_cast<intptr_t>(base));
+ m_reservation.deallocate();
}
void RegisterFile::releaseExcessCapacity()
{
-#if HAVE(MMAP) && HAVE(MADV_FREE) && !HAVE(VIRTUALALLOC)
- while (madvise(m_start, (m_max - m_start) * sizeof(Register), MADV_FREE) == -1 && errno == EAGAIN) { }
-#elif HAVE(VIRTUALALLOC)
- VirtualFree(m_start, (m_max - m_start) * sizeof(Register), MEM_DECOMMIT);
+ m_reservation.decommit(m_start, reinterpret_cast<intptr_t>(m_commitEnd) - reinterpret_cast<intptr_t>(m_start));
m_commitEnd = m_start;
-#endif
m_maxUsed = m_start;
}
diff --git a/JavaScriptCore/interpreter/RegisterFile.h b/JavaScriptCore/interpreter/RegisterFile.h
index e9b8204..6c4e969 100644
--- a/JavaScriptCore/interpreter/RegisterFile.h
+++ b/JavaScriptCore/interpreter/RegisterFile.h
@@ -35,13 +35,9 @@
#include "WeakGCPtr.h"
#include <stdio.h>
#include <wtf/Noncopyable.h>
+#include <wtf/PageReservation.h>
#include <wtf/VMTags.h>
-#if HAVE(MMAP)
-#include <errno.h>
-#include <sys/mman.h>
-#endif
-
namespace JSC {
/*
@@ -109,9 +105,9 @@ namespace JSC {
enum { ProgramCodeThisRegister = -CallFrameHeaderSize - 1 };
- static const size_t defaultCapacity = 524288;
- static const size_t defaultMaxGlobals = 8192;
- static const size_t commitSize = 1 << 14;
+ static const size_t defaultCapacity = 512 * 1024;
+ static const size_t defaultMaxGlobals = 8 * 1024;
+ static const size_t commitSize = 16 * 1024;
// Allow 8k of excess registers before we start trying to reap the register file
static const ptrdiff_t maxExcessCapacity = 8 * 1024;
@@ -139,81 +135,39 @@ namespace JSC {
void markCallFrames(MarkStack& markStack, Heap* heap) { heap->markConservatively(markStack, m_start, m_end); }
private:
+ void checkAllocatedOkay(bool okay);
+
void releaseExcessCapacity();
size_t m_numGlobals;
const size_t m_maxGlobals;
Register* m_start;
Register* m_end;
Register* m_max;
- Register* m_buffer;
Register* m_maxUsed;
-
-#if HAVE(VIRTUALALLOC)
Register* m_commitEnd;
-#endif
+ PageReservation m_reservation;
WeakGCPtr<JSGlobalObject> m_globalObject; // The global object whose vars are currently stored in the register file.
};
- // FIXME: Add a generic getpagesize() to WTF, then move this function to WTF as well.
- inline bool isPageAligned(size_t size) { return size != 0 && size % (8 * 1024) == 0; }
-
inline RegisterFile::RegisterFile(size_t capacity, size_t maxGlobals)
: m_numGlobals(0)
, m_maxGlobals(maxGlobals)
, m_start(0)
, m_end(0)
, m_max(0)
- , m_buffer(0)
{
- // Verify that our values will play nice with mmap and VirtualAlloc.
- ASSERT(isPageAligned(maxGlobals));
- ASSERT(isPageAligned(capacity));
+ ASSERT(maxGlobals && PageAllocation::isPageAligned(maxGlobals));
+ ASSERT(capacity && PageAllocation::isPageAligned(capacity));
size_t bufferLength = (capacity + maxGlobals) * sizeof(Register);
- #if HAVE(MMAP)
- m_buffer = static_cast<Register*>(mmap(0, bufferLength, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, VM_TAG_FOR_REGISTERFILE_MEMORY, 0));
- if (m_buffer == MAP_FAILED) {
-#if OS(WINCE)
- fprintf(stderr, "Could not allocate register file: %d\n", GetLastError());
-#else
- fprintf(stderr, "Could not allocate register file: %d\n", errno);
-#endif
- CRASH();
- }
- #elif HAVE(VIRTUALALLOC)
- m_buffer = static_cast<Register*>(VirtualAlloc(0, roundUpAllocationSize(bufferLength, commitSize), MEM_RESERVE, PAGE_READWRITE));
- if (!m_buffer) {
-#if OS(WINCE)
- fprintf(stderr, "Could not allocate register file: %d\n", GetLastError());
-#else
- fprintf(stderr, "Could not allocate register file: %d\n", errno);
-#endif
- CRASH();
- }
+ m_reservation = PageReservation::reserve(roundUpAllocationSize(bufferLength, commitSize), PageAllocation::JSVMStackPages);
+ void* base = m_reservation.base();
+ checkAllocatedOkay(base);
size_t committedSize = roundUpAllocationSize(maxGlobals * sizeof(Register), commitSize);
- void* commitCheck = VirtualAlloc(m_buffer, committedSize, MEM_COMMIT, PAGE_READWRITE);
- if (commitCheck != m_buffer) {
-#if OS(WINCE)
- fprintf(stderr, "Could not allocate register file: %d\n", GetLastError());
-#else
- fprintf(stderr, "Could not allocate register file: %d\n", errno);
-#endif
- CRASH();
- }
- m_commitEnd = reinterpret_cast<Register*>(reinterpret_cast<char*>(m_buffer) + committedSize);
- #else
- /*
- * If neither MMAP nor VIRTUALALLOC are available - use fastMalloc instead.
- *
- * Please note that this is the fallback case, which is non-optimal.
- * If any possible, the platform should provide for a better memory
- * allocation mechanism that allows for "lazy commit" or dynamic
- * pre-allocation, similar to mmap or VirtualAlloc, to avoid waste of memory.
- */
- m_buffer = static_cast<Register*>(fastMalloc(bufferLength));
- #endif
- m_start = m_buffer + maxGlobals;
+ checkAllocatedOkay(m_reservation.commit(base, committedSize));
+ m_commitEnd = reinterpret_cast<Register*>(reinterpret_cast<char*>(base) + committedSize);
+ m_start = static_cast<Register*>(base) + maxGlobals;
m_end = m_start;
m_maxUsed = m_end;
m_max = m_start + capacity;
@@ -236,20 +190,11 @@ namespace JSC {
if (newEnd > m_max)
return false;
-#if !HAVE(MMAP) && HAVE(VIRTUALALLOC)
if (newEnd > m_commitEnd) {
size_t size = roundUpAllocationSize(reinterpret_cast<char*>(newEnd) - reinterpret_cast<char*>(m_commitEnd), commitSize);
- if (!VirtualAlloc(m_commitEnd, size, MEM_COMMIT, PAGE_READWRITE)) {
-#if OS(WINCE)
- fprintf(stderr, "Could not allocate register file: %d\n", GetLastError());
-#else
- fprintf(stderr, "Could not allocate register file: %d\n", errno);
-#endif
- CRASH();
- }
+ checkAllocatedOkay(m_reservation.commit(m_commitEnd, size));
m_commitEnd = reinterpret_cast<Register*>(reinterpret_cast<char*>(m_commitEnd) + size);
}
-#endif
if (newEnd > m_maxUsed)
m_maxUsed = newEnd;
@@ -258,6 +203,16 @@ namespace JSC {
return true;
}
+ inline void RegisterFile::checkAllocatedOkay(bool okay)
+ {
+ if (!okay) {
+#ifndef NDEBUG
+ fprintf(stderr, "Could not allocate register file: %d\n", PageReservation::lastError());
+#endif
+ CRASH();
+ }
+ }
+
} // namespace JSC
#endif // RegisterFile_h
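The register file now manages its backing store through WTF's PageReservation, replacing the mmap/VirtualAlloc/fastMalloc triplet with a single reserve-then-commit abstraction. A minimal sketch of the lifecycle, using only the methods the patch itself relies on (reserve, base, commit, decommit, deallocate); reservedSize and commitSize are illustrative, page-aligned values:

    PageReservation reservation = PageReservation::reserve(reservedSize, PageAllocation::JSVMStackPages);
    if (!reservation.base())
        CRASH();                                              // mirrors checkAllocatedOkay(false)
    bool ok = reservation.commit(reservation.base(), commitSize);  // back the first pages with memory
    ASSERT_UNUSED(ok, ok);
    // ... use [base, base + commitSize) ...
    reservation.decommit(reservation.base(), commitSize);     // drop physical pages, keep the addresses
    reservation.deallocate();                                 // finally release the address range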
diff --git a/JavaScriptCore/jit/ExecutableAllocator.cpp b/JavaScriptCore/jit/ExecutableAllocator.cpp
index 86c24fd..e3525f5 100644
--- a/JavaScriptCore/jit/ExecutableAllocator.cpp
+++ b/JavaScriptCore/jit/ExecutableAllocator.cpp
@@ -45,15 +45,13 @@ void ExecutableAllocator::intializePageSize()
// for moving memory model limitation
ExecutableAllocator::pageSize = 256 * 1024;
#else
- ExecutableAllocator::pageSize = PageAllocation::pagesize();
+ ExecutableAllocator::pageSize = PageAllocation::pageSize();
#endif
}
ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
{
PageAllocation allocation = PageAllocation::allocate(size, PageAllocation::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
- if (!allocation)
- CRASH();
return allocation;
}
diff --git a/JavaScriptCore/jit/ExecutableAllocator.h b/JavaScriptCore/jit/ExecutableAllocator.h
index f8e991f..b60d591 100644
--- a/JavaScriptCore/jit/ExecutableAllocator.h
+++ b/JavaScriptCore/jit/ExecutableAllocator.h
@@ -34,7 +34,7 @@
#include <wtf/UnusedParam.h>
#include <wtf/Vector.h>
-#if OS(IPHONE_OS)
+#if OS(IOS)
#include <libkern/OSCacheControl.h>
#include <sys/mman.h>
#endif
@@ -85,15 +85,29 @@ inline size_t roundUpAllocationSize(size_t request, size_t granularity)
namespace JSC {
class ExecutablePool : public RefCounted<ExecutablePool> {
-private:
+public:
+#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
typedef PageAllocation Allocation;
+#else
+ class Allocation {
+ public:
+ Allocation(void* base, size_t size)
+ : m_base(base)
+ , m_size(size)
+ {
+ }
+ void* base() { return m_base; }
+ size_t size() { return m_size; }
+ bool operator!() const { return !m_base; }
+
+ private:
+ void* m_base;
+ size_t m_size;
+ };
+#endif
typedef Vector<Allocation, 2> AllocationList;
-public:
- static PassRefPtr<ExecutablePool> create(size_t n)
- {
- return adoptRef(new ExecutablePool(n));
- }
+ static PassRefPtr<ExecutablePool> create(size_t n);
void* alloc(size_t n)
{
@@ -114,6 +128,11 @@ public:
return poolAllocate(n);
}
+ void returnLastBytes(size_t count)
+ {
+ m_freePtr -= count;
+ }
+
~ExecutablePool()
{
AllocationList::iterator end = m_pools.end();
@@ -127,7 +146,7 @@ private:
static Allocation systemAlloc(size_t n);
static void systemRelease(Allocation& alloc);
- ExecutablePool(size_t n);
+ ExecutablePool(Allocation&);
void* poolAllocate(size_t n);
@@ -145,8 +164,11 @@ public:
{
if (!pageSize)
intializePageSize();
- if (isValid())
+ if (isValid()) {
m_smallAllocationPool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
+ if (!m_smallAllocationPool)
+ CRASH();
+ }
#if !ENABLE(INTERPRETER)
else
CRASH();
@@ -163,7 +185,7 @@ public:
return m_smallAllocationPool;
// If the request is large, we just provide a unshared allocator
- if (n > JIT_ALLOCATOR_LARGE_ALLOC_SIZE)
+ if (n > JIT_ALLOCATOR_LARGE_ALLOC_SIZE)
return ExecutablePool::create(n);
// Create a new allocator
@@ -200,7 +222,7 @@ public:
static void cacheFlush(void* code, size_t size)
{
#if COMPILER(GCC) && GCC_VERSION_AT_LEAST(4,3,0)
-#if WTF_MIPS_ISA_REV(2) && GCC_VERSION_AT_LEAST(4,4,3)
+#if WTF_MIPS_ISA_REV(2) && !GCC_VERSION_AT_LEAST(4,4,3)
int lineSize;
asm("rdhwr %0, $1" : "=r" (lineSize));
//
@@ -222,7 +244,7 @@ public:
_flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
#endif
}
-#elif CPU(ARM_THUMB2) && OS(IPHONE_OS)
+#elif CPU(ARM_THUMB2) && OS(IOS)
static void cacheFlush(void* code, size_t size)
{
sys_dcache_flush(code, size);
@@ -286,15 +308,20 @@ private:
static void intializePageSize();
};
-inline ExecutablePool::ExecutablePool(size_t n)
+inline PassRefPtr<ExecutablePool> ExecutablePool::create(size_t n)
+ {
+ Allocation mem = systemAlloc(roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE));
+ if (!mem)
+ return 0;
+ return adoptRef(new ExecutablePool(mem));
+ }
+
+inline ExecutablePool::ExecutablePool(Allocation& mem)
{
- size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
- Allocation mem = systemAlloc(allocSize);
+ ASSERT(!!mem);
m_pools.append(mem);
m_freePtr = static_cast<char*>(mem.base());
- if (!m_freePtr)
- CRASH(); // Failed to allocate
- m_end = m_freePtr + allocSize;
+ m_end = m_freePtr + mem.size();
}
inline void* ExecutablePool::poolAllocate(size_t n)
@@ -303,8 +330,8 @@ inline void* ExecutablePool::poolAllocate(size_t n)
Allocation result = systemAlloc(allocSize);
if (!result.base())
- CRASH(); // Failed to allocate
-
+ return 0; // Failed to allocate
+
ASSERT(m_end >= m_freePtr);
if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) {
// Replace allocation pool
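With systemAlloc() no longer CRASH()ing, an exhausted executable pool now surfaces as a null result: create() returns 0 when the backing allocation fails, and poolAllocate() returns 0 instead of aborting. Callers are expected to check, along the lines of this sketch (handleOutOfExecutableMemory is a hypothetical recovery path, not part of the patch):

    RefPtr<ExecutablePool> pool = ExecutablePool::create(bytesNeeded);
    if (!pool)
        return handleOutOfExecutableMemory();
    void* code = pool->alloc(bytesNeeded);  // may also return 0 under memory pressure
    if (!code)
        return handleOutOfExecutableMemory();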
diff --git a/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
index 421c34b..f05e919 100644
--- a/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
+++ b/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
@@ -35,6 +35,7 @@
#include <sys/mman.h>
#include <unistd.h>
#include <wtf/AVLTree.h>
+#include <wtf/PageReservation.h>
#include <wtf/VMTags.h>
#if CPU(X86_64)
@@ -131,12 +132,12 @@ class FixedVMPoolAllocator
void reuse(void* position, size_t size)
{
- bool okay = m_allocation.commit(position, size, EXECUTABLE_POOL_WRITABLE, true);
+ bool okay = m_allocation.commit(position, size);
ASSERT_UNUSED(okay, okay);
}
// All addition to the free list should go through this method, rather than
- // calling insert directly, to avoid multiple entries beging added with the
+ // calling insert directly, to avoid multiple entries being added with the
// same key. All nodes being added should be singletons, they should not
// already be a part of a chain.
void addToFreeList(FreeListEntry* entry)
@@ -155,7 +156,7 @@ class FixedVMPoolAllocator
}
// We do not attempt to coalesce addition, which may lead to fragmentation;
- // instead we periodically perform a sweep to try to coalesce neigboring
+ // instead we periodically perform a sweep to try to coalesce neighboring
// entries in m_freeList. Presently this is triggered at the point 16MB
// of memory has been released.
void coalesceFreeSpace()
@@ -168,7 +169,7 @@ class FixedVMPoolAllocator
for (FreeListEntry* entry; (entry = *iter); ++iter) {
// Each entry in m_freeList might correspond to multiple
// free chunks of memory (of the same size). Walk the chain
- // (this is likely of couse only be one entry long!) adding
+ // (this is likely of course only one entry long!) adding
// each entry to the Vector (also resetting the next-in-chain
// pointer to separate each node out).
FreeListEntry* next;
@@ -283,16 +284,16 @@ public:
//
// But! - as a temporary workaround for some plugin problems (rdar://problem/6812854),
// for now instead of 2^26 bits of ASLR lets stick with 25 bits of randomization plus
- // 2^24, which should put up somewhere in the middle of usespace (in the address range
+ // 2^24, which should put up somewhere in the middle of userspace (in the address range
// 0x200000000000 .. 0x5fffffffffff).
#if VM_POOL_ASLR
intptr_t randomLocation = 0;
randomLocation = arc4random() & ((1 << 25) - 1);
randomLocation += (1 << 24);
randomLocation <<= 21;
- m_allocation = PageAllocation::reserveAt(reinterpret_cast<void*>(randomLocation), false, totalHeapSize, PageAllocation::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
+ m_allocation = PageReservation::reserveAt(reinterpret_cast<void*>(randomLocation), false, totalHeapSize, PageAllocation::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
#else
- m_allocation = PageAllocation::reserve(totalHeapSize, PageAllocation::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
+ m_allocation = PageReservation::reserve(totalHeapSize, PageAllocation::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
#endif
if (!!m_allocation)
@@ -303,12 +304,12 @@ public:
#endif
}
- PageAllocation alloc(size_t size)
+ ExecutablePool::Allocation alloc(size_t size)
{
- return PageAllocation(allocInternal(size), size, m_allocation);
+ return ExecutablePool::Allocation(allocInternal(size), size);
}
- void free(PageAllocation allocation)
+ void free(ExecutablePool::Allocation allocation)
{
void* pointer = allocation.base();
size_t size = allocation.size();
@@ -356,18 +357,17 @@ private:
result = m_commonSizedAllocations.last();
m_commonSizedAllocations.removeLast();
} else {
- // Serach m_freeList for a suitable sized chunk to allocate memory from.
+ // Search m_freeList for a suitable sized chunk to allocate memory from.
FreeListEntry* entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);
- // This would be bad news.
+ // This is bad news.
if (!entry) {
- // Errk! Lets take a last-ditch desparation attempt at defragmentation...
+ // Errk! Let's take a last-ditch desperation attempt at defragmentation...
coalesceFreeSpace();
// Did that free up a large enough chunk?
entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);
- // No?... *BOOM!*
if (!entry)
- CRASH();
+ return 0;
}
ASSERT(entry->size != m_commonSize);
@@ -424,7 +424,7 @@ private:
// This is used for housekeeping, to trigger defragmentation of the freed lists.
size_t m_countFreedSinceLastCoalesce;
- PageAllocation m_allocation;
+ PageReservation m_allocation;
};
void ExecutableAllocator::intializePageSize()
diff --git a/JavaScriptCore/jit/JIT.cpp b/JavaScriptCore/jit/JIT.cpp
index f5df5f7..cd5944a 100644
--- a/JavaScriptCore/jit/JIT.cpp
+++ b/JavaScriptCore/jit/JIT.cpp
@@ -71,7 +71,7 @@ void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAd
repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}
-JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
+JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock, void* linkerOffset)
: m_interpreter(globalData->interpreter)
, m_globalData(globalData)
, m_codeBlock(codeBlock)
@@ -89,6 +89,7 @@ JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
, m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
, m_jumpTargetsPosition(0)
#endif
+ , m_linkerOffset(linkerOffset)
{
}
@@ -508,7 +509,12 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck)
ASSERT(m_jmpTable.isEmpty());
- LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
+ RefPtr<ExecutablePool> executablePool = m_globalData->executableAllocator.poolForSize(m_assembler.size());
+ if (!executablePool)
+ return JITCode();
+ LinkBuffer patchBuffer(this, executablePool.release(), m_linkerOffset);
+ if (!patchBuffer.allocationSuccessful())
+ return JITCode();
// Translate vPC offsets into addresses in JIT generated code, for switch tables.
for (unsigned i = 0; i < m_switches.size(); ++i) {
diff --git a/JavaScriptCore/jit/JIT.h b/JavaScriptCore/jit/JIT.h
index d398d51..393c771 100644
--- a/JavaScriptCore/jit/JIT.h
+++ b/JavaScriptCore/jit/JIT.h
@@ -178,50 +178,50 @@ namespace JSC {
static const int patchGetByIdDefaultOffset = 256;
public:
- static JITCode compile(JSGlobalData* globalData, CodeBlock* codeBlock, CodePtr* functionEntryArityCheck = 0)
+ static JITCode compile(JSGlobalData* globalData, CodeBlock* codeBlock, CodePtr* functionEntryArityCheck = 0, void* offsetBase = 0)
{
- return JIT(globalData, codeBlock).privateCompile(functionEntryArityCheck);
+ return JIT(globalData, codeBlock, offsetBase).privateCompile(functionEntryArityCheck);
}
- static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress)
+ static bool compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
JIT jit(globalData, codeBlock);
- jit.privateCompileGetByIdProto(stubInfo, structure, prototypeStructure, ident, slot, cachedOffset, returnAddress, callFrame);
+ return jit.privateCompileGetByIdProto(stubInfo, structure, prototypeStructure, ident, slot, cachedOffset, returnAddress, callFrame);
}
- static void compileGetByIdSelfList(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+ static bool compileGetByIdSelfList(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
{
JIT jit(globalData, codeBlock);
- jit.privateCompileGetByIdSelfList(stubInfo, polymorphicStructures, currentIndex, structure, ident, slot, cachedOffset);
+ return jit.privateCompileGetByIdSelfList(stubInfo, structure, ident, slot, cachedOffset);
}
- static void compileGetByIdProtoList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+ static bool compileGetByIdProtoList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
{
JIT jit(globalData, codeBlock);
- jit.privateCompileGetByIdProtoList(stubInfo, prototypeStructureList, currentIndex, structure, prototypeStructure, ident, slot, cachedOffset, callFrame);
+ return jit.privateCompileGetByIdProtoList(stubInfo, structure, prototypeStructure, ident, slot, cachedOffset, callFrame);
}
- static void compileGetByIdChainList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+ static bool compileGetByIdChainList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
{
JIT jit(globalData, codeBlock);
- jit.privateCompileGetByIdChainList(stubInfo, prototypeStructureList, currentIndex, structure, chain, count, ident, slot, cachedOffset, callFrame);
+ return jit.privateCompileGetByIdChainList(stubInfo, structure, chain, count, ident, slot, cachedOffset, callFrame);
}
- static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress)
+ static bool compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
JIT jit(globalData, codeBlock);
- jit.privateCompileGetByIdChain(stubInfo, structure, chain, count, ident, slot, cachedOffset, returnAddress, callFrame);
+ return jit.privateCompileGetByIdChain(stubInfo, structure, chain, count, ident, slot, cachedOffset, returnAddress, callFrame);
}
- static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
+ static bool compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
{
JIT jit(globalData, codeBlock);
- jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress, direct);
+ return jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress, direct);
}
static void compileCTIMachineTrampolines(JSGlobalData* globalData, RefPtr<ExecutablePool>* executablePool, TrampolineStructure *trampolines)
{
if (!globalData->canUseJIT())
return;
- JIT jit(globalData);
+ JIT jit(globalData, 0, 0);
jit.privateCompileCTIMachineTrampolines(executablePool, globalData, trampolines);
}
@@ -229,7 +229,7 @@ namespace JSC {
{
if (!globalData->canUseJIT())
return CodePtr();
- JIT jit(globalData);
+ JIT jit(globalData, 0, 0);
return jit.privateCompileCTINativeCall(executablePool, globalData, func);
}
@@ -237,10 +237,10 @@ namespace JSC {
static void patchPutByIdReplace(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct);
static void patchMethodCallProto(CodeBlock* codeblock, MethodCallLinkInfo&, JSFunction*, Structure*, JSObject*, ReturnAddressPtr);
- static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ReturnAddressPtr returnAddress)
+ static bool compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, ReturnAddressPtr returnAddress)
{
JIT jit(globalData, codeBlock);
- return jit.privateCompilePatchGetArrayLength(returnAddress);
+ return jit.privateCompilePatchGetArrayLength(stubInfo, returnAddress);
}
static void linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, CodePtr, CallLinkInfo*, int callerArgCount, JSGlobalData*);
@@ -259,23 +259,23 @@ namespace JSC {
}
};
- JIT(JSGlobalData*, CodeBlock* = 0);
+ JIT(JSGlobalData*, CodeBlock* = 0, void* = 0);
void privateCompileMainPass();
void privateCompileLinkPass();
void privateCompileSlowCases();
JITCode privateCompile(CodePtr* functionEntryArityCheck);
- void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
- void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, const Identifier&, const PropertySlot&, size_t cachedOffset);
- void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, size_t cachedOffset, CallFrame* callFrame);
- void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain* chain, size_t count, const Identifier&, const PropertySlot&, size_t cachedOffset, CallFrame* callFrame);
- void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
- void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, ReturnAddressPtr returnAddress, bool direct);
+ bool privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
+ bool privateCompileGetByIdSelfList(StructureStubInfo*, Structure*, const Identifier&, const PropertySlot&, size_t cachedOffset);
+ bool privateCompileGetByIdProtoList(StructureStubInfo*, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, size_t cachedOffset, CallFrame* callFrame);
+ bool privateCompileGetByIdChainList(StructureStubInfo*, Structure*, StructureChain* chain, size_t count, const Identifier&, const PropertySlot&, size_t cachedOffset, CallFrame* callFrame);
+ bool privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
+ bool privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure*, Structure*, size_t cachedOffset, StructureChain*, ReturnAddressPtr returnAddress, bool direct);
void privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* data, TrampolineStructure *trampolines);
Label privateCompileCTINativeCall(JSGlobalData*, bool isConstruct = false);
CodePtr privateCompileCTINativeCall(PassRefPtr<ExecutablePool> executablePool, JSGlobalData* data, NativeFunction func);
- void privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress);
+ bool privateCompilePatchGetArrayLength(StructureStubInfo* stubInfo, ReturnAddressPtr returnAddress);
void addSlowCase(Jump);
void addSlowCase(JumpList);
@@ -295,7 +295,7 @@ namespace JSC {
void emitLoadDouble(unsigned index, FPRegisterID value);
void emitLoadInt32ToDouble(unsigned index, FPRegisterID value);
- void testPrototype(Structure*, JumpList& failureCases);
+ void testPrototype(JSValue, JumpList& failureCases);
#if USE(JSVALUE32_64)
bool getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant);
@@ -666,16 +666,16 @@ namespace JSC {
#endif
#endif // USE(JSVALUE32_64)
-#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
-#define BEGIN_UNINTERRUPTED_SEQUENCE(name) beginUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace)
-#define END_UNINTERRUPTED_SEQUENCE(name) endUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace)
+#if (defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL)
+#define BEGIN_UNINTERRUPTED_SEQUENCE(name) do { beginUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace); } while (false)
+#define END_UNINTERRUPTED_SEQUENCE(name) do { endUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace); } while (false)
void beginUninterruptedSequence(int, int);
void endUninterruptedSequence(int, int);
#else
-#define BEGIN_UNINTERRUPTED_SEQUENCE(name)
-#define END_UNINTERRUPTED_SEQUENCE(name)
+#define BEGIN_UNINTERRUPTED_SEQUENCE(name) do { beginUninterruptedSequence(); } while (false)
+#define END_UNINTERRUPTED_SEQUENCE(name) do { endUninterruptedSequence(); } while (false)
#endif
void emit_op_add(Instruction*);
@@ -940,6 +940,7 @@ namespace JSC {
int m_uninterruptedConstantSequenceBegin;
#endif
#endif
+ void* m_linkerOffset;
static CodePtr stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool);
} JIT_CLASS_ALIGNMENT;
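
For context on the macro change above: the fallback branch now expands to real (empty) calls, and both branches gain a do { ... } while (false) wrapper so each macro is exactly one statement and composes safely with unbraced if/else. A minimal standalone illustration of why that wrapper matters (not from this patch):

    #include <cstdio>

    void step() { std::puts("step"); }

    // Without the do/while wrapper, a two-statement macro breaks an
    // unbraced if/else: only the first statement would be conditional.
    #define RUN_SEQUENCE() do { step(); step(); } while (false)

    int main()
    {
        bool enabled = true;
        if (enabled)
            RUN_SEQUENCE();   // expands to exactly one statement
        else
            std::puts("skipped");
        return 0;
    }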
diff --git a/JavaScriptCore/jit/JITArithmetic32_64.cpp b/JavaScriptCore/jit/JITArithmetic32_64.cpp
index 5a69d5a..e53af77 100644
--- a/JavaScriptCore/jit/JITArithmetic32_64.cpp
+++ b/JavaScriptCore/jit/JITArithmetic32_64.cpp
@@ -1383,6 +1383,8 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
+ UNUSED_PARAM(currentInstruction);
+ UNUSED_PARAM(iter);
#if ENABLE(JIT_USE_SOFT_MODULO)
unsigned result = currentInstruction[1].u.operand;
unsigned op1 = currentInstruction[2].u.operand;
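
The UNUSED_PARAM calls added above keep unused-parameter warnings quiet when JIT_USE_SOFT_MODULO is disabled and the rest of the body is preprocessed away. WTF's macro is essentially a cast to void; a self-contained sketch of the pattern (ENABLE_SOFT_MODULO is a stand-in for the real ENABLE(JIT_USE_SOFT_MODULO) test):

    #include <cstdio>

    // Essentially what wtf/UnusedParam.h provides.
    #define UNUSED_PARAM(variable) (void)variable

    #define ENABLE_SOFT_MODULO 0 // stand-in for ENABLE(JIT_USE_SOFT_MODULO)

    int slowModulo(int lhs, int rhs)
    {
    #if ENABLE_SOFT_MODULO
        return lhs % rhs;
    #else
        // Parameters are otherwise unreferenced in this configuration.
        UNUSED_PARAM(lhs);
        UNUSED_PARAM(rhs);
        return 0;
    #endif
    }

    int main()
    {
        std::printf("%d\n", slowModulo(7, 3));
        return 0;
    }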
diff --git a/JavaScriptCore/jit/JITInlineMethods.h b/JavaScriptCore/jit/JITInlineMethods.h
index 3b28f34..e2e77db 100644
--- a/JavaScriptCore/jit/JITInlineMethods.h
+++ b/JavaScriptCore/jit/JITInlineMethods.h
@@ -99,6 +99,7 @@ ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
{
+ JSInterfaceJIT::beginUninterruptedSequence();
#if CPU(ARM_TRADITIONAL)
#ifndef NDEBUG
// Ensure the label after the sequence can also fit
@@ -124,6 +125,7 @@ ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace)
ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) == insnSpace);
ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin == constSpace);
#endif
+ JSInterfaceJIT::endUninterruptedSequence();
}
#endif
diff --git a/JavaScriptCore/jit/JITOpcodes.cpp b/JavaScriptCore/jit/JITOpcodes.cpp
index 852de4e..28ef4ca 100644
--- a/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/JavaScriptCore/jit/JITOpcodes.cpp
@@ -161,7 +161,14 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
#endif
// All trampolines constructed! Copy the code, link up calls, and set the pointers on the Machine object.
- LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
+ *executablePool = m_globalData->executableAllocator.poolForSize(m_assembler.size());
+ // We can't run without the JIT trampolines!
+ if (!*executablePool)
+ CRASH();
+ LinkBuffer patchBuffer(this, *executablePool, 0);
+ // We can't run without the JIT trampolines!
+ if (!patchBuffer.allocationSuccessful())
+ CRASH();
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
@@ -176,19 +183,18 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
patchBuffer.link(callCompileConstruct, FunctionPtr(cti_op_construct_jitCompile));
CodeRef finalCode = patchBuffer.finalizeCode();
- *executablePool = finalCode.m_executablePool;
-
- trampolines->ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
- trampolines->ctiVirtualConstructLink = trampolineAt(finalCode, virtualConstructLinkBegin);
- trampolines->ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
- trampolines->ctiVirtualConstruct = trampolineAt(finalCode, virtualConstructBegin);
- trampolines->ctiNativeCall = trampolineAt(finalCode, nativeCallThunk);
- trampolines->ctiNativeConstruct = trampolineAt(finalCode, nativeConstructThunk);
+
+ trampolines->ctiVirtualCallLink = patchBuffer.trampolineAt(virtualCallLinkBegin);
+ trampolines->ctiVirtualConstructLink = patchBuffer.trampolineAt(virtualConstructLinkBegin);
+ trampolines->ctiVirtualCall = patchBuffer.trampolineAt(virtualCallBegin);
+ trampolines->ctiVirtualConstruct = patchBuffer.trampolineAt(virtualConstructBegin);
+ trampolines->ctiNativeCall = patchBuffer.trampolineAt(nativeCallThunk);
+ trampolines->ctiNativeConstruct = patchBuffer.trampolineAt(nativeConstructThunk);
#if ENABLE(JIT_USE_SOFT_MODULO)
- trampolines->ctiSoftModulo = trampolineAt(finalCode, softModBegin);
+ trampolines->ctiSoftModulo = patchBuffer.trampolineAt(softModBegin);
#endif
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- trampolines->ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
+ trampolines->ctiStringLengthTrampoline = patchBuffer.trampolineAt(stringLengthBegin);
#endif
}
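
The trampoline path now allocates the pool itself, stores it through the out-parameter before linking, and treats either allocation failing as unrecoverable, while the per-stub compilers further down fail soft by returning false. A condensed sketch of the two-step check, with stand-in types rather than the real JSC classes:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    #define CRASH() std::abort()

    // Stand-ins for JSC's ExecutablePool and LinkBuffer.
    struct ExecutablePool {};
    struct LinkBuffer {
        explicit LinkBuffer(ExecutablePool* pool) : m_ok(pool != nullptr) {}
        bool allocationSuccessful() const { return m_ok; }
        bool m_ok;
    };

    ExecutablePool* poolForSize(std::size_t) { return new ExecutablePool; }

    int main()
    {
        // Step 1: the pool itself can fail to allocate.
        ExecutablePool* pool = poolForSize(4096);
        if (!pool)
            CRASH(); // trampolines are mandatory; there is no fallback
        // Step 2: copying code into the pool can also fail.
        LinkBuffer patchBuffer(pool);
        if (!patchBuffer.allocationSuccessful())
            CRASH();
        std::puts("trampolines linked");
        delete pool;
        return 0;
    }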
@@ -462,18 +468,18 @@ void JIT::emit_op_construct(Instruction* currentInstruction)
void JIT::emit_op_get_global_var(Instruction* currentInstruction)
{
- JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[2].u.jsCell);
+ JSVariableObject* globalObject = m_codeBlock->globalObject();
move(ImmPtr(globalObject), regT0);
- emitGetVariableObjectRegister(regT0, currentInstruction[3].u.operand, regT0);
+ emitGetVariableObjectRegister(regT0, currentInstruction[2].u.operand, regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
void JIT::emit_op_put_global_var(Instruction* currentInstruction)
{
- emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
- JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[1].u.jsCell);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT1);
+ JSVariableObject* globalObject = m_codeBlock->globalObject();
move(ImmPtr(globalObject), regT0);
- emitPutVariableObjectRegister(regT1, regT0, currentInstruction[2].u.operand);
+ emitPutVariableObjectRegister(regT1, regT0, currentInstruction[1].u.operand);
}
void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
@@ -644,7 +650,7 @@ void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool)
{
// Fast case
- void* globalObject = currentInstruction[2].u.jsCell;
+ void* globalObject = m_codeBlock->globalObject();
unsigned currentIndex = m_globalResolveInfoIndex++;
void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);
@@ -665,16 +671,15 @@ void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool)
void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
unsigned dst = currentInstruction[1].u.operand;
- void* globalObject = currentInstruction[2].u.jsCell;
- Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);
+ Identifier* ident = &m_codeBlock->identifier(currentInstruction[2].u.operand);
unsigned currentIndex = m_globalResolveInfoIndex++;
linkSlowCase(iter);
JITStubCall stubCall(this, cti_op_resolve_global);
- stubCall.addArgument(ImmPtr(globalObject));
stubCall.addArgument(ImmPtr(ident));
stubCall.addArgument(Imm32(currentIndex));
+ stubCall.addArgument(regT0);
stubCall.call(dst);
}
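
These operand renumberings (here and in the resolve_global_dynamic hunks below, where operand 6 becomes 5) all follow from one change: the JSGlobalObject pointer is no longer baked into the bytecode as an operand but is fetched from the owning CodeBlock, so every operand after the removed slot shifts down by one. An illustrative, not authoritative, picture of the layout change for op_get_global_var:

    #include <cstdio>

    // Illustrative operand layouts: removing the embedded global-object
    // cell shifts every later operand down by one slot.
    enum OldLayout { OldOp, OldDst, OldGlobalCell, OldIndex }; // index was operand 3
    enum NewLayout { NewOp, NewDst, NewIndex };                // index is operand 2

    int main()
    {
        std::printf("op_get_global_var index operand: %d -> %d\n",
                    static_cast<int>(OldIndex), static_cast<int>(NewIndex));
        return 0;
    }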
@@ -1489,7 +1494,7 @@ void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCa
void JIT::emit_op_resolve_global_dynamic(Instruction* currentInstruction)
{
- int skip = currentInstruction[6].u.operand;
+ int skip = currentInstruction[5].u.operand;
emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
while (skip--) {
@@ -1503,9 +1508,8 @@ void JIT::emit_op_resolve_global_dynamic(Instruction* currentInstruction)
void JIT::emitSlow_op_resolve_global_dynamic(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
unsigned dst = currentInstruction[1].u.operand;
- void* globalObject = currentInstruction[2].u.jsCell;
- Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);
- int skip = currentInstruction[6].u.operand;
+ Identifier* ident = &m_codeBlock->identifier(currentInstruction[2].u.operand);
+ int skip = currentInstruction[5].u.operand;
while (skip--)
linkSlowCase(iter);
JITStubCall resolveStubCall(this, cti_op_resolve);
@@ -1517,9 +1521,9 @@ void JIT::emitSlow_op_resolve_global_dynamic(Instruction* currentInstruction, Ve
linkSlowCase(iter); // We managed to skip all the nodes in the scope chain, but the cache missed.
JITStubCall stubCall(this, cti_op_resolve_global);
- stubCall.addArgument(ImmPtr(globalObject));
stubCall.addArgument(ImmPtr(ident));
stubCall.addArgument(Imm32(currentIndex));
+ stubCall.addArgument(regT0);
stubCall.call(dst);
}
diff --git a/JavaScriptCore/jit/JITOpcodes32_64.cpp b/JavaScriptCore/jit/JITOpcodes32_64.cpp
index 5622e9c..939aa8c 100644
--- a/JavaScriptCore/jit/JITOpcodes32_64.cpp
+++ b/JavaScriptCore/jit/JITOpcodes32_64.cpp
@@ -159,7 +159,14 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
#endif
// All trampolines constructed! Copy the code, link up calls, and set the pointers on the Machine object.
- LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
+ *executablePool = m_globalData->executableAllocator.poolForSize(m_assembler.size());
+ // We can't run without the JIT trampolines!
+ if (!*executablePool)
+ CRASH();
+ LinkBuffer patchBuffer(this, *executablePool, 0);
+ // We can't run without the JIT trampolines!
+ if (!patchBuffer.allocationSuccessful())
+ CRASH();
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
@@ -174,21 +181,20 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
patchBuffer.link(callCompileCconstruct, FunctionPtr(cti_op_construct_jitCompile));
CodeRef finalCode = patchBuffer.finalizeCode();
- *executablePool = finalCode.m_executablePool;
- trampolines->ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
- trampolines->ctiVirtualConstruct = trampolineAt(finalCode, virtualConstructBegin);
- trampolines->ctiNativeCall = trampolineAt(finalCode, nativeCallThunk);
- trampolines->ctiNativeConstruct = trampolineAt(finalCode, nativeConstructThunk);
+ trampolines->ctiVirtualCall = patchBuffer.trampolineAt(virtualCallBegin);
+ trampolines->ctiVirtualConstruct = patchBuffer.trampolineAt(virtualConstructBegin);
+ trampolines->ctiNativeCall = patchBuffer.trampolineAt(nativeCallThunk);
+ trampolines->ctiNativeConstruct = patchBuffer.trampolineAt(nativeConstructThunk);
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- trampolines->ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
+ trampolines->ctiStringLengthTrampoline = patchBuffer.trampolineAt(stringLengthBegin);
#endif
#if ENABLE(JIT_OPTIMIZE_CALL)
- trampolines->ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
- trampolines->ctiVirtualConstructLink = trampolineAt(finalCode, virtualConstructLinkBegin);
+ trampolines->ctiVirtualCallLink = patchBuffer.trampolineAt(virtualCallLinkBegin);
+ trampolines->ctiVirtualConstructLink = patchBuffer.trampolineAt(virtualConstructLinkBegin);
#endif
#if ENABLE(JIT_USE_SOFT_MODULO)
- trampolines->ctiSoftModulo = trampolineAt(finalCode, softModBegin);
+ trampolines->ctiSoftModulo = patchBuffer.trampolineAt(softModBegin);
#endif
}
@@ -356,12 +362,15 @@ JIT::CodePtr JIT::privateCompileCTINativeCall(PassRefPtr<ExecutablePool> executa
ret();
// All trampolines constructed! Copy the code, link up calls, and set the pointers on the Machine object.
- LinkBuffer patchBuffer(this, executablePool);
+ LinkBuffer patchBuffer(this, executablePool, 0);
+ // We can't continue if we can't call a function!
+ if (!patchBuffer.allocationSuccessful())
+ CRASH();
patchBuffer.link(nativeCall, FunctionPtr(func));
+ patchBuffer.finalizeCode();
- CodeRef finalCode = patchBuffer.finalizeCode();
- return trampolineAt(finalCode, nativeCallThunk);
+ return patchBuffer.trampolineAt(nativeCallThunk);
}
void JIT::emit_op_mov(Instruction* currentInstruction)
@@ -516,9 +525,9 @@ void JIT::emit_op_new_func(Instruction* currentInstruction)
void JIT::emit_op_get_global_var(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
- JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(currentInstruction[2].u.jsCell);
+ JSGlobalObject* globalObject = m_codeBlock->globalObject();
ASSERT(globalObject->isGlobalObject());
- int index = currentInstruction[3].u.operand;
+ int index = currentInstruction[2].u.operand;
loadPtr(&globalObject->d()->registers, regT2);
@@ -529,10 +538,10 @@ void JIT::emit_op_get_global_var(Instruction* currentInstruction)
void JIT::emit_op_put_global_var(Instruction* currentInstruction)
{
- JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(currentInstruction[1].u.jsCell);
+ JSGlobalObject* globalObject = m_codeBlock->globalObject();
ASSERT(globalObject->isGlobalObject());
- int index = currentInstruction[2].u.operand;
- int value = currentInstruction[3].u.operand;
+ int index = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
emitLoad(value, regT1, regT0);
@@ -669,7 +678,7 @@ void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool dynamic)
// FIXME: Optimize to use patching instead of so many memory accesses.
unsigned dst = currentInstruction[1].u.operand;
- void* globalObject = currentInstruction[2].u.jsCell;
+ void* globalObject = m_codeBlock->globalObject();
unsigned currentIndex = m_globalResolveInfoIndex++;
void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
@@ -692,14 +701,12 @@ void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool dynamic)
void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
unsigned dst = currentInstruction[1].u.operand;
- void* globalObject = currentInstruction[2].u.jsCell;
- Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);
+ Identifier* ident = &m_codeBlock->identifier(currentInstruction[2].u.operand);
unsigned currentIndex = m_globalResolveInfoIndex++;
linkSlowCase(iter);
JITStubCall stubCall(this, cti_op_resolve_global);
- stubCall.addArgument(ImmPtr(globalObject));
stubCall.addArgument(ImmPtr(ident));
stubCall.addArgument(Imm32(currentIndex));
stubCall.call(dst);
diff --git a/JavaScriptCore/jit/JITPropertyAccess.cpp b/JavaScriptCore/jit/JITPropertyAccess.cpp
index 10dcd3f..6b2a2fe 100644
--- a/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -77,7 +77,10 @@ JIT::CodePtr JIT::stringGetByValStubGenerator(JSGlobalData* globalData, Executab
jit.move(Imm32(0), regT0);
jit.ret();
- LinkBuffer patchBuffer(&jit, pool);
+ LinkBuffer patchBuffer(&jit, pool, 0);
+ // We can't run without the JIT trampolines!
+ if (!patchBuffer.allocationSuccessful())
+ CRASH();
return patchBuffer.finalizeCode().m_code;
}
@@ -103,10 +106,10 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
emitJumpSlowCaseIfNotJSCell(regT0, base);
addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_vector)), regT2);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
- loadPtr(BaseIndex(regT2, regT1, ScalePtr), regT0);
+ loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
addSlowCase(branchTestPtr(Zero, regT0));
emitPutVirtualRegister(dst);
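
The switch from m_vector to m_storage here (and in the put_by_val hunk that follows) reflects JSArray now holding a pointer to its ArrayStorage header rather than a raw element pointer. The element vector lives inline at a fixed offset inside that header, so each access folds OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) into the addressing mode, and the old negative-offset arithmetic against m_length and m_numValuesInVector disappears. A simplified model of the layout, with the field set trimmed down from the real ArrayStorage:

    #include <cstddef>
    #include <cstdio>

    // Trimmed-down stand-in for JSC's ArrayStorage: a header with the
    // element vector allocated inline immediately after it.
    struct JSValueBits { void* bits; };
    struct ArrayStorage {
        unsigned m_length;            // length reported to JS
        unsigned m_numValuesInVector; // occupied slots
        JSValueBits m_vector[1];      // grows with the allocation
    };

    int main()
    {
        // The JIT now emits loads of the form:
        //   base  = array->m_storage
        //   value = *(base + index * sizeof(element) + offsetof(ArrayStorage, m_vector))
        std::printf("element base offset inside storage: %zu bytes\n",
                    offsetof(ArrayStorage, m_vector));
        return 0;
    }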
@@ -214,21 +217,21 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_vector)), regT2);
- Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
+ Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
Label storeResult(this);
emitGetVirtualRegister(value, regT0);
- storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr));
+ storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
Jump end = jump();
empty.link(this);
- add32(Imm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)-OBJECT_OFFSETOF(ArrayStorage, m_vector)));
- branch32(Below, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)-OBJECT_OFFSETOF(ArrayStorage, m_vector))).linkTo(storeResult, this);
+ add32(Imm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+ branch32(Below, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
move(regT1, regT0);
add32(Imm32(1), regT0);
- store32(regT0, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)-OBJECT_OFFSETOF(ArrayStorage, m_vector)));
+ store32(regT0, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)));
jump().linkTo(storeResult, this);
end.link(this);
@@ -581,28 +584,35 @@ void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID res
}
}
-void JIT::testPrototype(Structure* structure, JumpList& failureCases)
+void JIT::testPrototype(JSValue prototype, JumpList& failureCases)
{
- if (structure->m_prototype.isNull())
+ if (prototype.isNull())
return;
- move(ImmPtr(&asCell(structure->m_prototype)->m_structure), regT2);
- move(ImmPtr(asCell(structure->m_prototype)->m_structure), regT3);
- failureCases.append(branchPtr(NotEqual, Address(regT2), regT3));
+ // We have a special case for X86_64 here because X86 instructions that take immediate values
+ // only take 32-bit immediate values, whereas the pointer constants we are using here are 64-bit
+ // values. In the non-X86_64 case, the generated code is slightly more efficient because it uses
+ // two fewer instructions and doesn't require any scratch registers.
+#if CPU(X86_64)
+ move(ImmPtr(asCell(prototype)->structure()), regT3);
+ failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&asCell(prototype)->m_structure), regT3));
+#else
+ failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&asCell(prototype)->m_structure), ImmPtr(asCell(prototype)->structure())));
+#endif
}
-void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
+bool JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
{
JumpList failureCases;
// Check eax is an object of the right Structure.
failureCases.append(emitJumpIfNotJSCell(regT0));
failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
- testPrototype(oldStructure, failureCases);
+ testPrototype(oldStructure->storedPrototype(), failureCases);
// ecx = baseObject->m_structure
if (!direct) {
for (RefPtr<Structure>* it = chain->head(); *it; ++it)
- testPrototype(it->get(), failureCases);
+ testPrototype((*it)->storedPrototype(), failureCases);
}
Call callTarget;
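
The rewritten testPrototype takes the prototype JSValue directly and compares the cell's Structure pointer in place. The X86_64 branch exists because, as the comment notes, most x86 instructions accept at most a 32-bit immediate; a 64-bit pointer constant must first be materialized into a scratch register (mov r64, imm64 is the one form taking a full 64-bit immediate) before it can be compared against memory. A rough scalar analogue of the two code shapes, for illustration only:

    #include <cstdint>

    // What the 32-bit ports can emit: compare memory against an immediate.
    //   cmp [addr], imm32
    bool structureMatches32(std::uint32_t* structureSlot, std::uint32_t expected)
    {
        return *structureSlot == expected; // one instruction, no scratch register
    }

    // What x86-64 must emit for a 64-bit pointer constant:
    //   mov r3, imm64      ; materialize the constant (stands in for regT3)
    //   cmp [addr], r3     ; then compare
    bool structureMatches64(std::uint64_t* structureSlot, std::uint64_t expected)
    {
        std::uint64_t scratch = expected;
        return *structureSlot == scratch;
    }

    int main()
    {
        std::uint64_t slot = 42;
        return structureMatches64(&slot, 42) ? 0 : 1;
    }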
@@ -642,7 +652,9 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
restoreArgumentReferenceForTrampoline();
Call failureCall = tailRecursiveCall();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+ if (!patchBuffer.allocationSuccessful())
+ return false;
patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
@@ -652,9 +664,10 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
}
CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
RepatchBuffer repatchBuffer(m_codeBlock);
repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
+ stubInfo->initPutByIdTransition(oldStructure, newStructure, chain, entryLabel);
+ return true;
}
void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
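
Note the ordering in the hunk above: stubInfo is no longer updated eagerly. The structures, chain, and stub routine are recorded via initPutByIdTransition only after the LinkBuffer succeeded and the caller has been relinked, so a false return leaves the StructureStubInfo untouched and the slow path intact. A sketch of that control flow, with hypothetical names standing in for the JSC types:

    // Hypothetical condensation of the new success/failure protocol.
    struct StubInfo { bool initialized = false; };

    bool compileStub(StubInfo& info, bool allocationSucceeded)
    {
        if (!allocationSucceeded)
            return false;        // info untouched: slow path still valid
        // ... link code, repatch the caller ...
        info.initialized = true; // record state only once nothing can fail
        return true;
    }

    int main()
    {
        StubInfo info;
        return compileStub(info, true) && info.initialized ? 0 : 1;
    }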
@@ -717,22 +730,22 @@ void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo,
repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset), offset);
}
-void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
+bool JIT::privateCompilePatchGetArrayLength(StructureStubInfo* stubInfo, ReturnAddressPtr returnAddress)
{
- StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
-
// Check eax is an array
Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
// Checks out okay! - get the length from the storage
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_vector)), regT3);
- load32(Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)-OBJECT_OFFSETOF(ArrayStorage, m_vector)), regT2);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
+ load32(Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
Jump failureCases2 = branch32(Above, regT2, Imm32(JSImmediate::maxImmediateInt));
emitFastArithIntToImmNoCheck(regT2, regT0);
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+ if (!patchBuffer.allocationSuccessful())
+ return false;
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
@@ -744,7 +757,6 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
// Track the stub we have created so that it will be deleted later.
CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
@@ -753,9 +765,11 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
// We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
+ stubInfo->stubRoutine = entryLabel;
+ return true;
}
-void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+bool JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
// The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
// referencing the prototype object - let's speculatively load its table nice and early!)
@@ -795,7 +809,9 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
} else
compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+ if (!patchBuffer.allocationSuccessful())
+ return false;
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
@@ -813,7 +829,6 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
}
// Track the stub we have created so that it will be deleted later.
CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
@@ -822,10 +837,15 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
// We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+ stubInfo->initGetByIdProto(structure, prototypeStructure, entryLabel);
+ return true;
}
-void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+bool JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
{
+ PolymorphicAccessStructureList* polymorphicStructures = stubInfo->u.getByIdSelfList.structureList;
+ int currentIndex = stubInfo->u.getByIdSelfList.listSize;
+
Jump failureCase = checkStructure(regT0, structure);
bool needsStubLink = false;
if (slot.cachedPropertyType() == PropertySlot::Getter) {
@@ -852,7 +872,9 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+ if (!patchBuffer.allocationSuccessful())
+ return false;
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
@@ -880,10 +902,15 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
RepatchBuffer repatchBuffer(m_codeBlock);
repatchBuffer.relink(jumpLocation, entryLabel);
+ stubInfo->u.getByIdSelfList.listSize++;
+ return true;
}
-void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
+bool JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
+ PolymorphicAccessStructureList* prototypeStructures = stubInfo->u.getByIdProtoList.structureList;
+ int currentIndex = stubInfo->u.getByIdProtoList.listSize;
+
// The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
// referencing the prototype object - let's speculatively load its table nice and early!)
JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
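
As the two hunks above show, the polymorphic list and its current size are no longer threaded through as parameters; they now live in a union inside StructureStubInfo, selected by access type, and listSize is bumped only after a successful compile. A minimal model of that union-based layout (field names mirror the diff, but the struct itself is illustrative):

    // Illustrative reduction of StructureStubInfo's per-access-type union.
    struct PolymorphicAccessStructureList;

    struct StructureStubInfoModel {
        union {
            struct {
                PolymorphicAccessStructureList* structureList;
                int listSize;
            } getByIdSelfList;
            struct {
                PolymorphicAccessStructureList* structureList;
                int listSize;
            } getByIdProtoList;
        } u;
    };

    // Usage mirrors the compile functions: read the list and index up
    // front, and increment listSize only on the success path.
    bool compileSelfListStub(StructureStubInfoModel* stubInfo)
    {
        PolymorphicAccessStructureList* list = stubInfo->u.getByIdSelfList.structureList;
        int currentIndex = stubInfo->u.getByIdSelfList.listSize;
        (void)list;
        (void)currentIndex;
        bool linked = true; // stands in for patchBuffer.allocationSuccessful()
        if (!linked)
            return false;
        stubInfo->u.getByIdSelfList.listSize++;
        return true;
    }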
@@ -923,7 +950,9 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+ if (!patchBuffer.allocationSuccessful())
+ return false;
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
@@ -950,10 +979,15 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
RepatchBuffer repatchBuffer(m_codeBlock);
repatchBuffer.relink(jumpLocation, entryLabel);
+ stubInfo->u.getByIdProtoList.listSize++;
+ return true;
}
-void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
+bool JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
+ PolymorphicAccessStructureList* prototypeStructures = stubInfo->u.getByIdProtoList.structureList;
+ int currentIndex = stubInfo->u.getByIdProtoList.listSize;
+
ASSERT(count);
JumpList bucketsOfFail;
@@ -962,20 +996,12 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
bucketsOfFail.append(baseObjectCheck);
Structure* currStructure = structure;
- RefPtr<Structure>* chainEntries = chain->head();
+ RefPtr<Structure>* it = chain->head();
JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i) {
+ for (unsigned i = 0; i < count; ++i, ++it) {
protoObject = asObject(currStructure->prototypeForLookup(callFrame));
- currStructure = chainEntries[i].get();
-
- // Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
- move(ImmPtr(currStructure), regT3);
- bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
-#else
- bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
-#endif
+ currStructure = it->get();
+ testPrototype(protoObject, bucketsOfFail);
}
ASSERT(protoObject);
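
The open-coded structure check inside the chain walk, previously duplicated with its own CPU(X86_64) special case, is replaced by advancing a cursor through the StructureChain and delegating each guard to the new testPrototype. A sketch of the walking pattern in isolation, with simplified stand-in types:

    #include <cstdio>

    // Simplified stand-in: a chain is a null-terminated array of entries.
    struct Structure { Structure* proto = nullptr; };

    // Walk `count` links, advancing the cursor alongside the loop index;
    // the diff calls testPrototype at each step to emit a guard.
    bool walkChain(Structure** head, unsigned count)
    {
        Structure** it = head;
        for (unsigned i = 0; i < count; ++i, ++it) {
            if (!*it)
                return false; // chain shorter than expected
            // testPrototype(...) equivalent would emit a structure check here.
        }
        return true;
    }

    int main()
    {
        Structure a, b;
        Structure* chain[] = { &a, &b, nullptr };
        std::printf("chain ok: %d\n", walkChain(chain, 2));
        return 0;
    }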
@@ -1000,7 +1026,9 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+ if (!patchBuffer.allocationSuccessful())
+ return false;
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
@@ -1028,9 +1056,11 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
RepatchBuffer repatchBuffer(m_codeBlock);
repatchBuffer.relink(jumpLocation, entryLabel);
+ stubInfo->u.getByIdProtoList.listSize++;
+ return true;
}
-void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+bool JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
ASSERT(count);
@@ -1040,20 +1070,12 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
bucketsOfFail.append(checkStructure(regT0, structure));
Structure* currStructure = structure;
- RefPtr<Structure>* chainEntries = chain->head();
+ RefPtr<Structure>* it = chain->head();
JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i) {
+ for (unsigned i = 0; i < count; ++i, ++it) {
protoObject = asObject(currStructure->prototypeForLookup(callFrame));
- currStructure = chainEntries[i].get();
-
- // Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
- move(ImmPtr(currStructure), regT3);
- bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
-#else
- bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
-#endif
+ currStructure = it->get();
+ testPrototype(protoObject, bucketsOfFail);
}
ASSERT(protoObject);
@@ -1078,7 +1100,9 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+ if (!patchBuffer.allocationSuccessful())
+ return false;
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
@@ -1095,7 +1119,6 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
// Track the stub we have created so that it will be deleted later.
CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
@@ -1104,6 +1127,8 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
// We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+ stubInfo->initGetByIdChain(structure, chain, entryLabel);
+ return true;
}
/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
diff --git a/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
index 375d3e8..9239641 100644
--- a/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
+++ b/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
@@ -295,7 +295,10 @@ JIT::CodePtr JIT::stringGetByValStubGenerator(JSGlobalData* globalData, Executab
jit.move(Imm32(0), regT0);
jit.ret();
- LinkBuffer patchBuffer(&jit, pool);
+ LinkBuffer patchBuffer(&jit, pool, 0);
+ // We can't run without the JIT trampolines!
+ if (!patchBuffer.allocationSuccessful())
+ CRASH();
return patchBuffer.finalizeCode().m_code;
}
@@ -311,11 +314,11 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
emitJumpSlowCaseIfNotJSCell(base, regT1);
addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_vector)), regT3);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
- load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
- load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
+ load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
+ load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
addSlowCase(branch32(Equal, regT1, Imm32(JSValue::EmptyValueTag)));
emitStore(dst, regT1, regT0);
@@ -364,22 +367,22 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_vector)), regT3);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
- Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), Imm32(JSValue::EmptyValueTag));
+ Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), Imm32(JSValue::EmptyValueTag));
Label storeResult(this);
emitLoad(value, regT1, regT0);
- store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload))); // payload
- store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); // tag
+ store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); // payload
+ store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); // tag
Jump end = jump();
empty.link(this);
- add32(Imm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)-OBJECT_OFFSETOF(ArrayStorage, m_vector)));
- branch32(Below, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)-OBJECT_OFFSETOF(ArrayStorage, m_vector))).linkTo(storeResult, this);
+ add32(Imm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+ branch32(Below, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
add32(Imm32(1), regT2, regT0);
- store32(regT0, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)-OBJECT_OFFSETOF(ArrayStorage, m_vector)));
+ store32(regT0, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)));
jump().linkTo(storeResult, this);
end.link(this);
@@ -585,27 +588,36 @@ void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID res
load32(Address(temp, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
}
-void JIT::testPrototype(Structure* structure, JumpList& failureCases)
+void JIT::testPrototype(JSValue prototype, JumpList& failureCases)
{
- if (structure->m_prototype.isNull())
+ if (prototype.isNull())
return;
- failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&asCell(structure->m_prototype)->m_structure), ImmPtr(asCell(structure->m_prototype)->m_structure)));
+ // We have a special case for X86_64 here because X86 instructions that take immediate values
+ // only take 32-bit immediate values, whereas the pointer constants we are using here are 64-bit
+ // values. In the non-X86_64 case, the generated code is slightly more efficient because it uses
+ // two fewer instructions and doesn't require any scratch registers.
+#if CPU(X86_64)
+ move(ImmPtr(asCell(prototype)->structure()), regT3);
+ failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&asCell(prototype)->m_structure), regT3));
+#else
+ failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&asCell(prototype)->m_structure), ImmPtr(asCell(prototype)->structure())));
+#endif
}
-void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
+bool JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
{
// It is assumed that regT0 contains the basePayload and regT1 contains the baseTag. The value can be found on the stack.
JumpList failureCases;
failureCases.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
- testPrototype(oldStructure, failureCases);
+ testPrototype(oldStructure->storedPrototype(), failureCases);
if (!direct) {
// Verify that nothing in the prototype chain has a setter for this property.
for (RefPtr<Structure>* it = chain->head(); *it; ++it)
- testPrototype(it->get(), failureCases);
+ testPrototype((*it)->storedPrototype(), failureCases);
}
// Reallocate property storage if needed.
@@ -644,8 +656,10 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
restoreArgumentReferenceForTrampoline();
Call failureCall = tailRecursiveCall();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
-
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+ if (!patchBuffer.allocationSuccessful())
+ return false;
+
patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
if (willNeedStorageRealloc) {
@@ -654,9 +668,10 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
}
CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
RepatchBuffer repatchBuffer(m_codeBlock);
repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
+ stubInfo->initPutByIdTransition(oldStructure, newStructure, chain, entryLabel);
+ return true;
}
void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
@@ -721,26 +736,26 @@ void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo,
repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
}
-void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
+bool JIT::privateCompilePatchGetArrayLength(StructureStubInfo* stubInfo, ReturnAddressPtr returnAddress)
{
- StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
-
// regT0 holds a JSCell*
// Check for array
Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
// Checks out okay! - get the length from the storage
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_vector)), regT2);
- load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)-OBJECT_OFFSETOF(ArrayStorage, m_vector)), regT2);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
+ load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
Jump failureCases2 = branch32(Above, regT2, Imm32(INT_MAX));
move(regT2, regT0);
move(Imm32(JSValue::Int32Tag), regT1);
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
-
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+ if (!patchBuffer.allocationSuccessful())
+ return false;
+
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
patchBuffer.link(failureCases1, slowCaseBegin);
@@ -751,8 +766,7 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
// Track the stub we have created so that it will be deleted later.
CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
-
+
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
RepatchBuffer repatchBuffer(m_codeBlock);
@@ -760,9 +774,11 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
// We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
+ stubInfo->stubRoutine = entryLabel;
+ return true;
}
-void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+bool JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
// regT0 holds a JSCell*
@@ -803,8 +819,10 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
-
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+ if (!patchBuffer.allocationSuccessful())
+ return false;
+
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
patchBuffer.link(failureCases1, slowCaseBegin);
@@ -822,7 +840,6 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
// Track the stub we have created so that it will be deleted later.
CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
@@ -831,11 +848,16 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
// We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+ stubInfo->initGetByIdProto(structure, prototypeStructure, entryLabel);
+ return true;
}
-void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+bool JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
{
+ PolymorphicAccessStructureList* polymorphicStructures = stubInfo->u.getByIdSelfList.structureList;
+ int currentIndex = stubInfo->u.getByIdSelfList.listSize;
+
// regT0 holds a JSCell*
Jump failureCase = checkStructure(regT0, structure);
bool needsStubLink = false;
@@ -864,7 +886,10 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+ if (!patchBuffer.allocationSuccessful())
+ return false;
+
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
if (iter->to)
@@ -890,10 +915,15 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
RepatchBuffer repatchBuffer(m_codeBlock);
repatchBuffer.relink(jumpLocation, entryLabel);
+ stubInfo->u.getByIdSelfList.listSize++;
+ return true;
}
-void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
+bool JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
+ PolymorphicAccessStructureList* prototypeStructures = stubInfo->u.getByIdProtoList.structureList;
+ int currentIndex = stubInfo->u.getByIdProtoList.listSize;
+
// regT0 holds a JSCell*
// The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
@@ -934,7 +964,10 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+ if (!patchBuffer.allocationSuccessful())
+ return false;
+
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
if (iter->to)
@@ -959,10 +992,15 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
RepatchBuffer repatchBuffer(m_codeBlock);
repatchBuffer.relink(jumpLocation, entryLabel);
+ stubInfo->u.getByIdProtoList.listSize++;
+ return true;
}
-void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
+bool JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
+ PolymorphicAccessStructureList* prototypeStructures = stubInfo->u.getByIdProtoList.structureList;
+ int currentIndex = stubInfo->u.getByIdProtoList.listSize;
+
// regT0 holds a JSCell*
ASSERT(count);
@@ -972,20 +1010,12 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
bucketsOfFail.append(checkStructure(regT0, structure));
Structure* currStructure = structure;
- RefPtr<Structure>* chainEntries = chain->head();
+ RefPtr<Structure>* it = chain->head();
JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i) {
+ for (unsigned i = 0; i < count; ++i, ++it) {
protoObject = asObject(currStructure->prototypeForLookup(callFrame));
- currStructure = chainEntries[i].get();
-
- // Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
- move(ImmPtr(currStructure), regT3);
- bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
-#else
- bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
-#endif
+ currStructure = it->get();
+ testPrototype(protoObject, bucketsOfFail);
}
ASSERT(protoObject);
@@ -1011,7 +1041,10 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+ if (!patchBuffer.allocationSuccessful())
+ return false;
+
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
if (iter->to)
@@ -1037,9 +1070,11 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
RepatchBuffer repatchBuffer(m_codeBlock);
repatchBuffer.relink(jumpLocation, entryLabel);
+ stubInfo->u.getByIdProtoList.listSize++;
+ return true;
}
-void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+bool JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
// regT0 holds a JSCell*
ASSERT(count);
@@ -1050,20 +1085,12 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
bucketsOfFail.append(checkStructure(regT0, structure));
Structure* currStructure = structure;
- RefPtr<Structure>* chainEntries = chain->head();
+ RefPtr<Structure>* it = chain->head();
JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i) {
+ for (unsigned i = 0; i < count; ++i, ++it) {
protoObject = asObject(currStructure->prototypeForLookup(callFrame));
- currStructure = chainEntries[i].get();
-
- // Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
- move(ImmPtr(currStructure), regT3);
- bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
-#else
- bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
-#endif
+ currStructure = it->get();
+ testPrototype(protoObject, bucketsOfFail);
}
ASSERT(protoObject);
@@ -1088,7 +1115,10 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+ if (!patchBuffer.allocationSuccessful())
+ return false;
+
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
if (iter->to)
@@ -1103,8 +1133,7 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
// Track the stub we have created so that it will be deleted later.
CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
-
+
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
RepatchBuffer repatchBuffer(m_codeBlock);
@@ -1112,6 +1141,8 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
// We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+ stubInfo->initGetByIdChain(structure, chain, entryLabel);
+ return true;
}
/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
diff --git a/JavaScriptCore/jit/JITStubs.cpp b/JavaScriptCore/jit/JITStubs.cpp
index f088b6e..c4ff0ca 100644
--- a/JavaScriptCore/jit/JITStubs.cpp
+++ b/JavaScriptCore/jit/JITStubs.cpp
@@ -71,7 +71,7 @@ namespace JSC {
#define SYMBOL_STRING(name) #name
#endif
-#if OS(IPHONE_OS)
+#if OS(IOS)
#define THUMB_FUNC_PARAM(name) SYMBOL_STRING(name)
#else
#define THUMB_FUNC_PARAM(name)
@@ -239,7 +239,7 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
#define EXCEPTION_OFFSET 0x58
#define ENABLE_PROFILER_REFERENCE_OFFSET 0x60
-#elif COMPILER(GCC) && CPU(ARM_TRADITIONAL)
+#elif (COMPILER(GCC) || COMPILER(RVCT)) && CPU(ARM_TRADITIONAL)
#define THUNK_RETURN_ADDRESS_OFFSET 64
#define PRESERVEDR4_OFFSET 68
@@ -304,6 +304,12 @@ extern "C" {
}
}
+#elif COMPILER(MSVC) && CPU(ARM_TRADITIONAL)
+
+#define THUNK_RETURN_ADDRESS_OFFSET 64
+#define PRESERVEDR4_OFFSET 68
+// See DEFINE_STUB_FUNCTION for more information.
+
#else
#error "JIT not supported on this platform."
#endif
@@ -451,7 +457,7 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
#define EXCEPTION_OFFSET 0x38
#define ENABLE_PROFILER_REFERENCE_OFFSET 0x40
-#elif COMPILER(GCC) && CPU(ARM_TRADITIONAL)
+#elif (COMPILER(GCC) || COMPILER(RVCT)) && CPU(ARM_TRADITIONAL)
#define THUNK_RETURN_ADDRESS_OFFSET 32
#define PRESERVEDR4_OFFSET 36
@@ -548,48 +554,6 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
".end " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
);
-#elif COMPILER(RVCT) && CPU(ARM_TRADITIONAL)
-
-#define THUNK_RETURN_ADDRESS_OFFSET 32
-#define PRESERVEDR4_OFFSET 36
-
-__asm EncodedJSValue ctiTrampoline(void*, RegisterFile*, CallFrame*, JSValue*, Profiler**, JSGlobalData*)
-{
- ARM
- stmdb sp!, {r1-r3}
- stmdb sp!, {r4-r8, lr}
- sub sp, sp, #36
- mov r4, r2
- mov r5, #512
- mov lr, pc
- bx r0
- add sp, sp, #36
- ldmia sp!, {r4-r8, lr}
- add sp, sp, #12
- bx lr
-}
-
-__asm void ctiVMThrowTrampoline()
-{
- ARM
- PRESERVE8
- mov r0, sp
- bl cti_vm_throw
- add sp, sp, #36
- ldmia sp!, {r4-r8, lr}
- add sp, sp, #12
- bx lr
-}
-
-__asm void ctiOpThrowNotCaught()
-{
- ARM
- add sp, sp, #36
- ldmia sp!, {r4-r8, lr}
- add sp, sp, #12
- bx lr
-}
-
#elif COMPILER(MSVC) && CPU(X86)
// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
@@ -649,6 +613,12 @@ extern "C" {
}
}
+#elif COMPILER(MSVC) && CPU(ARM_TRADITIONAL)
+
+#define THUNK_RETURN_ADDRESS_OFFSET 32
+#define PRESERVEDR4_OFFSET 36
+// See DEFINE_STUB_FUNCTION for more information.
+
#else
#error "JIT not supported on this platform."
#endif
@@ -755,6 +725,44 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
"mov pc, lr" "\n"
);
+#elif COMPILER(RVCT) && CPU(ARM_TRADITIONAL)
+
+__asm EncodedJSValue ctiTrampoline(void*, RegisterFile*, CallFrame*, JSValue*, Profiler**, JSGlobalData*)
+{
+ ARM
+ stmdb sp!, {r1-r3}
+ stmdb sp!, {r4-r8, lr}
+ sub sp, sp, # PRESERVEDR4_OFFSET
+ mov r4, r2
+ mov r5, #512
+ mov lr, pc
+ bx r0
+ add sp, sp, # PRESERVEDR4_OFFSET
+ ldmia sp!, {r4-r8, lr}
+ add sp, sp, #12
+ bx lr
+}
+
+__asm void ctiVMThrowTrampoline()
+{
+ ARM
+ PRESERVE8
+ mov r0, sp
+ bl cti_vm_throw
+ add sp, sp, # PRESERVEDR4_OFFSET
+ ldmia sp!, {r4-r8, lr}
+ add sp, sp, #12
+ bx lr
+}
+
+__asm void ctiOpThrowNotCaught()
+{
+ ARM
+ add sp, sp, # PRESERVEDR4_OFFSET
+ ldmia sp!, {r4-r8, lr}
+ add sp, sp, #12
+ bx lr
+}
#endif
#if ENABLE(OPCODE_SAMPLING)
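The RVCT trampolines above now derive their frame adjustment from PRESERVEDR4_OFFSET instead of a hard-coded #36, so the assembly tracks the C++ stack-frame layout. The MSVC/X86 section of this file guards the same invariant with ASSERTs; a compile-time guard of the same flavour would look like this (layout illustrative only):

    #include <cstddef>
    #include <cstdint>

    // Stand-in for the real JITStackFrame on 32-bit ARM: 36 bytes of locals
    // (matching "sub sp, sp, #36") followed by the registers saved by
    // "stmdb sp!, {r4-r8, lr}".
    struct FrameLayout {
        std::uint32_t locals[9];
        std::uint32_t preservedR4;
    };

    static_assert(offsetof(FrameLayout, preservedR4) == 36,
                  "PRESERVEDR4_OFFSET must match the trampoline frame size");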
@@ -815,111 +823,89 @@ JITThunks::~JITThunks()
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot& slot, StructureStubInfo* stubInfo, bool direct)
+NEVER_INLINE bool JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot& slot, StructureStubInfo* stubInfo, bool direct)
{
// The interpreter checks for recursion here; I do not believe this can occur in CTI.
if (!baseValue.isCell())
- return;
+ return false;
// Uncacheable: give up.
- if (!slot.isCacheable()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
- return;
- }
+ if (!slot.isCacheable())
+ return false;
JSCell* baseCell = asCell(baseValue);
Structure* structure = baseCell->structure();
- if (structure->isUncacheableDictionary()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
- return;
- }
+ if (structure->isUncacheableDictionary())
+ return false;
// If baseCell != base, then baseCell must be a proxy for another object.
- if (baseCell != slot.base()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
- return;
- }
+ if (baseCell != slot.base())
+ return false;
// Cache hit: Specialize instruction and ref Structures.
// Structure transition, cache transition info
if (slot.type() == PutPropertySlot::NewProperty) {
- if (structure->isDictionary()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
- return;
- }
+ if (structure->isDictionary())
+ return false;
// put_by_id_transition checks the prototype chain for setters.
normalizePrototypeChain(callFrame, baseCell);
StructureChain* prototypeChain = structure->prototypeChain(callFrame);
- stubInfo->initPutByIdTransition(structure->previousID(), structure, prototypeChain);
- JIT::compilePutByIdTransition(callFrame->scopeChain()->globalData, codeBlock, stubInfo, structure->previousID(), structure, slot.cachedOffset(), prototypeChain, returnAddress, direct);
- return;
+ return JIT::compilePutByIdTransition(callFrame->scopeChain()->globalData, codeBlock, stubInfo, structure->previousID(), structure, slot.cachedOffset(), prototypeChain, returnAddress, direct);
}
-
- stubInfo->initPutByIdReplace(structure);
JIT::patchPutByIdReplace(codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress, direct);
+ stubInfo->initPutByIdReplace(structure);
+ return true;
}
-NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo* stubInfo)
+NEVER_INLINE bool JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo* stubInfo)
{
// FIXME: Write a test that proves we need to check for recursion here just
// like the interpreter does, then add a check for recursion.
// FIXME: Cache property access for immediates.
- if (!baseValue.isCell()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
- return;
- }
+ if (!baseValue.isCell())
+ return false;
JSGlobalData* globalData = &callFrame->globalData();
- if (isJSArray(globalData, baseValue) && propertyName == callFrame->propertyNames().length) {
- JIT::compilePatchGetArrayLength(callFrame->scopeChain()->globalData, codeBlock, returnAddress);
- return;
- }
+ if (isJSArray(globalData, baseValue) && propertyName == callFrame->propertyNames().length)
+ return JIT::compilePatchGetArrayLength(callFrame->scopeChain()->globalData, codeBlock, stubInfo, returnAddress);
if (isJSString(globalData, baseValue) && propertyName == callFrame->propertyNames().length) {
// The tradeoff of compiling a patched inline string length access routine does not seem
// to pay off, so we currently only do this for arrays.
ctiPatchCallByReturnAddress(codeBlock, returnAddress, globalData->jitStubs->ctiStringLengthTrampoline());
- return;
+ return true;
}
// Uncacheable: give up.
- if (!slot.isCacheable()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
- return;
- }
+ if (!slot.isCacheable())
+ return false;
JSCell* baseCell = asCell(baseValue);
Structure* structure = baseCell->structure();
- if (structure->isUncacheableDictionary()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
- return;
- }
+ if (structure->isUncacheableDictionary())
+ return false;
// Cache hit: Specialize instruction and ref Structures.
if (slot.slotBase() == baseValue) {
- // set this up, so derefStructures can do it's job.
- stubInfo->initGetByIdSelf(structure);
if (slot.cachedPropertyType() != PropertySlot::Value)
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
- else
- JIT::patchGetByIdSelf(codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress);
- return;
+ return false;
+ JIT::patchGetByIdSelf(codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress);
+ stubInfo->initGetByIdSelf(structure);
+ return true;
}
- if (structure->isDictionary()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
- return;
- }
+ if (structure->isDictionary())
+ return false;
if (slot.slotBase() == structure->prototypeForLookup(callFrame)) {
ASSERT(slot.slotBase().isObject());
@@ -933,25 +919,20 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
slotBaseObject->flattenDictionaryObject();
offset = slotBaseObject->structure()->get(propertyName);
}
-
- stubInfo->initGetByIdProto(structure, slotBaseObject->structure());
-
ASSERT(!structure->isDictionary());
ASSERT(!slotBaseObject->structure()->isDictionary());
- JIT::compileGetByIdProto(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, slotBaseObject->structure(), propertyName, slot, offset, returnAddress);
- return;
+ return JIT::compileGetByIdProto(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, slotBaseObject->structure(), propertyName, slot, offset, returnAddress);
}
size_t offset = slot.cachedOffset();
size_t count = normalizePrototypeChain(callFrame, baseValue, slot.slotBase(), propertyName, offset);
if (!count) {
stubInfo->accessType = access_get_by_id_generic;
- return;
+ return true;
}
StructureChain* prototypeChain = structure->prototypeChain(callFrame);
- stubInfo->initGetByIdChain(structure, prototypeChain);
- JIT::compileGetByIdChain(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, prototypeChain, count, propertyName, slot, offset, returnAddress);
+ return JIT::compileGetByIdChain(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, prototypeChain, count, propertyName, slot, offset, returnAddress);
}
#endif // ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
@@ -1152,9 +1133,9 @@ RVCT(__asm #rtype# cti_#op#(STUB_ARGS_DECLARATION))
RVCT({)
RVCT( ARM)
RVCT( IMPORT JITStubThunked_#op#)
-RVCT( str lr, [sp, ##offset#])
+RVCT( str lr, [sp, # THUNK_RETURN_ADDRESS_OFFSET])
RVCT( bl JITStubThunked_#op#)
-RVCT( ldr lr, [sp, ##offset#])
+RVCT( ldr lr, [sp, # THUNK_RETURN_ADDRESS_OFFSET])
RVCT( bx lr)
RVCT(})
RVCT()
@@ -1163,6 +1144,62 @@ RVCT()
/* Include the generated file */
#include "GeneratedJITStubs_RVCT.h"
+#elif CPU(ARM_TRADITIONAL) && COMPILER(MSVC)
+
+#define DEFINE_STUB_FUNCTION(rtype, op) extern "C" rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
+
+/* The following is a workaround for the MSVC toolchain, which does not support inline assembly */
+
+/* The following section is a template to generate code for GeneratedJITStubs_MSVC.asm */
+/* The pattern "#xxx#" will be replaced with "xxx" */
+
+/*
+MSVC_BEGIN( AREA Trampoline, CODE)
+MSVC_BEGIN()
+MSVC_BEGIN( EXPORT ctiTrampoline)
+MSVC_BEGIN( EXPORT ctiVMThrowTrampoline)
+MSVC_BEGIN( EXPORT ctiOpThrowNotCaught)
+MSVC_BEGIN()
+MSVC_BEGIN(ctiTrampoline PROC)
+MSVC_BEGIN( stmdb sp!, {r1-r3})
+MSVC_BEGIN( stmdb sp!, {r4-r8, lr})
+MSVC_BEGIN( sub sp, sp, ##offset#+4)
+MSVC_BEGIN( mov r4, r2)
+MSVC_BEGIN( mov r5, #512)
+MSVC_BEGIN( ; r0 contains the code)
+MSVC_BEGIN( mov lr, pc)
+MSVC_BEGIN( bx r0)
+MSVC_BEGIN( add sp, sp, ##offset#+4)
+MSVC_BEGIN( ldmia sp!, {r4-r8, lr})
+MSVC_BEGIN( add sp, sp, #12)
+MSVC_BEGIN( bx lr)
+MSVC_BEGIN(ctiTrampoline ENDP)
+MSVC_BEGIN()
+MSVC_BEGIN(ctiVMThrowTrampoline PROC)
+MSVC_BEGIN( mov r0, sp)
+MSVC_BEGIN( mov lr, pc)
+MSVC_BEGIN( bl cti_vm_throw)
+MSVC_BEGIN(ctiOpThrowNotCaught)
+MSVC_BEGIN( add sp, sp, ##offset#+4)
+MSVC_BEGIN( ldmia sp!, {r4-r8, lr})
+MSVC_BEGIN( add sp, sp, #12)
+MSVC_BEGIN( bx lr)
+MSVC_BEGIN(ctiVMThrowTrampoline ENDP)
+MSVC_BEGIN()
+
+MSVC( EXPORT cti_#op#)
+MSVC( IMPORT JITStubThunked_#op#)
+MSVC(cti_#op# PROC)
+MSVC( str lr, [sp, ##offset#])
+MSVC( bl JITStubThunked_#op#)
+MSVC( ldr lr, [sp, ##offset#])
+MSVC( bx lr)
+MSVC(cti_#op# ENDP)
+MSVC()
+
+MSVC_END( END)
+*/
+
#else
#define DEFINE_STUB_FUNCTION(rtype, op) rtype JIT_STUB cti_##op(STUB_ARGS_DECLARATION)
#endif
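For reference, the two DEFINE_STUB_FUNCTION flavours expand as follows for a representative stub (op_add, chosen arbitrarily):

    // Default expansion - the stub is the C entry point itself:
    //   DEFINE_STUB_FUNCTION(EncodedJSValue, op_add)
    //     => EncodedJSValue JIT_STUB cti_op_add(STUB_ARGS_DECLARATION)
    //
    // MSVC/ARM expansion - only the thunked body is C++; the cti_op_add
    // wrapper that spills and reloads lr is generated into
    // GeneratedJITStubs_MSVC.asm from the MSVC() template above:
    //   DEFINE_STUB_FUNCTION(EncodedJSValue, op_add)
    //     => extern "C" EncodedJSValue JITStubThunked_op_add(STUB_ARGS_DECLARATION)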
@@ -1348,9 +1385,13 @@ DEFINE_STUB_FUNCTION(void, op_put_by_id)
StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
if (!stubInfo->seenOnce())
stubInfo->setSeen();
- else
- JITThunks::tryCachePutByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, stackFrame.args[0].jsValue(), slot, stubInfo, false);
-
+ else {
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ bool cached = JITThunks::tryCachePutByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, baseValue, slot, stubInfo, false);
+ if (!cached && baseValue.isCell())
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_id_generic));
+ }
+
CHECK_FOR_EXCEPTION_AT_END();
}
@@ -1367,9 +1408,13 @@ DEFINE_STUB_FUNCTION(void, op_put_by_id_direct)
StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
if (!stubInfo->seenOnce())
stubInfo->setSeen();
- else
- JITThunks::tryCachePutByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, stackFrame.args[0].jsValue(), slot, stubInfo, true);
-
+ else {
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ bool cached = JITThunks::tryCachePutByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, baseValue, slot, stubInfo, true);
+ if (!cached && baseValue.isCell())
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_id_direct_generic));
+ }
+
CHECK_FOR_EXCEPTION_AT_END();
}
@@ -1501,8 +1546,11 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id)
StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
if (!stubInfo->seenOnce())
stubInfo->setSeen();
- else
- JITThunks::tryCacheGetByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, baseValue, ident, slot, stubInfo);
+ else {
+ bool cached = JITThunks::tryCacheGetByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, baseValue, ident, slot, stubInfo);
+ if (!cached)
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_generic));
+ }
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
@@ -1531,57 +1579,28 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_self_fail)
ASSERT(slot.slotBase().isObject());
- PolymorphicAccessStructureList* polymorphicStructureList;
- int listIndex = 1;
-
- if (stubInfo->accessType == access_get_by_id_self) {
- ASSERT(!stubInfo->stubRoutine);
- polymorphicStructureList = new PolymorphicAccessStructureList(CodeLocationLabel(), stubInfo->u.getByIdSelf.baseObjectStructure);
- stubInfo->initGetByIdSelfList(polymorphicStructureList, 1);
- } else {
- polymorphicStructureList = stubInfo->u.getByIdSelfList.structureList;
- listIndex = stubInfo->u.getByIdSelfList.listSize;
- }
- if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
- stubInfo->u.getByIdSelfList.listSize++;
- JIT::compileGetByIdSelfList(callFrame->scopeChain()->globalData, codeBlock, stubInfo, polymorphicStructureList, listIndex, asCell(baseValue)->structure(), ident, slot, slot.cachedOffset());
+ // If this is a regular self access (not yet upgraded to list), then switch the stubInfo over.
+ if (stubInfo->accessType == access_get_by_id_self)
+ stubInfo->initGetByIdSelfList(new PolymorphicAccessStructureList(stubInfo->stubRoutine, stubInfo->u.getByIdSelf.baseObjectStructure));
- if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
- ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_generic));
+ // If there is room in the list, try to add a cached entry.
+ if (stubInfo->u.getByIdSelfList.listSize < POLYMORPHIC_LIST_CACHE_SIZE) {
+ bool cached = JIT::compileGetByIdSelfList(callFrame->scopeChain()->globalData, codeBlock, stubInfo, asCell(baseValue)->structure(), ident, slot, slot.cachedOffset());
+ if (cached)
+ return JSValue::encode(result);
}
- } else
- ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_generic));
+ }
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_generic));
return JSValue::encode(result);
}
-static PolymorphicAccessStructureList* getPolymorphicAccessStructureListSlot(StructureStubInfo* stubInfo, int& listIndex)
-{
- PolymorphicAccessStructureList* prototypeStructureList = 0;
- listIndex = 1;
-
- switch (stubInfo->accessType) {
- case access_get_by_id_proto:
- prototypeStructureList = new PolymorphicAccessStructureList(stubInfo->stubRoutine, stubInfo->u.getByIdProto.baseObjectStructure, stubInfo->u.getByIdProto.prototypeStructure);
- stubInfo->stubRoutine = CodeLocationLabel();
- stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
- break;
- case access_get_by_id_chain:
- prototypeStructureList = new PolymorphicAccessStructureList(stubInfo->stubRoutine, stubInfo->u.getByIdChain.baseObjectStructure, stubInfo->u.getByIdChain.chain);
- stubInfo->stubRoutine = CodeLocationLabel();
- stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
- break;
- case access_get_by_id_proto_list:
- prototypeStructureList = stubInfo->u.getByIdProtoList.structureList;
- listIndex = stubInfo->u.getByIdProtoList.listSize;
- if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE)
- stubInfo->u.getByIdProtoList.listSize++;
- break;
- default:
- ASSERT_NOT_REACHED();
- }
-
- ASSERT(listIndex <= POLYMORPHIC_LIST_CACHE_SIZE);
- return prototypeStructureList;
+static void setupPolymorphicProtoList(StructureStubInfo* stubInfo)
+{
+ if (stubInfo->accessType == access_get_by_id_proto)
+ stubInfo->initGetByIdProtoList(new PolymorphicAccessStructureList(stubInfo->stubRoutine, stubInfo->u.getByIdProto.baseObjectStructure, stubInfo->u.getByIdProto.prototypeStructure));
+ else if (stubInfo->accessType == access_get_by_id_chain)
+ stubInfo->initGetByIdProtoList(new PolymorphicAccessStructureList(stubInfo->stubRoutine, stubInfo->u.getByIdChain.baseObjectStructure, stubInfo->u.getByIdChain.chain));
+ ASSERT(stubInfo->accessType == access_get_by_id_proto_list);
}
DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_getter_stub)
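setupPolymorphicProtoList() above replaces getPolymorphicAccessStructureListSlot(): rather than returning a list plus an index, it only upgrades the stubInfo to the proto-list state, and the stubs themselves check listSize against POLYMORPHIC_LIST_CACHE_SIZE before compiling another entry. The bounded-growth logic, in isolation (sketch; the real code bumps listSize inside the JIT::compile* helpers on success):

    static const int POLYMORPHIC_LIST_CACHE_SIZE = 8; // illustrative value

    // Returns false once the list is full, at which point the call site is
    // patched over to the generic stub and never consulted again.
    static bool tryClaimListSlot(int& listSize)
    {
        if (listSize >= POLYMORPHIC_LIST_CACHE_SIZE)
            return false;
        ++listSize; // claim the slot only when a stub is actually emitted
        return true;
    }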
@@ -1642,40 +1661,36 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
size_t offset = slot.cachedOffset();
- if (slot.slotBase() == baseValue)
- ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
- else if (slot.slotBase() == asCell(baseValue)->structure()->prototypeForLookup(callFrame)) {
- ASSERT(!asCell(baseValue)->structure()->isDictionary());
- // Since we're accessing a prototype in a loop, it's a good bet that it
- // should not be treated as a dictionary.
- if (slotBaseObject->structure()->isDictionary()) {
- slotBaseObject->flattenDictionaryObject();
- offset = slotBaseObject->structure()->get(propertyName);
- }
-
- int listIndex;
- PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(stubInfo, listIndex);
- if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
- JIT::compileGetByIdProtoList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, slotBaseObject->structure(), propertyName, slot, offset);
-
- if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
- ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
- }
- } else if (size_t count = normalizePrototypeChain(callFrame, baseValue, slot.slotBase(), propertyName, offset)) {
- ASSERT(!asCell(baseValue)->structure()->isDictionary());
- int listIndex;
- PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(stubInfo, listIndex);
-
- if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
- StructureChain* protoChain = structure->prototypeChain(callFrame);
- JIT::compileGetByIdChainList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, protoChain, count, propertyName, slot, offset);
+ // Don't mix self & proto/chain accesses in the same list
+ if (slot.slotBase() != baseValue) {
+ if (slot.slotBase() == asCell(baseValue)->structure()->prototypeForLookup(callFrame)) {
+ ASSERT(!asCell(baseValue)->structure()->isDictionary());
+ // Since we're accessing a prototype in a loop, it's a good bet that it
+ // should not be treated as a dictionary.
+ if (slotBaseObject->structure()->isDictionary()) {
+ slotBaseObject->flattenDictionaryObject();
+ offset = slotBaseObject->structure()->get(propertyName);
+ }
- if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
- ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
+ setupPolymorphicProtoList(stubInfo);
+ if (stubInfo->u.getByIdProtoList.listSize < POLYMORPHIC_LIST_CACHE_SIZE) {
+ bool cached = JIT::compileGetByIdProtoList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, slotBaseObject->structure(), propertyName, slot, offset);
+ if (cached)
+ return JSValue::encode(result);
+ }
+ } else if (size_t count = normalizePrototypeChain(callFrame, baseValue, slot.slotBase(), propertyName, offset)) {
+ ASSERT(!asCell(baseValue)->structure()->isDictionary());
+
+ setupPolymorphicProtoList(stubInfo);
+ if (stubInfo->u.getByIdProtoList.listSize < POLYMORPHIC_LIST_CACHE_SIZE) {
+ bool cached = JIT::compileGetByIdChainList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, structure->prototypeChain(callFrame), count, propertyName, slot, offset);
+ if (cached)
+ return JSValue::encode(result);
+ }
}
- } else
- ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
+ }
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
return JSValue::encode(result);
}
@@ -2611,16 +2626,17 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_global)
STUB_INIT_STACK_FRAME(stackFrame);
CallFrame* callFrame = stackFrame.callFrame;
- JSGlobalObject* globalObject = stackFrame.args[0].globalObject();
- Identifier& ident = stackFrame.args[1].identifier();
- unsigned globalResolveInfoIndex = stackFrame.args[2].int32();
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ JSGlobalObject* globalObject = codeBlock->globalObject();
+ Identifier& ident = stackFrame.args[0].identifier();
+ unsigned globalResolveInfoIndex = stackFrame.args[1].int32();
ASSERT(globalObject->isGlobalObject());
PropertySlot slot(globalObject);
if (globalObject->getPropertySlot(callFrame, ident, slot)) {
JSValue result = slot.getValue(callFrame, ident);
if (slot.isCacheableValue() && !globalObject->structure()->isUncacheableDictionary() && slot.slotBase() == globalObject) {
- GlobalResolveInfo& globalResolveInfo = callFrame->codeBlock()->globalResolveInfo(globalResolveInfoIndex);
+ GlobalResolveInfo& globalResolveInfo = codeBlock->globalResolveInfo(globalResolveInfoIndex);
if (globalResolveInfo.structure)
globalResolveInfo.structure->deref();
globalObject->structure()->ref();
@@ -2633,8 +2649,8 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_global)
return JSValue::encode(result);
}
- unsigned vPCIndex = callFrame->codeBlock()->bytecodeOffset(callFrame, STUB_RETURN_ADDRESS);
- stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, callFrame->codeBlock());
+ unsigned vPCIndex = codeBlock->bytecodeOffset(callFrame, STUB_RETURN_ADDRESS);
+ stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock);
VM_THROW_EXCEPTION();
}
diff --git a/JavaScriptCore/jit/JITStubs.h b/JavaScriptCore/jit/JITStubs.h
index 306e475..94e319f 100644
--- a/JavaScriptCore/jit/JITStubs.h
+++ b/JavaScriptCore/jit/JITStubs.h
@@ -252,8 +252,8 @@ namespace JSC {
JITThunks(JSGlobalData*);
~JITThunks();
- static void tryCacheGetByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot&, StructureStubInfo* stubInfo);
- static void tryCachePutByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot&, StructureStubInfo* stubInfo, bool direct);
+ static bool tryCacheGetByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot&, StructureStubInfo* stubInfo);
+ static bool tryCachePutByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot&, StructureStubInfo* stubInfo, bool direct);
MacroAssemblerCodePtr ctiStringLengthTrampoline() { return m_trampolineStructure.ctiStringLengthTrampoline; }
MacroAssemblerCodePtr ctiVirtualCallLink() { return m_trampolineStructure.ctiVirtualCallLink; }
diff --git a/JavaScriptCore/jit/JSInterfaceJIT.h b/JavaScriptCore/jit/JSInterfaceJIT.h
index c85b94d..031bfa8 100644
--- a/JavaScriptCore/jit/JSInterfaceJIT.h
+++ b/JavaScriptCore/jit/JSInterfaceJIT.h
@@ -177,7 +177,7 @@ namespace JSC {
};
struct ThunkHelpers {
- static unsigned stringImplDataOffset() { return WebCore::StringImpl::dataOffset(); }
+ static unsigned stringImplDataOffset() { return StringImpl::dataOffset(); }
static unsigned jsStringLengthOffset() { return OBJECT_OFFSETOF(JSString, m_length); }
static unsigned jsStringValueOffset() { return OBJECT_OFFSETOF(JSString, m_value); }
};
diff --git a/JavaScriptCore/jit/SpecializedThunkJIT.h b/JavaScriptCore/jit/SpecializedThunkJIT.h
index 00f7aef..ba95498 100644
--- a/JavaScriptCore/jit/SpecializedThunkJIT.h
+++ b/JavaScriptCore/jit/SpecializedThunkJIT.h
@@ -129,7 +129,10 @@ namespace JSC {
MacroAssemblerCodePtr finalize(MacroAssemblerCodePtr fallback)
{
- LinkBuffer patchBuffer(this, m_pool.get());
+ LinkBuffer patchBuffer(this, m_pool.get(), 0);
+ // We can't continue if we can't call a function!
+ if (!patchBuffer.allocationSuccessful())
+ CRASH();
patchBuffer.link(m_failures, CodeLocationLabel(fallback));
return patchBuffer.finalizeCode().m_code;
}
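LinkBuffer construction can now fail (the extra constructor argument and allocationSuccessful() come from the LinkBuffer changes elsewhere in this patch). A thunk has no fallback, hence the CRASH(); regular compilation paths instead report failure upward, roughly like so (stand-in type, sketch only):

    struct LinkBufferLike {
        bool ok;
        bool allocationSuccessful() const { return ok; }
    };

    static bool finalizeIfPossible(LinkBufferLike& patchBuffer)
    {
        if (!patchBuffer.allocationSuccessful())
            return false; // a privateCompile* helper would return false here
        // ... link jumps/calls and finalize the code ...
        return true;
    }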
diff --git a/JavaScriptCore/jsc.pro b/JavaScriptCore/jsc.pro
index 20b8521..6f3831e 100644
--- a/JavaScriptCore/jsc.pro
+++ b/JavaScriptCore/jsc.pro
@@ -30,4 +30,8 @@ symbian {
mac {
LIBS_PRIVATE += -framework AppKit
-}
\ No newline at end of file
+}
+
+# Prevent warnings about differences in symbol visibility on Mac OS X
+contains(QT_CONFIG, reduce_exports):CONFIG += hide_symbols
+unix:contains(QT_CONFIG, reduce_relocations):CONFIG += bsymbolic_functions
diff --git a/JavaScriptCore/parser/Lexer.cpp b/JavaScriptCore/parser/Lexer.cpp
index d7a122e..877e89a 100644
--- a/JavaScriptCore/parser/Lexer.cpp
+++ b/JavaScriptCore/parser/Lexer.cpp
@@ -399,6 +399,60 @@ inline void Lexer::record16(int c)
record16(UChar(static_cast<unsigned short>(c)));
}
+ALWAYS_INLINE JSTokenType Lexer::parseIdentifier(JSTokenData* lvalp, LexType lexType)
+{
+ bool bufferRequired = false;
+ const UChar* identifierStart = currentCharacter();
+ int identifierLength;
+
+ while (true) {
+ if (LIKELY(isIdentPart(m_current))) {
+ shift();
+ continue;
+ }
+ if (LIKELY(m_current != '\\'))
+ break;
+
+ // \uXXXX Unicode escape sequences.
+ bufferRequired = true;
+ if (identifierStart != currentCharacter())
+ m_buffer16.append(identifierStart, currentCharacter() - identifierStart);
+ shift();
+ if (UNLIKELY(m_current != 'u'))
+ return ERRORTOK;
+ shift();
+ int character = getUnicodeCharacter();
+ if (UNLIKELY(character == -1))
+ return ERRORTOK;
+ if (UNLIKELY(m_buffer16.size() ? !isIdentPart(character) : !isIdentStart(character)))
+ return ERRORTOK;
+ record16(character);
+ identifierStart = currentCharacter();
+ }
+
+ if (!bufferRequired)
+ identifierLength = currentCharacter() - identifierStart;
+ else {
+ if (identifierStart != currentCharacter())
+ m_buffer16.append(identifierStart, currentCharacter() - identifierStart);
+ identifierStart = m_buffer16.data();
+ identifierLength = m_buffer16.size();
+ }
+
+ const Identifier* ident = makeIdentifier(identifierStart, identifierLength);
+ lvalp->ident = ident;
+ m_delimited = false;
+
+ if (LIKELY(!bufferRequired && lexType == IdentifyReservedWords)) {
+ // Keywords must not be recognized if there was a \uXXXX escape in the identifier.
+ const HashEntry* entry = m_keywordTable.entry(m_globalData, *ident);
+ return entry ? static_cast<JSTokenType>(entry->lexerValue()) : IDENT;
+ }
+
+ m_buffer16.resize(0);
+ return IDENT;
+}
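getUnicodeCharacter() is not shown in this hunk; for orientation, a minimal decoder for the \uXXXX form it consumes could look like the following (hypothetical helper, not the Lexer's actual implementation):

    #include <cctype>

    // Decode exactly four hex digits into a UTF-16 code unit, or return -1
    // on malformed input - the same failure convention the lexer checks for.
    static int decodeUnicodeEscape(const char* p)
    {
        int value = 0;
        for (int i = 0; i < 4; ++i) {
            unsigned char c = static_cast<unsigned char>(p[i]);
            if (!isxdigit(c))
                return -1;
            int digit = c <= '9' ? c - '0' : (tolower(c) - 'a' + 10);
            value = (value << 4) | digit;
        }
        return value;
    }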
+
ALWAYS_INLINE bool Lexer::parseString(JSTokenData* lvalp)
{
int stringQuoteCharacter = m_current;
@@ -488,7 +542,6 @@ JSTokenType Lexer::lex(JSTokenData* lvalp, JSTokenInfo* llocp, LexType lexType)
ASSERT(m_buffer16.isEmpty());
JSTokenType token = ERRORTOK;
- int identChar = 0;
m_terminator = false;
start:
@@ -753,8 +806,6 @@ start:
shift();
token = CLOSEBRACE;
break;
- case CharacterBackSlash:
- goto startIdentifierWithBackslash;
case CharacterZero:
goto startNumberWithZeroDigit;
case CharacterNumber:
@@ -768,7 +819,10 @@ start:
break;
case CharacterIdentifierStart:
ASSERT(isIdentStart(m_current));
- goto startIdentifierOrKeyword;
+ // Fall through into CharacterBackSlash.
+ case CharacterBackSlash:
+ token = parseIdentifier(lvalp, lexType);
+ break;
case CharacterLineTerminator:
ASSERT(isLineTerminator(m_current));
shiftLineTerminator();
@@ -789,53 +843,6 @@ start:
m_atLineStart = false;
goto returnToken;
-startIdentifierWithBackslash: {
- shift();
- if (UNLIKELY(m_current != 'u'))
- goto returnError;
- shift();
-
- identChar = getUnicodeCharacter();
- if (UNLIKELY(identChar == -1))
- goto returnError;
- if (UNLIKELY(!isIdentStart(identChar)))
- goto returnError;
- goto inIdentifierAfterCharacterCheck;
-}
-
-startIdentifierOrKeyword: {
- const UChar* identifierStart = currentCharacter();
- shift();
- while (isIdentPart(m_current))
- shift();
- if (LIKELY(m_current != '\\')) {
- // Fast case for idents which does not contain \uCCCC characters
- lvalp->ident = makeIdentifier(identifierStart, currentCharacter() - identifierStart);
- goto doneIdentifierOrKeyword;
- }
- m_buffer16.append(identifierStart, currentCharacter() - identifierStart);
-}
-
- do {
- shift();
- if (UNLIKELY(m_current != 'u'))
- goto returnError;
- shift();
- identChar = getUnicodeCharacter();
- if (UNLIKELY(identChar == -1))
- goto returnError;
- if (UNLIKELY(!isIdentPart(identChar)))
- goto returnError;
-inIdentifierAfterCharacterCheck:
- record16(identChar);
-
- while (isIdentPart(m_current)) {
- record16(m_current);
- shift();
- }
- } while (UNLIKELY(m_current == '\\'));
- goto doneIdentifier;
-
inSingleLineComment:
while (!isLineTerminator(m_current)) {
if (UNLIKELY(m_current == -1))
@@ -1008,27 +1015,7 @@ doneNumeric:
doneSemicolon:
token = SEMICOLON;
m_delimited = true;
- goto returnToken;
-
-doneIdentifier:
- m_atLineStart = false;
- m_delimited = false;
- lvalp->ident = makeIdentifier(m_buffer16.data(), m_buffer16.size());
- m_buffer16.resize(0);
- token = IDENT;
- goto returnToken;
-
-doneIdentifierOrKeyword: {
- m_atLineStart = false;
- m_delimited = false;
- m_buffer16.resize(0);
- if (lexType == IdentifyReservedWords) {
- const HashEntry* entry = m_keywordTable.entry(m_globalData, *lvalp->ident);
- token = entry ? static_cast<JSTokenType>(entry->lexerValue()) : IDENT;
- } else
- token = IDENT;
// Fall through into returnToken.
-}
returnToken: {
int lineNumber = m_lineNumber;
diff --git a/JavaScriptCore/parser/Lexer.h b/JavaScriptCore/parser/Lexer.h
index 4f7af44..3d97cc1 100644
--- a/JavaScriptCore/parser/Lexer.h
+++ b/JavaScriptCore/parser/Lexer.h
@@ -94,6 +94,7 @@ namespace JSC {
ALWAYS_INLINE bool lastTokenWasRestrKeyword() const;
+ ALWAYS_INLINE JSTokenType parseIdentifier(JSTokenData*, LexType);
ALWAYS_INLINE bool parseString(JSTokenData* lvalp);
static const size_t initialReadBufferCapacity = 32;
diff --git a/JavaScriptCore/profiler/ProfilerServer.mm b/JavaScriptCore/profiler/ProfilerServer.mm
index a3944de..7d87f96 100644
--- a/JavaScriptCore/profiler/ProfilerServer.mm
+++ b/JavaScriptCore/profiler/ProfilerServer.mm
@@ -30,7 +30,7 @@
#import "JSRetainPtr.h"
#import <Foundation/Foundation.h>
-#if PLATFORM(IPHONE_SIMULATOR)
+#if PLATFORM(IOS_SIMULATOR)
#import <Foundation/NSDistributedNotificationCenter.h>
#endif
@@ -65,7 +65,7 @@
if ([defaults boolForKey:@"EnableJSProfiling"])
[self startProfiling];
-#if !PLATFORM(IPHONE) || PLATFORM(IPHONE_SIMULATOR)
+#if !PLATFORM(IOS) || PLATFORM(IOS_SIMULATOR)
// FIXME: <rdar://problem/6546135>
// The catch-all notifications
[[NSDistributedNotificationCenter defaultCenter] addObserver:self selector:@selector(startProfiling) name:@"ProfilerServerStartNotification" object:nil];
@@ -76,7 +76,7 @@
NSProcessInfo *processInfo = [NSProcessInfo processInfo];
_serverName = [[NSString alloc] initWithFormat:@"ProfilerServer-%d", [processInfo processIdentifier]];
-#if !PLATFORM(IPHONE) || PLATFORM(IPHONE_SIMULATOR)
+#if !PLATFORM(IOS) || PLATFORM(IOS_SIMULATOR)
// FIXME: <rdar://problem/6546135>
[[NSDistributedNotificationCenter defaultCenter] addObserver:self selector:@selector(startProfiling) name:[_serverName stringByAppendingString:@"-Start"] object:nil];
[[NSDistributedNotificationCenter defaultCenter] addObserver:self selector:@selector(stopProfiling) name:[_serverName stringByAppendingString:@"-Stop"] object:nil];
diff --git a/JavaScriptCore/runtime/AlignedMemoryAllocator.h b/JavaScriptCore/runtime/AlignedMemoryAllocator.h
new file mode 100644
index 0000000..e682eb3
--- /dev/null
+++ b/JavaScriptCore/runtime/AlignedMemoryAllocator.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef AlignedMemoryAllocator_h
+#define AlignedMemoryAllocator_h
+
+#include <wtf/Bitmap.h>
+#include <wtf/PageReservation.h>
+
+namespace JSC {
+
+struct AlignedMemoryAllocatorConstants {
+// Set sane defaults if -D<flagname=value> wasn't provided via compiler args
+#if defined(JSCCOLLECTOR_VIRTUALMEM_RESERVATION)
+ // Keep backwards compatibility with the Symbian build system
+ static const size_t virtualMemoryReservation = JSCCOLLECTOR_VIRTUALMEM_RESERVATION;
+#elif defined(__WINS__)
+ // Emulator has limited virtual address space
+ static const size_t virtualMemoryReservation = 0x400000;
+#else
+ // HW has plenty of virtual addresses
+ static const size_t virtualMemoryReservation = 0x8000000;
+#endif
+};
+
+template<size_t blockSize> class AlignedMemory;
+template<size_t blockSize> class AlignedMemoryAllocator;
+
+#if HAVE(PAGE_ALLOCATE_ALIGNED)
+
+template<size_t blockSize>
+class AlignedMemoryAllocator;
+
+template<size_t blockSize>
+class AlignedMemory {
+public:
+ void deallocate();
+ void* base();
+
+private:
+ friend class AlignedMemoryAllocator<blockSize>;
+
+ AlignedMemory(PageAllocation);
+
+ PageAllocation m_allocation;
+};
+
+template<size_t blockSize>
+class AlignedMemoryAllocator {
+public:
+ void destroy();
+ AlignedMemory<blockSize> allocate();
+};
+
+template<size_t blockSize>
+inline void AlignedMemoryAllocator<blockSize>::destroy()
+{
+}
+
+template<size_t blockSize>
+inline AlignedMemory<blockSize> AlignedMemoryAllocator<blockSize>::allocate()
+{
+ return AlignedMemory<blockSize>(PageAllocation::allocateAligned(blockSize, PageAllocation::JSGCHeapPages));
+}
+
+template<size_t blockSize>
+inline void AlignedMemory<blockSize>::deallocate()
+{
+ m_allocation.deallocate();
+}
+
+template<size_t blockSize>
+inline void* AlignedMemory<blockSize>::base()
+{
+ return m_allocation.base();
+}
+
+template<size_t blockSize>
+inline AlignedMemory<blockSize>::AlignedMemory(PageAllocation allocation)
+ : m_allocation(allocation)
+{
+}
+
+#else
+
+template<size_t blockSize>
+class AlignedMemory {
+public:
+ void deallocate();
+ void* base();
+
+private:
+ friend class AlignedMemoryAllocator<blockSize>;
+
+ AlignedMemory(void* base, AlignedMemoryAllocator<blockSize>* allocator);
+
+ void* m_base;
+ AlignedMemoryAllocator<blockSize>* m_allocator;
+};
+
+template<size_t blockSize>
+class AlignedMemoryAllocator {
+public:
+ AlignedMemoryAllocator();
+ ~AlignedMemoryAllocator();
+
+ void destroy();
+ AlignedMemory<blockSize> allocate();
+ void free(AlignedMemory<blockSize>);
+
+private:
+ static const size_t reservationSize = AlignedMemoryAllocatorConstants::virtualMemoryReservation;
+ static const size_t bitmapSize = reservationSize / blockSize;
+
+ PageReservation m_reservation;
+ size_t m_nextFree;
+ uintptr_t m_reservationBase;
+ WTF::Bitmap<bitmapSize> m_bitmap;
+};
+
+template<size_t blockSize>
+AlignedMemoryAllocator<blockSize>::AlignedMemoryAllocator()
+ : m_reservation(PageReservation::reserve(reservationSize + blockSize, PageAllocation::JSGCHeapPages))
+ , m_nextFree(0)
+{
+ // check that blockSize and reservationSize are powers of two
+ ASSERT(!(blockSize & (blockSize - 1)));
+ ASSERT(!(reservationSize & (reservationSize - 1)));
+
+ // check that blockSize is a multiple of pageSize and that
+ // reservationSize is a multiple of blockSize
+ ASSERT(!(blockSize & (PageAllocation::pageSize() - 1)));
+ ASSERT(!(reservationSize & (blockSize - 1)));
+
+ ASSERT(m_reservation);
+
+ m_reservationBase = reinterpret_cast<uintptr_t>(m_reservation.base());
+ m_reservationBase = (m_reservationBase + blockSize) & ~(blockSize - 1);
+}
+
+template<size_t blockSize>
+AlignedMemoryAllocator<blockSize>::~AlignedMemoryAllocator()
+{
+ destroy();
+ m_reservation.deallocate();
+}
+
+template<size_t blockSize>
+inline void AlignedMemoryAllocator<blockSize>::destroy()
+{
+ for (unsigned i = 0; i < bitmapSize; ++i) {
+ if (m_bitmap.get(i)) {
+ void* blockAddress = reinterpret_cast<void*>(m_reservationBase + i * blockSize);
+ m_reservation.decommit(blockAddress, blockSize);
+
+ m_bitmap.clear(i);
+ }
+ }
+}
+
+template<size_t blockSize>
+AlignedMemory<blockSize> AlignedMemoryAllocator<blockSize>::allocate()
+{
+ while (m_nextFree < bitmapSize) {
+ if (!m_bitmap.get(m_nextFree)) {
+ void* blockAddress = reinterpret_cast<void*>(m_reservationBase + m_nextFree * blockSize);
+ m_reservation.commit(blockAddress, blockSize);
+
+ m_bitmap.set(m_nextFree);
+ ++m_nextFree;
+
+ return AlignedMemory<blockSize>(blockAddress, this);
+ }
+ m_bitmap.advanceToNextFreeBit(m_nextFree);
+ }
+
+ if (m_bitmap.isFull())
+ return AlignedMemory<blockSize>(0, this);
+
+ m_nextFree = 0;
+
+ return allocate();
+}
+
+template<size_t blockSize>
+void AlignedMemoryAllocator<blockSize>::free(AlignedMemory<blockSize> allocation)
+{
+ ASSERT(allocation.base());
+ m_reservation.decommit(allocation.base(), blockSize);
+
+ size_t diff = (reinterpret_cast<uintptr_t>(allocation.base()) - m_reservationBase);
+ ASSERT(!(diff & (blockSize - 1)));
+
+ size_t i = diff / blockSize;
+ ASSERT(m_bitmap.get(i));
+
+ m_bitmap.clear(i);
+}
+
+template<size_t blockSize>
+inline void AlignedMemory<blockSize>::deallocate()
+{
+ m_allocator->free(*this);
+}
+
+template<size_t blockSize>
+inline void* AlignedMemory<blockSize>::base()
+{
+ return m_base;
+}
+
+template<size_t blockSize>
+AlignedMemory<blockSize>::AlignedMemory(void* base, AlignedMemoryAllocator<blockSize>* allocator)
+ : m_base(base)
+ , m_allocator(allocator)
+{
+}
+
+#endif
+
+}
+
+#endif
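Both implementations present the same two types to the collector; a usage sketch (block size and error handling illustrative):

    #include "AlignedMemoryAllocator.h"

    void example()
    {
        // Blocks are aligned to their own size, which is what lets the
        // collector mask a cell pointer down to its CollectorBlock.
        JSC::AlignedMemoryAllocator<256 * 1024> allocator;

        JSC::AlignedMemory<256 * 1024> block = allocator.allocate();
        if (!block.base())
            return;           // reservation exhausted (bitmap-backed path)

        // ... install a CollectorBlock at block.base() ...

        block.deallocate();   // decommits (or unmaps) the block
    }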
diff --git a/JavaScriptCore/runtime/Collector.cpp b/JavaScriptCore/runtime/Collector.cpp
index 38f3ce5..4a81913 100644
--- a/JavaScriptCore/runtime/Collector.cpp
+++ b/JavaScriptCore/runtime/Collector.cpp
@@ -25,6 +25,7 @@
#include "CallFrame.h"
#include "CodeBlock.h"
#include "CollectorHeapIterator.h"
+#include "GCActivityCallback.h"
#include "Interpreter.h"
#include "JSArray.h"
#include "JSGlobalObject.h"
@@ -135,14 +136,13 @@ Heap::Heap(JSGlobalData* globalData)
, m_registeredThreads(0)
, m_currentThreadRegistrar(0)
#endif
-#if OS(SYMBIAN)
- , m_blockallocator(JSCCOLLECTOR_VIRTUALMEM_RESERVATION, BLOCK_SIZE)
-#endif
, m_globalData(globalData)
{
ASSERT(globalData);
memset(&m_heap, 0, sizeof(CollectorHeap));
allocateBlock();
+ m_activityCallback = DefaultGCActivityCallback::create(this);
+ (*m_activityCallback)();
}
Heap::~Heap()
@@ -170,6 +170,9 @@ void Heap::destroy()
freeBlocks();
+ for (unsigned i = 0; i < m_weakGCHandlePools.size(); ++i)
+ m_weakGCHandlePools[i].deallocate();
+
#if ENABLE(JSC_MULTIPLE_THREADS)
if (m_currentThreadRegistrar) {
int error = pthread_key_delete(m_currentThreadRegistrar);
@@ -183,63 +186,19 @@ void Heap::destroy()
t = next;
}
#endif
-#if OS(SYMBIAN)
m_blockallocator.destroy();
-#endif
m_globalData = 0;
}
NEVER_INLINE CollectorBlock* Heap::allocateBlock()
{
-#if OS(DARWIN)
- vm_address_t address = 0;
- vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE | VM_TAG_FOR_COLLECTOR_MEMORY, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
-#elif OS(SYMBIAN)
- void* address = m_blockallocator.alloc();
- if (!address)
+ AlignedCollectorBlock allocation = m_blockallocator.allocate();
+ CollectorBlock* block = static_cast<CollectorBlock*>(allocation.base());
+ if (!block)
CRASH();
-#elif OS(WINCE)
- void* address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
-#elif OS(WINDOWS)
-#if COMPILER(MINGW) && !COMPILER(MINGW64)
- void* address = __mingw_aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
-#else
- void* address = _aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
-#endif
- memset(address, 0, BLOCK_SIZE);
-#elif HAVE(POSIX_MEMALIGN)
- void* address;
- posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
-#else
-
-#if ENABLE(JSC_MULTIPLE_THREADS)
-#error Need to initialize pagesize safely.
-#endif
- static size_t pagesize = getpagesize();
-
- size_t extra = 0;
- if (BLOCK_SIZE > pagesize)
- extra = BLOCK_SIZE - pagesize;
-
- void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
- uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);
-
- size_t adjust = 0;
- if ((address & BLOCK_OFFSET_MASK) != 0)
- adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);
-
- if (adjust > 0)
- munmap(reinterpret_cast<char*>(address), adjust);
-
- if (adjust < extra)
- munmap(reinterpret_cast<char*>(address + adjust + BLOCK_SIZE), extra - adjust);
-
- address += adjust;
-#endif
// Initialize block.
- CollectorBlock* block = reinterpret_cast<CollectorBlock*>(address);
block->heap = this;
clearMarkBits(block);
@@ -251,14 +210,14 @@ NEVER_INLINE CollectorBlock* Heap::allocateBlock()
size_t numBlocks = m_heap.numBlocks;
if (m_heap.usedBlocks == numBlocks) {
- static const size_t maxNumBlocks = ULONG_MAX / sizeof(CollectorBlock*) / GROWTH_FACTOR;
+ static const size_t maxNumBlocks = ULONG_MAX / sizeof(AlignedCollectorBlock) / GROWTH_FACTOR;
if (numBlocks > maxNumBlocks)
CRASH();
numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
m_heap.numBlocks = numBlocks;
- m_heap.blocks = static_cast<CollectorBlock**>(fastRealloc(m_heap.blocks, numBlocks * sizeof(CollectorBlock*)));
+ m_heap.blocks = static_cast<AlignedCollectorBlock*>(fastRealloc(m_heap.blocks, numBlocks * sizeof(AlignedCollectorBlock)));
}
- m_heap.blocks[m_heap.usedBlocks++] = block;
+ m_heap.blocks[m_heap.usedBlocks++] = allocation;
return block;
}
@@ -271,7 +230,7 @@ NEVER_INLINE void Heap::freeBlock(size_t block)
ObjectIterator end(m_heap, block + 1);
for ( ; it != end; ++it)
(*it)->~JSCell();
- freeBlockPtr(m_heap.blocks[block]);
+ m_heap.blocks[block].deallocate();
// swap with the last block so we compact as we go
m_heap.blocks[block] = m_heap.blocks[m_heap.usedBlocks - 1];
@@ -279,31 +238,10 @@ NEVER_INLINE void Heap::freeBlock(size_t block)
if (m_heap.numBlocks > MIN_ARRAY_SIZE && m_heap.usedBlocks < m_heap.numBlocks / LOW_WATER_FACTOR) {
m_heap.numBlocks = m_heap.numBlocks / GROWTH_FACTOR;
- m_heap.blocks = static_cast<CollectorBlock**>(fastRealloc(m_heap.blocks, m_heap.numBlocks * sizeof(CollectorBlock*)));
+ m_heap.blocks = static_cast<AlignedCollectorBlock*>(fastRealloc(m_heap.blocks, m_heap.numBlocks * sizeof(AlignedCollectorBlock)));
}
}
-NEVER_INLINE void Heap::freeBlockPtr(CollectorBlock* block)
-{
-#if OS(DARWIN)
- vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
-#elif OS(SYMBIAN)
- m_blockallocator.free(reinterpret_cast<void*>(block));
-#elif OS(WINCE)
- VirtualFree(block, 0, MEM_RELEASE);
-#elif OS(WINDOWS)
-#if COMPILER(MINGW) && !COMPILER(MINGW64)
- __mingw_aligned_free(block);
-#else
- _aligned_free(block);
-#endif
-#elif HAVE(POSIX_MEMALIGN)
- free(block);
-#else
- munmap(reinterpret_cast<char*>(block), BLOCK_SIZE);
-#endif
-}
-
void Heap::freeBlocks()
{
ProtectCountSet protectedValuesCopy = m_protectedValues;
@@ -327,7 +265,7 @@ void Heap::freeBlocks()
it->first->~JSCell();
for (size_t block = 0; block < m_heap.usedBlocks; ++block)
- freeBlockPtr(m_heap.blocks[block]);
+ m_heap.blocks[block].deallocate();
fastFree(m_heap.blocks);
@@ -380,7 +318,7 @@ allocate:
do {
ASSERT(m_heap.nextBlock < m_heap.usedBlocks);
- Block* block = reinterpret_cast<Block*>(m_heap.blocks[m_heap.nextBlock]);
+ Block* block = m_heap.collectorBlock(m_heap.nextBlock);
do {
ASSERT(m_heap.nextCell < HeapConstants::cellsPerBlock);
if (!block->marked.get(m_heap.nextCell)) { // Always false for the last cell in the block
@@ -435,10 +373,10 @@ void Heap::shrinkBlocks(size_t neededBlocks)
// Clear the always-on last bit, so isEmpty() isn't fooled by it.
for (size_t i = 0; i < m_heap.usedBlocks; ++i)
- m_heap.blocks[i]->marked.clear(HeapConstants::cellsPerBlock - 1);
+ m_heap.collectorBlock(i)->marked.clear(HeapConstants::cellsPerBlock - 1);
for (size_t i = 0; i != m_heap.usedBlocks && m_heap.usedBlocks != neededBlocks; ) {
- if (m_heap.blocks[i]->marked.isEmpty()) {
+ if (m_heap.collectorBlock(i)->marked.isEmpty()) {
freeBlock(i);
} else
++i;
@@ -446,7 +384,7 @@ void Heap::shrinkBlocks(size_t neededBlocks)
// Reset the always-on last bit.
for (size_t i = 0; i < m_heap.usedBlocks; ++i)
- m_heap.blocks[i]->marked.set(HeapConstants::cellsPerBlock - 1);
+ m_heap.collectorBlock(i)->marked.set(HeapConstants::cellsPerBlock - 1);
}
#if OS(WINCE)
@@ -741,7 +679,6 @@ void Heap::markConservatively(MarkStack& markStack, void* start, void* end)
char** p = static_cast<char**>(start);
char** e = static_cast<char**>(end);
- CollectorBlock** blocks = m_heap.blocks;
while (p != e) {
char* x = *p++;
if (isPossibleCell(x)) {
@@ -757,7 +694,7 @@ void Heap::markConservatively(MarkStack& markStack, void* start, void* end)
CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset);
usedBlocks = m_heap.usedBlocks;
for (size_t block = 0; block < usedBlocks; block++) {
- if (blocks[block] != blockAddr)
+ if (m_heap.collectorBlock(block) != blockAddr)
continue;
markStack.append(reinterpret_cast<JSCell*>(xAsBits));
markStack.drain();
@@ -972,6 +909,36 @@ void Heap::markStackObjectsConservatively(MarkStack& markStack)
#endif
}
+void Heap::updateWeakGCHandles()
+{
+ for (unsigned i = 0; i < m_weakGCHandlePools.size(); ++i)
+ weakGCHandlePool(i)->update();
+}
+
+void WeakGCHandlePool::update()
+{
+ for (unsigned i = 1; i < WeakGCHandlePool::numPoolEntries; ++i) {
+ if (m_entries[i].isValidPtr()) {
+ JSCell* cell = m_entries[i].get();
+ if (!cell || !Heap::isCellMarked(cell))
+ m_entries[i].invalidate();
+ }
+ }
+}
+
+WeakGCHandle* Heap::addWeakGCHandle(JSCell* ptr)
+{
+ for (unsigned i = 0; i < m_weakGCHandlePools.size(); ++i)
+ if (!weakGCHandlePool(i)->isFull())
+ return weakGCHandlePool(i)->allocate(ptr);
+
+ AlignedMemory<WeakGCHandlePool::poolSize> allocation = m_weakGCHandlePoolAllocator.allocate();
+ m_weakGCHandlePools.append(allocation);
+
+ WeakGCHandlePool* pool = new (allocation) WeakGCHandlePool();
+ return pool->allocate(ptr);
+}
+
void Heap::protect(JSValue k)
{
ASSERT(k);
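addWeakGCHandle() and WeakGCHandlePool::update() together give the heap zeroing weak references: the pool hands out a slot, and every mark phase invalidates slots whose cells were not marked. A consumer would look roughly like this (sketch; WeakGCHandle::get() and isValidPtr() as used in update() above):

    // Return the cached cell if it survived the last collection, otherwise
    // re-register a fresh one. Pools grow on demand inside addWeakGCHandle().
    JSCell* cachedOrFresh(Heap& heap, WeakGCHandle*& handle, JSCell* freshCell)
    {
        if (handle && handle->isValidPtr())
            return handle->get();
        handle = heap.addWeakGCHandle(freshCell);
        return freshCell;
    }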
@@ -1006,7 +973,7 @@ void Heap::markProtectedObjects(MarkStack& markStack)
void Heap::clearMarkBits()
{
for (size_t i = 0; i < m_heap.usedBlocks; ++i)
- clearMarkBits(m_heap.blocks[i]);
+ clearMarkBits(m_heap.collectorBlock(i));
}
void Heap::clearMarkBits(CollectorBlock* block)
@@ -1025,9 +992,9 @@ size_t Heap::markedCells(size_t startBlock, size_t startCell) const
return 0;
size_t result = 0;
- result += m_heap.blocks[startBlock]->marked.count(startCell);
+ result += m_heap.collectorBlock(startBlock)->marked.count(startCell);
for (size_t i = startBlock + 1; i < m_heap.usedBlocks; ++i)
- result += m_heap.blocks[i]->marked.count();
+ result += m_heap.collectorBlock(i)->marked.count();
return result;
}
@@ -1108,6 +1075,8 @@ void Heap::markRoots()
markStack.drain();
markStack.compact();
+ updateWeakGCHandles();
+
m_heap.operationInProgress = NoOperation;
}
@@ -1236,6 +1205,8 @@ void Heap::reset()
resizeBlocks();
JAVASCRIPTCORE_GC_END();
+
+ (*m_activityCallback)();
}
void Heap::collectAllGarbage()
@@ -1272,4 +1243,9 @@ LiveObjectIterator Heap::primaryHeapEnd()
return LiveObjectIterator(m_heap, m_heap.usedBlocks);
}
+void Heap::setActivityCallback(PassOwnPtr<GCActivityCallback> activityCallback)
+{
+ m_activityCallback = activityCallback;
+}
+
} // namespace JSC
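The heap now pings an activity callback at construction time and at the end of every reset(), which is the hook DefaultGCActivityCallback uses to schedule timer-driven collection. Embedders can substitute their own policy via setActivityCallback(); a minimal sketch, assuming the base class exposes a virtual operator() as the two call sites above suggest:

    // Sketch only: a policy object that merely counts heap activity.
    class CountingActivityCallback : public GCActivityCallback {
    public:
        CountingActivityCallback() : m_count(0) { }
        virtual void operator()() { ++m_count; } // invoked by the heap, see above
        unsigned count() const { return m_count; }
    private:
        unsigned m_count;
    };

    // Installation: heap.setActivityCallback(adoptPtr(new CountingActivityCallback));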
diff --git a/JavaScriptCore/runtime/Collector.h b/JavaScriptCore/runtime/Collector.h
index 1dc9445..38c178b 100644
--- a/JavaScriptCore/runtime/Collector.h
+++ b/JavaScriptCore/runtime/Collector.h
@@ -22,13 +22,18 @@
#ifndef Collector_h
#define Collector_h
+#include "AlignedMemoryAllocator.h"
+#include "GCHandle.h"
#include <stddef.h>
#include <string.h>
+#include <wtf/Bitmap.h>
#include <wtf/FixedArray.h>
#include <wtf/HashCountedSet.h>
#include <wtf/HashSet.h>
#include <wtf/Noncopyable.h>
#include <wtf/OwnPtr.h>
+#include <wtf/PageAllocation.h>
+#include <wtf/PassOwnPtr.h>
#include <wtf/StdLibExtras.h>
#include <wtf/Threading.h>
@@ -36,15 +41,12 @@
#include <pthread.h>
#endif
-#if OS(SYMBIAN)
-#include <wtf/symbian/BlockAllocatorSymbian.h>
-#endif
-
#define ASSERT_CLASS_FITS_IN_CELL(class) COMPILE_ASSERT(sizeof(class) <= CELL_SIZE, class_fits_in_cell)
namespace JSC {
class CollectorBlock;
+ class GCActivityCallback;
class JSCell;
class JSGlobalData;
class JSValue;
@@ -55,10 +57,19 @@ namespace JSC {
class LiveObjectIterator;
+#if OS(WINCE) || OS(SYMBIAN)
+ const size_t BLOCK_SIZE = 64 * 1024; // 64k
+#else
+ const size_t BLOCK_SIZE = 256 * 1024; // 256k
+#endif
+
+ typedef AlignedMemoryAllocator<BLOCK_SIZE> CollectorBlockAllocator;
+ typedef AlignedMemory<BLOCK_SIZE> AlignedCollectorBlock;
+
struct CollectorHeap {
size_t nextBlock;
size_t nextCell;
- CollectorBlock** blocks;
+ AlignedCollectorBlock* blocks;
void* nextNumber;
@@ -69,6 +80,11 @@ namespace JSC {
bool didShrink;
OperationInProgress operationInProgress;
+
+ CollectorBlock* collectorBlock(size_t index) const
+ {
+ return static_cast<CollectorBlock*>(blocks[index].base());
+ }
};
class Heap : public Noncopyable {
@@ -82,6 +98,7 @@ namespace JSC {
bool isBusy(); // true if an allocation or collection is in progress
void collectAllGarbage();
+ void setActivityCallback(PassOwnPtr<GCActivityCallback>);
static const size_t minExtraCost = 256;
static const size_t maxExtraCost = 1024 * 1024;
@@ -115,6 +132,8 @@ namespace JSC {
static bool isCellMarked(const JSCell*);
static void markCell(JSCell*);
+ WeakGCHandle* addWeakGCHandle(JSCell*);
+
void markConservatively(MarkStack&, void* start, void* end);
HashSet<MarkedArgumentBuffer*>& markListSet() { if (!m_markListSet) m_markListSet = new HashSet<MarkedArgumentBuffer*>; return *m_markListSet; }
@@ -137,7 +156,6 @@ namespace JSC {
NEVER_INLINE CollectorBlock* allocateBlock();
NEVER_INLINE void freeBlock(size_t);
- NEVER_INLINE void freeBlockPtr(CollectorBlock*);
void freeBlocks();
void resizeBlocks();
void growBlocks(size_t neededBlocks);
@@ -157,14 +175,20 @@ namespace JSC {
void markOtherThreadConservatively(MarkStack&, Thread*);
void markStackObjectsConservatively(MarkStack&);
+ void updateWeakGCHandles();
+ WeakGCHandlePool* weakGCHandlePool(size_t index);
+
typedef HashCountedSet<JSCell*> ProtectCountSet;
CollectorHeap m_heap;
ProtectCountSet m_protectedValues;
+ WTF::Vector<AlignedMemory<WeakGCHandlePool::poolSize> > m_weakGCHandlePools;
HashSet<MarkedArgumentBuffer*>* m_markListSet;
+ OwnPtr<GCActivityCallback> m_activityCallback;
+
#if ENABLE(JSC_MULTIPLE_THREADS)
void makeUsableFromMultipleThreads();
@@ -176,21 +200,14 @@ namespace JSC {
pthread_key_t m_currentThreadRegistrar;
#endif
-#if OS(SYMBIAN)
// Allocates collector blocks with correct alignment
- WTF::AlignedBlockAllocator m_blockallocator;
-#endif
+ CollectorBlockAllocator m_blockallocator;
+ WeakGCHandlePool::Allocator m_weakGCHandlePoolAllocator;
JSGlobalData* m_globalData;
};
// tunable parameters
-#if OS(WINCE) || OS(SYMBIAN)
- const size_t BLOCK_SIZE = 64 * 1024; // 64k
-#else
- const size_t BLOCK_SIZE = 64 * 4096; // 256k
-#endif
-
// derived constants
const size_t BLOCK_OFFSET_MASK = BLOCK_SIZE - 1;
const size_t BLOCK_MASK = ~BLOCK_OFFSET_MASK;
@@ -294,6 +311,11 @@ namespace JSC {
return result;
}
+
+ inline WeakGCHandlePool* Heap::weakGCHandlePool(size_t index)
+ {
+ return static_cast<WeakGCHandlePool*>(m_weakGCHandlePools[index].base());
+ }
} // namespace JSC
#endif /* Collector_h */
diff --git a/JavaScriptCore/runtime/CollectorHeapIterator.h b/JavaScriptCore/runtime/CollectorHeapIterator.h
index 9a3327c..9d107b7 100644
--- a/JavaScriptCore/runtime/CollectorHeapIterator.h
+++ b/JavaScriptCore/runtime/CollectorHeapIterator.h
@@ -77,7 +77,7 @@ namespace JSC {
inline JSCell* CollectorHeapIterator::operator*() const
{
- return reinterpret_cast<JSCell*>(&m_heap.blocks[m_block]->cells[m_cell]);
+ return reinterpret_cast<JSCell*>(&m_heap.collectorBlock(m_block)->cells[m_cell]);
}
// Iterators advance up to the next-to-last -- and not the last -- cell in a
@@ -103,7 +103,7 @@ namespace JSC {
if (m_block < m_heap.nextBlock || (m_block == m_heap.nextBlock && m_cell < m_heap.nextCell))
return *this;
- while (m_block < m_heap.usedBlocks && !m_heap.blocks[m_block]->marked.get(m_cell))
+ while (m_block < m_heap.usedBlocks && !m_heap.collectorBlock(m_block)->marked.get(m_cell))
advance(HeapConstants::cellsPerBlock - 1);
return *this;
}
@@ -119,7 +119,7 @@ namespace JSC {
do {
advance(HeapConstants::cellsPerBlock - 1);
ASSERT(m_block > m_heap.nextBlock || (m_block == m_heap.nextBlock && m_cell >= m_heap.nextCell));
- } while (m_block < m_heap.usedBlocks && m_heap.blocks[m_block]->marked.get(m_cell));
+ } while (m_block < m_heap.usedBlocks && m_heap.collectorBlock(m_block)->marked.get(m_cell));
return *this;
}
diff --git a/JavaScriptCore/runtime/ExceptionHelpers.cpp b/JavaScriptCore/runtime/ExceptionHelpers.cpp
index ebde320..3e0b70c 100644
--- a/JavaScriptCore/runtime/ExceptionHelpers.cpp
+++ b/JavaScriptCore/runtime/ExceptionHelpers.cpp
@@ -186,6 +186,11 @@ JSObject* createNotAnObjectError(ExecState* exec, JSNotAnObjectErrorStub* error,
return exception;
}
+JSObject* createOutOfMemoryError(JSGlobalObject* globalObject)
+{
+ return createError(globalObject, "Out of memory");
+}
+
JSValue throwOutOfMemoryError(ExecState* exec)
{
return throwError(exec, createError(exec, "Out of memory"));
diff --git a/JavaScriptCore/runtime/ExceptionHelpers.h b/JavaScriptCore/runtime/ExceptionHelpers.h
index 3e6de86..e4c94fb 100644
--- a/JavaScriptCore/runtime/ExceptionHelpers.h
+++ b/JavaScriptCore/runtime/ExceptionHelpers.h
@@ -53,6 +53,7 @@ namespace JSC {
JSObject* createNotAConstructorError(ExecState*, JSValue, unsigned bytecodeOffset, CodeBlock*);
JSValue createNotAFunctionError(ExecState*, JSValue, unsigned bytecodeOffset, CodeBlock*);
JSObject* createNotAnObjectError(ExecState*, JSNotAnObjectErrorStub*, unsigned bytecodeOffset, CodeBlock*);
+ JSObject* createOutOfMemoryError(JSGlobalObject*);
JSValue throwOutOfMemoryError(ExecState*);
} // namespace JSC
diff --git a/JavaScriptCore/runtime/Executable.cpp b/JavaScriptCore/runtime/Executable.cpp
index 229588b..058a091 100644
--- a/JavaScriptCore/runtime/Executable.cpp
+++ b/JavaScriptCore/runtime/Executable.cpp
@@ -116,6 +116,10 @@ JSObject* EvalExecutable::compileInternal(ExecState* exec, ScopeChainNode* scope
#if ENABLE(JIT)
if (exec->globalData().canUseJIT()) {
m_jitCodeForCall = JIT::compile(scopeChainNode->globalData, m_evalCodeBlock.get());
+ if (UNLIKELY(!m_jitCodeForCall)) {
+ m_evalCodeBlock.clear();
+ return createOutOfMemoryError(globalObject);
+ }
#if !ENABLE(OPCODE_SAMPLING)
if (!BytecodeGenerator::dumpsGeneratedCode())
m_evalCodeBlock->discardBytecode();
@@ -164,6 +168,10 @@ JSObject* ProgramExecutable::compileInternal(ExecState* exec, ScopeChainNode* sc
#if ENABLE(JIT)
if (exec->globalData().canUseJIT()) {
m_jitCodeForCall = JIT::compile(scopeChainNode->globalData, m_programCodeBlock.get());
+ if (UNLIKELY(!m_jitCodeForCall)) {
+ m_programCodeBlock.clear();
+ return createOutOfMemoryError(globalObject);
+ }
#if !ENABLE(OPCODE_SAMPLING)
if (!BytecodeGenerator::dumpsGeneratedCode())
m_programCodeBlock->discardBytecode();
@@ -192,7 +200,7 @@ JSObject* FunctionExecutable::compileForCallInternal(ExecState* exec, ScopeChain
JSGlobalObject* globalObject = scopeChain.globalObject();
ASSERT(!m_codeBlockForCall);
- m_codeBlockForCall = adoptPtr(new FunctionCodeBlock(this, FunctionCode, source().provider(), source().startOffset(), false));
+ m_codeBlockForCall = adoptPtr(new FunctionCodeBlock(this, FunctionCode, globalObject, source().provider(), source().startOffset(), false));
OwnPtr<BytecodeGenerator> generator(adoptPtr(new BytecodeGenerator(body.get(), globalObject->debugger(), scopeChain, m_codeBlockForCall->symbolTable(), m_codeBlockForCall.get())));
generator->generate();
m_numParametersForCall = m_codeBlockForCall->m_numParameters;
@@ -205,6 +213,10 @@ JSObject* FunctionExecutable::compileForCallInternal(ExecState* exec, ScopeChain
#if ENABLE(JIT)
if (exec->globalData().canUseJIT()) {
m_jitCodeForCall = JIT::compile(scopeChainNode->globalData, m_codeBlockForCall.get(), &m_jitCodeForCallWithArityCheck);
+ if (UNLIKELY(!m_jitCodeForCall)) {
+ m_codeBlockForCall.clear();
+ return createOutOfMemoryError(globalObject);
+ }
#if !ENABLE(OPCODE_SAMPLING)
if (!BytecodeGenerator::dumpsGeneratedCode())
m_codeBlockForCall->discardBytecode();
@@ -233,7 +245,7 @@ JSObject* FunctionExecutable::compileForConstructInternal(ExecState* exec, Scope
JSGlobalObject* globalObject = scopeChain.globalObject();
ASSERT(!m_codeBlockForConstruct);
- m_codeBlockForConstruct = adoptPtr(new FunctionCodeBlock(this, FunctionCode, source().provider(), source().startOffset(), true));
+ m_codeBlockForConstruct = adoptPtr(new FunctionCodeBlock(this, FunctionCode, globalObject, source().provider(), source().startOffset(), true));
OwnPtr<BytecodeGenerator> generator(adoptPtr(new BytecodeGenerator(body.get(), globalObject->debugger(), scopeChain, m_codeBlockForConstruct->symbolTable(), m_codeBlockForConstruct.get())));
generator->generate();
m_numParametersForConstruct = m_codeBlockForConstruct->m_numParameters;
@@ -246,6 +258,10 @@ JSObject* FunctionExecutable::compileForConstructInternal(ExecState* exec, Scope
#if ENABLE(JIT)
if (exec->globalData().canUseJIT()) {
m_jitCodeForConstruct = JIT::compile(scopeChainNode->globalData, m_codeBlockForConstruct.get(), &m_jitCodeForConstructWithArityCheck);
+ if (UNLIKELY(!m_jitCodeForConstruct)) {
+ m_codeBlockForConstruct.clear();
+ return createOutOfMemoryError(globalObject);
+ }
#if !ENABLE(OPCODE_SAMPLING)
if (!BytecodeGenerator::dumpsGeneratedCode())
m_codeBlockForConstruct->discardBytecode();
@@ -277,7 +293,7 @@ PassOwnPtr<ExceptionInfo> FunctionExecutable::reparseExceptionInfo(JSGlobalData*
ScopeChain scopeChain(scopeChainNode);
JSGlobalObject* globalObject = scopeChain.globalObject();
- OwnPtr<CodeBlock> newCodeBlock(adoptPtr(new FunctionCodeBlock(this, FunctionCode, source().provider(), source().startOffset(), codeBlock->m_isConstructor)));
+ OwnPtr<CodeBlock> newCodeBlock(adoptPtr(new FunctionCodeBlock(this, FunctionCode, globalObject, source().provider(), source().startOffset(), codeBlock->m_isConstructor)));
globalData->functionCodeBlockBeingReparsed = newCodeBlock.get();
OwnPtr<BytecodeGenerator> generator(adoptPtr(new BytecodeGenerator(newFunctionBody.get(), globalObject->debugger(), scopeChain, newCodeBlock->symbolTable(), newCodeBlock.get())));
@@ -288,13 +304,16 @@ PassOwnPtr<ExceptionInfo> FunctionExecutable::reparseExceptionInfo(JSGlobalData*
#if ENABLE(JIT)
if (globalData->canUseJIT()) {
- JITCode newJITCode = JIT::compile(globalData, newCodeBlock.get());
+ JITCode newJITCode = JIT::compile(globalData, newCodeBlock.get(), 0, codeBlock->m_isConstructor ? generatedJITCodeForConstruct().start() : generatedJITCodeForCall().start());
+ if (!newJITCode) {
+ globalData->functionCodeBlockBeingReparsed = 0;
+ return PassOwnPtr<ExceptionInfo>();
+ }
ASSERT(codeBlock->m_isConstructor ? newJITCode.size() == generatedJITCodeForConstruct().size() : newJITCode.size() == generatedJITCodeForCall().size());
}
#endif
globalData->functionCodeBlockBeingReparsed = 0;
-
return newCodeBlock->extractExceptionInfo();
}
@@ -318,7 +337,11 @@ PassOwnPtr<ExceptionInfo> EvalExecutable::reparseExceptionInfo(JSGlobalData* glo
#if ENABLE(JIT)
if (globalData->canUseJIT()) {
- JITCode newJITCode = JIT::compile(globalData, newCodeBlock.get());
+ JITCode newJITCode = JIT::compile(globalData, newCodeBlock.get(), 0, generatedJITCodeForCall().start());
+ if (!newJITCode) {
+ globalData->functionCodeBlockBeingReparsed = 0;
+ return PassOwnPtr<ExceptionInfo>();
+ }
ASSERT(newJITCode.size() == generatedJITCodeForCall().size());
}
#endif
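
All four hunks above apply the same recovery pattern: when JIT::compile() returns no code because the executable allocator is exhausted, the half-built code block is cleared and an out-of-memory error object is handed back instead of crashing later on a null code pointer. A minimal standalone sketch of that shape; CompiledCode, Error, tryCompile(), and compileInternal() are stand-ins here, not JSC names:

    #include <cstdio>
    #include <memory>

    struct CompiledCode {};                  // stand-in for JITCode
    struct Error { const char* message; };   // stand-in for the error JSObject

    // Stand-in for JIT::compile(): yields null when no executable
    // memory can be allocated.
    static std::unique_ptr<CompiledCode> tryCompile(bool simulateOOM)
    {
        return simulateOOM ? nullptr : std::make_unique<CompiledCode>();
    }

    // Mirrors the compileInternal() hunks: on failure the output slot is
    // left empty (the real code also clears its code block) and an error
    // is surfaced to the caller.
    static Error* compileInternal(std::unique_ptr<CompiledCode>& jitCode, bool simulateOOM)
    {
        static Error outOfMemory = { "out of memory" };
        jitCode = tryCompile(simulateOOM);
        if (!jitCode)
            return &outOfMemory;             // like createOutOfMemoryError()
        return nullptr;                      // success: no exception object
    }

    int main()
    {
        std::unique_ptr<CompiledCode> code;
        if (Error* error = compileInternal(code, /* simulateOOM */ true))
            std::printf("compile failed: %s\n", error->message);
        return 0;
    }
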
diff --git a/JavaScriptCore/runtime/GCActivityCallback.cpp b/JavaScriptCore/runtime/GCActivityCallback.cpp
new file mode 100644
index 0000000..2f2c079
--- /dev/null
+++ b/JavaScriptCore/runtime/GCActivityCallback.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "GCActivityCallback.h"
+
+namespace JSC {
+
+struct DefaultGCActivityCallbackPlatformData {
+};
+
+DefaultGCActivityCallback::DefaultGCActivityCallback(Heap* heap)
+{
+}
+
+DefaultGCActivityCallback::~DefaultGCActivityCallback()
+{
+}
+
+void DefaultGCActivityCallback::operator()()
+{
+}
+
+}
diff --git a/JavaScriptCore/runtime/GCActivityCallback.h b/JavaScriptCore/runtime/GCActivityCallback.h
new file mode 100644
index 0000000..66d56e8
--- /dev/null
+++ b/JavaScriptCore/runtime/GCActivityCallback.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GCActivityCallback_h
+#define GCActivityCallback_h
+
+#include <wtf/OwnPtr.h>
+#include <wtf/PassOwnPtr.h>
+
+namespace JSC {
+
+class Heap;
+
+class GCActivityCallback {
+public:
+ virtual ~GCActivityCallback() {}
+ virtual void operator()() {}
+
+protected:
+ GCActivityCallback() {}
+};
+
+struct DefaultGCActivityCallbackPlatformData;
+
+class DefaultGCActivityCallback : public GCActivityCallback {
+public:
+ static PassOwnPtr<DefaultGCActivityCallback> create(Heap*);
+
+ DefaultGCActivityCallback(Heap*);
+ ~DefaultGCActivityCallback();
+
+ void operator()();
+
+private:
+    OwnPtr<DefaultGCActivityCallbackPlatformData> d;
+};
+
+inline PassOwnPtr<DefaultGCActivityCallback> DefaultGCActivityCallback::create(Heap* heap)
+{
+ return adoptPtr(new DefaultGCActivityCallback(heap));
+}
+
+}
+
+#endif
diff --git a/JavaScriptCore/runtime/GCActivityCallbackCF.cpp b/JavaScriptCore/runtime/GCActivityCallbackCF.cpp
new file mode 100644
index 0000000..06d4210
--- /dev/null
+++ b/JavaScriptCore/runtime/GCActivityCallbackCF.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "GCActivityCallback.h"
+
+#include "Collector.h"
+#include "JSLock.h"
+#include <wtf/RetainPtr.h>
+#include <CoreFoundation/CoreFoundation.h>
+
+#if !PLATFORM(CF)
+#error "This file should only be used on CF platforms."
+#endif
+
+namespace JSC {
+
+struct DefaultGCActivityCallbackPlatformData {
+    static void trigger(CFRunLoopTimerRef, void* info);
+
+ RetainPtr<CFRunLoopTimerRef> timer;
+ CFRunLoopTimerContext context;
+};
+
+const CFTimeInterval decade = 60 * 60 * 24 * 365 * 10;
+
+void DefaultGCActivityCallbackPlatformData::trigger(CFRunLoopTimerRef, void* info)
+{
+ Heap* heap = static_cast<Heap*>(info);
+ JSLock lock(heap->globalData());
+
+ heap->collectAllGarbage();
+}
+
+DefaultGCActivityCallback::DefaultGCActivityCallback(Heap* heap)
+{
+ d = adoptPtr(new DefaultGCActivityCallbackPlatformData);
+
+ memset(&d->context, '\0', sizeof(CFRunLoopTimerContext));
+ d->context.info = heap;
+ d->timer.adoptCF(CFRunLoopTimerCreate(0, decade, decade, 0, 0, DefaultGCActivityCallbackPlatformData::trigger, &d->context));
+ CFRunLoopAddTimer(CFRunLoopGetCurrent(), d->timer.get(), kCFRunLoopCommonModes);
+}
+
+DefaultGCActivityCallback::~DefaultGCActivityCallback()
+{
+ CFRunLoopRemoveTimer(CFRunLoopGetCurrent(), d->timer.get(), kCFRunLoopCommonModes);
+ CFRunLoopTimerInvalidate(d->timer.get());
+ d->context.info = 0;
+ d->timer = 0;
+}
+
+void DefaultGCActivityCallback::operator()()
+{
+ CFRunLoopTimerSetNextFireDate(d->timer.get(), CFAbsoluteTimeGetCurrent() + 2);
+}
+
+}
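
The CF implementation parks a run-loop timer roughly a decade in the future; invoking the callback only pulls the next fire date in to two seconds from now, so a burst of invocations coalesces into a single deferred collection. A self-contained sketch of the same idle-timer idiom (Apple platforms only; printing a message stands in for collecting the heap):

    #include <CoreFoundation/CoreFoundation.h>
    #include <cstdio>

    static void timerFired(CFRunLoopTimerRef, void* info)
    {
        std::printf("deferred work runs: %s\n", static_cast<const char*>(info));
        CFRunLoopStop(CFRunLoopGetCurrent());    // end the demo run loop
    }

    int main()
    {
        const CFTimeInterval decade = 60.0 * 60 * 24 * 365 * 10;
        char label[] = "collect garbage";
        CFRunLoopTimerContext context = { 0, label, 0, 0, 0 };

        // Park the timer far in the future, as the constructor above does.
        CFRunLoopTimerRef timer = CFRunLoopTimerCreate(
            kCFAllocatorDefault, CFAbsoluteTimeGetCurrent() + decade,
            decade, 0, 0, timerFired, &context);
        CFRunLoopAddTimer(CFRunLoopGetCurrent(), timer, kCFRunLoopCommonModes);

        // The operator()() equivalent: ask for a run two seconds from now.
        CFRunLoopTimerSetNextFireDate(timer, CFAbsoluteTimeGetCurrent() + 2);

        CFRunLoopRun();                           // returns once timerFired() stops it
        CFRunLoopRemoveTimer(CFRunLoopGetCurrent(), timer, kCFRunLoopCommonModes);
        CFRelease(timer);
        return 0;
    }
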
diff --git a/JavaScriptCore/runtime/GCHandle.cpp b/JavaScriptCore/runtime/GCHandle.cpp
new file mode 100644
index 0000000..3331517
--- /dev/null
+++ b/JavaScriptCore/runtime/GCHandle.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "GCHandle.h"
+
+namespace JSC {
+
+WeakGCHandlePool* WeakGCHandle::pool()
+{
+ uintptr_t pool = (reinterpret_cast<uintptr_t>(this) & WeakGCHandlePool::poolMask);
+ return reinterpret_cast<WeakGCHandlePool*>(pool);
+}
+
+WeakGCHandlePool::WeakGCHandlePool()
+{
+ ASSERT(sizeof(WeakGCHandlePool) <= WeakGCHandlePool::poolSize);
+ m_entriesSize = 0;
+ m_initialAlloc = 1;
+ m_entries[0].setNextInFreeList(0);
+}
+
+WeakGCHandle* WeakGCHandlePool::allocate(JSCell* cell)
+{
+ ASSERT(cell);
+ ASSERT(m_entries[0].isNext());
+ unsigned freeList = m_entries[0].getNextInFreeList();
+ ASSERT(freeList < WeakGCHandlePool::numPoolEntries);
+ ASSERT(m_entriesSize < WeakGCHandlePool::numPoolEntries);
+
+ if (m_entriesSize == WeakGCHandlePool::numPoolEntries - 1)
+ return 0;
+
+ if (freeList) {
+ unsigned i = freeList;
+ freeList = m_entries[i].getNextInFreeList();
+ m_entries[i].set(cell);
+ m_entries[0].setNextInFreeList(freeList);
+ ++m_entriesSize;
+ return &m_entries[i];
+ }
+
+ ASSERT(m_initialAlloc < WeakGCHandlePool::numPoolEntries);
+
+ unsigned i = m_initialAlloc;
+ ++m_initialAlloc;
+ m_entries[i].set(cell);
+ ++m_entriesSize;
+ return &m_entries[i];
+}
+
+void WeakGCHandlePool::free(WeakGCHandle* handle)
+{
+ ASSERT(handle->pool() == this);
+ ASSERT(m_entries[0].isNext());
+ unsigned freeList = m_entries[0].getNextInFreeList();
+ ASSERT(freeList < WeakGCHandlePool::numPoolEntries);
+ handle->setNextInFreeList(freeList);
+ m_entries[0].setNextInFreeList(handle - m_entries);
+ --m_entriesSize;
+}
+
+void* WeakGCHandlePool::operator new(size_t, AlignedMemory<WeakGCHandlePool::poolSize>& allocation)
+{
+ return allocation.base();
+}
+
+}
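
allocate() and free() above keep the free list inside the pool itself: entry 0 never stores a handle, its payload is the index of the first free slot, and every freed slot records the index of the next one. A reduced sketch of that index-linked free list, assuming plain unsigned slots in place of WeakGCHandle:

    #include <cassert>
    #include <cstdio>

    struct Pool {
        static const unsigned count = 8;
        unsigned next[count] = { 0 };   // next[0] is the free-list head; 0 = empty
        unsigned initialAlloc = 1;      // first never-used slot

        unsigned allocate()
        {
            if (unsigned i = next[0]) { // pop a previously freed slot
                next[0] = next[i];
                return i;
            }
            assert(initialAlloc < count);
            return initialAlloc++;      // otherwise bump-allocate a fresh slot
        }

        void free(unsigned i)
        {
            next[i] = next[0];          // push the slot onto the free list
            next[0] = i;
        }
    };

    int main()
    {
        Pool pool;
        unsigned a = pool.allocate(), b = pool.allocate();
        pool.free(a);
        unsigned c = pool.allocate();   // recycles slot 'a'
        std::printf("a=%u b=%u c=%u\n", a, b, c);   // prints a=1 b=2 c=1
    }
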
diff --git a/JavaScriptCore/runtime/GCHandle.h b/JavaScriptCore/runtime/GCHandle.h
new file mode 100644
index 0000000..38a7be9
--- /dev/null
+++ b/JavaScriptCore/runtime/GCHandle.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GCHandle_h
+#define GCHandle_h
+
+#include "AlignedMemoryAllocator.h"
+
+namespace JSC {
+
+class Heap;
+class JSCell;
+class WeakGCHandle;
+class WeakGCHandlePool;
+
+class WeakGCHandle {
+ friend class WeakGCHandlePool;
+
+public:
+    // Because JSCell objects are aligned, we can use the lower two bits as
+    // status flags. The least significant bit is set when the handle is not a
+    // pointer, i.e. when it's used as an offset into the free list in
+    // WeakGCHandlePool. The second least significant bit is set when the
+    // object the pointer refers to has been deleted by a garbage collection.
+
+ bool isValidPtr() { return !(m_ptr & 3); }
+ bool isPtr() { return !(m_ptr & 1); }
+ bool isNext() { return (m_ptr & 3) == 1; }
+
+ void invalidate()
+ {
+ ASSERT(isValidPtr());
+ m_ptr |= 2;
+ }
+
+ JSCell* get()
+ {
+ ASSERT(isPtr());
+ return reinterpret_cast<JSCell*>(m_ptr & ~3);
+ }
+
+ void set(JSCell* p)
+ {
+ m_ptr = reinterpret_cast<uintptr_t>(p);
+ ASSERT(isPtr());
+ }
+
+ WeakGCHandlePool* pool();
+
+private:
+ uintptr_t getNextInFreeList()
+ {
+ ASSERT(isNext());
+ return m_ptr >> 2;
+ }
+
+ void setNextInFreeList(uintptr_t n)
+ {
+ m_ptr = (n << 2) | 1;
+ ASSERT(isNext());
+ }
+
+ uintptr_t m_ptr;
+};
+
+class WeakGCHandlePool {
+public:
+ static const size_t poolSize = 32 * 1024; // 32k
+ static const size_t poolMask = ~(poolSize - 1);
+ static const size_t numPoolEntries = (poolSize - sizeof(Heap*) - 3 * sizeof(unsigned)) / sizeof(WeakGCHandle);
+
+ typedef AlignedMemoryAllocator<WeakGCHandlePool::poolSize> Allocator;
+
+ WeakGCHandlePool();
+
+ WeakGCHandle* allocate(JSCell* cell);
+ void free(WeakGCHandle*);
+
+ bool isFull()
+ {
+ ASSERT(m_entriesSize < WeakGCHandlePool::numPoolEntries);
+ return m_entriesSize == WeakGCHandlePool::numPoolEntries - 1;
+ }
+
+ void update();
+
+ void* operator new(size_t, AlignedMemory<WeakGCHandlePool::poolSize>&);
+
+private:
+ Heap* m_heap;
+ unsigned m_entriesSize;
+ unsigned m_initialAlloc;
+
+ WeakGCHandle m_entries[WeakGCHandlePool::numPoolEntries];
+};
+
+}
+#endif
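
Because a handle word is either an aligned JSCell* or a free-list index, the low two bits can carry the states described in the class comment. A standalone sketch of the encoding; Handle and its accessors are illustrative, not the JSC class:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    struct Handle {
        uintptr_t bits;

        bool isPtr() const  { return !(bits & 1); }    // bit 0 clear: a pointer
        bool isLive() const { return !(bits & 3); }    // pointer and not invalidated

        void setPtr(void* p) { bits = reinterpret_cast<uintptr_t>(p); assert(isPtr()); }
        void* ptr() const    { assert(isPtr()); return reinterpret_cast<void*>(bits & ~uintptr_t(3)); }
        void invalidate()    { assert(isLive()); bits |= 2; }   // bit 1: cell collected

        void setNext(uintptr_t n) { bits = (n << 2) | 1; }      // bit 0: free-list index
        uintptr_t next() const    { assert(!isPtr()); return bits >> 2; }
    };

    int main()
    {
        static int cell = 42;           // int alignment keeps the low two bits clear
        Handle h;
        h.setPtr(&cell);
        std::printf("live=%d value=%d\n", h.isLive(), *static_cast<int*>(h.ptr()));
        h.invalidate();                 // the collector deleted the cell
        std::printf("live=%d\n", h.isLive());
        h.setNext(7);                   // reuse the word as a free-list link
        std::printf("next=%u\n", unsigned(h.next()));
    }
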
diff --git a/JavaScriptCore/runtime/JSArray.cpp b/JavaScriptCore/runtime/JSArray.cpp
index 99e1a10..0db0a63 100644
--- a/JavaScriptCore/runtime/JSArray.cpp
+++ b/JavaScriptCore/runtime/JSArray.cpp
@@ -126,17 +126,36 @@ inline void JSArray::checkConsistency(ConsistencyCheckType)
#endif
+JSArray::JSArray(VPtrStealingHackType)
+ : JSObject(createStructure(jsNull()))
+{
+ unsigned initialCapacity = 0;
+
+ m_storage = static_cast<ArrayStorage*>(fastZeroedMalloc(storageSize(initialCapacity)));
+ m_storage->m_allocBase = m_storage;
+ m_indexBias = 0;
+ m_vectorLength = initialCapacity;
+
+ checkConsistency();
+
+ // It's not safe to call Heap::heap(this) in order to report extra memory
+ // cost here, because the VPtrStealingHackType JSArray is not allocated on
+ // the heap. For the same reason, it's OK not to report extra cost.
+}
+
JSArray::JSArray(NonNullPassRefPtr<Structure> structure)
: JSObject(structure)
{
unsigned initialCapacity = 0;
- ArrayStorage* storage = static_cast<ArrayStorage*>(fastZeroedMalloc(storageSize(initialCapacity)));
+ m_storage = static_cast<ArrayStorage*>(fastZeroedMalloc(storageSize(initialCapacity)));
+ m_storage->m_allocBase = m_storage;
m_indexBias = 0;
- setArrayStorage(storage);
m_vectorLength = initialCapacity;
checkConsistency();
+
+ Heap::heap(this)->reportExtraMemoryCost(storageSize(0));
}
JSArray::JSArray(NonNullPassRefPtr<Structure> structure, unsigned initialLength, ArrayCreationMode creationMode)
@@ -148,35 +167,35 @@ JSArray::JSArray(NonNullPassRefPtr<Structure> structure, unsigned initialLength,
else
initialCapacity = min(BASE_VECTOR_LEN, MIN_SPARSE_ARRAY_INDEX);
- ArrayStorage* storage = static_cast<ArrayStorage*>(fastMalloc(storageSize(initialCapacity)));
- storage->m_length = initialLength;
+ m_storage = static_cast<ArrayStorage*>(fastMalloc(storageSize(initialCapacity)));
+ m_storage->m_allocBase = m_storage;
+ m_storage->m_length = initialLength;
m_indexBias = 0;
m_vectorLength = initialCapacity;
- setArrayStorage(storage);
- storage->m_sparseValueMap = 0;
- storage->subclassData = 0;
- storage->reportedMapCapacity = 0;
+ m_storage->m_sparseValueMap = 0;
+ m_storage->subclassData = 0;
+ m_storage->reportedMapCapacity = 0;
if (creationMode == CreateCompact) {
#if CHECK_ARRAY_CONSISTENCY
- storage->m_inCompactInitialization = !!initialCapacity;
+ m_storage->m_inCompactInitialization = !!initialCapacity;
#endif
- storage->m_length = 0;
- storage->m_numValuesInVector = initialCapacity;
+ m_storage->m_length = 0;
+ m_storage->m_numValuesInVector = initialCapacity;
} else {
#if CHECK_ARRAY_CONSISTENCY
-            storage->m_inCompactInitialization = false;
+            m_storage->m_inCompactInitialization = false;
#endif
- storage->m_length = initialLength;
- storage->m_numValuesInVector = 0;
- JSValue* vector = m_vector;
+ m_storage->m_length = initialLength;
+ m_storage->m_numValuesInVector = 0;
+ JSValue* vector = m_storage->m_vector;
for (size_t i = 0; i < initialCapacity; ++i)
vector[i] = JSValue();
}
checkConsistency();
- Heap::heap(this)->reportExtraMemoryCost(initialCapacity * sizeof(JSValue));
+ Heap::heap(this)->reportExtraMemoryCost(storageSize(initialCapacity));
}
JSArray::JSArray(NonNullPassRefPtr<Structure> structure, const ArgList& list)
@@ -184,21 +203,21 @@ JSArray::JSArray(NonNullPassRefPtr<Structure> structure, const ArgList& list)
{
unsigned initialCapacity = list.size();
- ArrayStorage* storage = static_cast<ArrayStorage*>(fastMalloc(storageSize(initialCapacity)));
+ m_storage = static_cast<ArrayStorage*>(fastMalloc(storageSize(initialCapacity)));
+ m_storage->m_allocBase = m_storage;
m_indexBias = 0;
- storage->m_length = initialCapacity;
+ m_storage->m_length = initialCapacity;
m_vectorLength = initialCapacity;
- storage->m_numValuesInVector = initialCapacity;
- storage->m_sparseValueMap = 0;
- storage->subclassData = 0;
- storage->reportedMapCapacity = 0;
+ m_storage->m_numValuesInVector = initialCapacity;
+ m_storage->m_sparseValueMap = 0;
+ m_storage->subclassData = 0;
+ m_storage->reportedMapCapacity = 0;
#if CHECK_ARRAY_CONSISTENCY
- storage->m_inCompactInitialization = false;
+ m_storage->m_inCompactInitialization = false;
#endif
- setArrayStorage(storage);
size_t i = 0;
- JSValue* vector = m_vector;
+ JSValue* vector = m_storage->m_vector;
ArgList::const_iterator end = list.end();
for (ArgList::const_iterator it = list.begin(); it != end; ++it, ++i)
vector[i] = *it;
@@ -213,15 +232,13 @@ JSArray::~JSArray()
ASSERT(vptr() == JSGlobalData::jsArrayVPtr);
checkConsistency(DestructorConsistencyCheck);
- ArrayStorage* storage = arrayStorage();
- delete storage->m_sparseValueMap;
- char* realStorage = reinterpret_cast<char*>(storage) - (m_indexBias * sizeof(JSValue));
- fastFree(realStorage);
+ delete m_storage->m_sparseValueMap;
+ fastFree(m_storage->m_allocBase);
}
bool JSArray::getOwnPropertySlot(ExecState* exec, unsigned i, PropertySlot& slot)
{
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
if (i >= storage->m_length) {
if (i > MAX_ARRAY_INDEX)
@@ -230,7 +247,7 @@ bool JSArray::getOwnPropertySlot(ExecState* exec, unsigned i, PropertySlot& slot
}
if (i < m_vectorLength) {
- JSValue& valueSlot = m_vector[i];
+ JSValue& valueSlot = storage->m_vector[i];
if (valueSlot) {
slot.setValueSlot(&valueSlot);
return true;
@@ -270,7 +287,7 @@ bool JSArray::getOwnPropertyDescriptor(ExecState* exec, const Identifier& proper
return true;
}
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
bool isArrayIndex;
unsigned i = propertyName.toArrayIndex(&isArrayIndex);
@@ -278,7 +295,7 @@ bool JSArray::getOwnPropertyDescriptor(ExecState* exec, const Identifier& proper
if (i >= storage->m_length)
return false;
if (i < m_vectorLength) {
- JSValue& value = m_vector[i];
+ JSValue& value = storage->m_vector[i];
if (value) {
descriptor.setDescriptor(value, 0);
return true;
@@ -323,7 +340,7 @@ void JSArray::put(ExecState* exec, unsigned i, JSValue value)
{
checkConsistency();
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
unsigned length = storage->m_length;
if (i >= length && i <= MAX_ARRAY_INDEX) {
@@ -332,7 +349,7 @@ void JSArray::put(ExecState* exec, unsigned i, JSValue value)
}
if (i < m_vectorLength) {
- JSValue& valueSlot = m_vector[i];
+ JSValue& valueSlot = storage->m_vector[i];
if (valueSlot) {
valueSlot = value;
checkConsistency();
@@ -349,7 +366,7 @@ void JSArray::put(ExecState* exec, unsigned i, JSValue value)
NEVER_INLINE void JSArray::putSlowCase(ExecState* exec, unsigned i, JSValue value)
{
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
SparseArrayValueMap* map = storage->m_sparseValueMap;
@@ -387,8 +404,9 @@ NEVER_INLINE void JSArray::putSlowCase(ExecState* exec, unsigned i, JSValue valu
// Fast case is when there is no sparse map, so we can increase the vector size without moving values from it.
if (!map || map->isEmpty()) {
if (increaseVectorLength(i + 1)) {
- m_vector[i] = value;
- ++arrayStorage()->m_numValuesInVector;
+ storage = m_storage;
+ storage->m_vector[i] = value;
+ ++storage->m_numValuesInVector;
checkConsistency();
} else
throwOutOfMemoryError(exec);
@@ -416,29 +434,30 @@ NEVER_INLINE void JSArray::putSlowCase(ExecState* exec, unsigned i, JSValue valu
}
}
- int baseBias = m_indexBias * sizeof(JSValue);
- char* baseStorage = reinterpret_cast<char*>(storage - baseBias);
+ void* baseStorage = storage->m_allocBase;
if (!tryFastRealloc(baseStorage, storageSize(newVectorLength + m_indexBias)).getValue(baseStorage)) {
throwOutOfMemoryError(exec);
return;
}
- storage = reinterpret_cast<ArrayStorage*>(baseStorage + baseBias);
- setArrayStorage(storage);
+ m_storage = reinterpret_cast<ArrayStorage*>(static_cast<char*>(baseStorage) + m_indexBias * sizeof(JSValue));
+ m_storage->m_allocBase = baseStorage;
+ storage = m_storage;
unsigned vectorLength = m_vectorLength;
+ JSValue* vector = storage->m_vector;
if (newNumValuesInVector == storage->m_numValuesInVector + 1) {
for (unsigned j = vectorLength; j < newVectorLength; ++j)
- m_vector[j] = JSValue();
+ vector[j] = JSValue();
if (i > MIN_SPARSE_ARRAY_INDEX)
map->remove(i);
} else {
for (unsigned j = vectorLength; j < max(vectorLength, MIN_SPARSE_ARRAY_INDEX); ++j)
- m_vector[j] = JSValue();
+ vector[j] = JSValue();
for (unsigned j = max(vectorLength, MIN_SPARSE_ARRAY_INDEX); j < newVectorLength; ++j)
- m_vector[j] = map->take(j);
+ vector[j] = map->take(j);
}
ASSERT(i < newVectorLength);
@@ -446,7 +465,7 @@ NEVER_INLINE void JSArray::putSlowCase(ExecState* exec, unsigned i, JSValue valu
m_vectorLength = newVectorLength;
storage->m_numValuesInVector = newNumValuesInVector;
- m_vector[i] = value;
+ storage->m_vector[i] = value;
checkConsistency();
@@ -470,10 +489,10 @@ bool JSArray::deleteProperty(ExecState* exec, unsigned i)
{
checkConsistency();
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
if (i < m_vectorLength) {
- JSValue& valueSlot = m_vector[i];
+ JSValue& valueSlot = storage->m_vector[i];
if (!valueSlot) {
checkConsistency();
return false;
@@ -509,11 +528,11 @@ void JSArray::getOwnPropertyNames(ExecState* exec, PropertyNameArray& propertyNa
// is incredibly inefficient for large arrays. We need a different approach,
// which almost certainly means a different structure for PropertyNameArray.
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
unsigned usedVectorLength = min(storage->m_length, m_vectorLength);
for (unsigned i = 0; i < usedVectorLength; ++i) {
- if (m_vector[i])
+ if (storage->m_vector[i])
propertyNames.add(Identifier::from(exec, i));
}
@@ -534,7 +553,7 @@ ALWAYS_INLINE unsigned JSArray::getNewVectorLength(unsigned desiredLength)
ASSERT(desiredLength <= MAX_STORAGE_VECTOR_LENGTH);
unsigned increasedLength;
- unsigned length = arrayStorage()->m_length;
+ unsigned length = m_storage->m_length;
if (desiredLength < length)
increasedLength = length;
@@ -561,22 +580,21 @@ bool JSArray::increaseVectorLength(unsigned newLength)
// This function leaves the array in an internally inconsistent state, because it does not move any values from sparse value map
// to the vector. Callers have to account for that, because they can do it more efficiently.
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
unsigned vectorLength = m_vectorLength;
ASSERT(newLength > vectorLength);
ASSERT(newLength <= MAX_STORAGE_VECTOR_INDEX);
unsigned newVectorLength = getNewVectorLength(newLength);
- int baseBias = m_indexBias * sizeof(JSValue);
- char* baseStorage = reinterpret_cast<char*>(storage) - baseBias;
+ void* baseStorage = storage->m_allocBase;
if (!tryFastRealloc(baseStorage, storageSize(newVectorLength + m_indexBias)).getValue(baseStorage))
return false;
-
- storage = reinterpret_cast<ArrayStorage*>(baseStorage + baseBias);
- setArrayStorage(storage);
- JSValue* vector = m_vector;
+ storage = m_storage = reinterpret_cast<ArrayStorage*>(static_cast<char*>(baseStorage) + m_indexBias * sizeof(JSValue));
+ m_storage->m_allocBase = baseStorage;
+
+ JSValue* vector = storage->m_vector;
for (unsigned i = vectorLength; i < newVectorLength; ++i)
vector[i] = JSValue();
@@ -592,33 +610,29 @@ bool JSArray::increaseVectorPrefixLength(unsigned newLength)
// This function leaves the array in an internally inconsistent state, because it does not move any values from sparse value map
// to the vector. Callers have to account for that, because they can do it more efficiently.
- ArrayStorage* storage = arrayStorage();
- ArrayStorage* newStorage;
+ ArrayStorage* storage = m_storage;
unsigned vectorLength = m_vectorLength;
ASSERT(newLength > vectorLength);
ASSERT(newLength <= MAX_STORAGE_VECTOR_INDEX);
unsigned newVectorLength = getNewVectorLength(newLength);
- char* baseStorage = reinterpret_cast<char*>(storage) - (m_indexBias * sizeof(JSValue));
-
- char* newBaseStorage = reinterpret_cast<char*>(fastMalloc(storageSize(newVectorLength + m_indexBias)));
+
+ void* newBaseStorage = fastMalloc(storageSize(newVectorLength + m_indexBias));
if (!newBaseStorage)
return false;
m_indexBias += newVectorLength - newLength;
- int newStorageOffset = m_indexBias * sizeof(JSValue);
-
- newStorage = reinterpret_cast<ArrayStorage*>(newBaseStorage + newStorageOffset);
- memcpy(newStorage, storage, storageSize(0));
- memcpy(&newStorage->m_vector[newLength - m_vectorLength], &storage->m_vector[0], storage->m_length * sizeof(JSValue));
+ m_storage = reinterpret_cast<ArrayStorage*>(static_cast<char*>(newBaseStorage) + m_indexBias * sizeof(JSValue));
+
+ memcpy(m_storage, storage, storageSize(0));
+ memcpy(&m_storage->m_vector[newLength - m_vectorLength], &storage->m_vector[0], vectorLength * sizeof(JSValue));
+ m_storage->m_allocBase = newBaseStorage;
m_vectorLength = newLength;
- fastFree(baseStorage);
+ fastFree(storage->m_allocBase);
- setArrayStorage(newStorage);
-
Heap::heap(this)->reportExtraMemoryCost(storageSize(newVectorLength) - storageSize(vectorLength));
return true;
@@ -627,21 +641,21 @@ bool JSArray::increaseVectorPrefixLength(unsigned newLength)
void JSArray::setLength(unsigned newLength)
{
+ ArrayStorage* storage = m_storage;
+
#if CHECK_ARRAY_CONSISTENCY
- if (!m_storage->m_inCompactInitialization)
+ if (!storage->m_inCompactInitialization)
checkConsistency();
else
- m_storage->m_inCompactInitialization = false;
+ storage->m_inCompactInitialization = false;
#endif
- ArrayStorage* storage = arrayStorage();
-
unsigned length = storage->m_length;
if (newLength < length) {
unsigned usedVectorLength = min(length, m_vectorLength);
for (unsigned i = newLength; i < usedVectorLength; ++i) {
- JSValue& valueSlot = m_vector[i];
+ JSValue& valueSlot = storage->m_vector[i];
bool hadValue = valueSlot;
valueSlot = JSValue();
storage->m_numValuesInVector -= hadValue;
@@ -670,7 +684,7 @@ JSValue JSArray::pop()
{
checkConsistency();
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
unsigned length = storage->m_length;
if (!length)
@@ -681,7 +695,7 @@ JSValue JSArray::pop()
JSValue result;
if (length < m_vectorLength) {
- JSValue& valueSlot = m_vector[length];
+ JSValue& valueSlot = storage->m_vector[length];
if (valueSlot) {
--storage->m_numValuesInVector;
result = valueSlot;
@@ -714,10 +728,10 @@ void JSArray::push(ExecState* exec, JSValue value)
{
checkConsistency();
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
if (storage->m_length < m_vectorLength) {
- m_vector[storage->m_length] = value;
+ storage->m_vector[storage->m_length] = value;
++storage->m_numValuesInVector;
++storage->m_length;
checkConsistency();
@@ -728,8 +742,8 @@ void JSArray::push(ExecState* exec, JSValue value)
SparseArrayValueMap* map = storage->m_sparseValueMap;
if (!map || map->isEmpty()) {
if (increaseVectorLength(storage->m_length + 1)) {
- storage = arrayStorage();
- m_vector[storage->m_length] = value;
+ storage = m_storage;
+ storage->m_vector[storage->m_length] = value;
++storage->m_numValuesInVector;
++storage->m_length;
checkConsistency();
@@ -748,7 +762,7 @@ void JSArray::shiftCount(ExecState* exec, int count)
{
ASSERT(count > 0);
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
unsigned oldLength = storage->m_length;
@@ -761,7 +775,7 @@ void JSArray::shiftCount(ExecState* exec, int count)
// slots and then fill them with possible properties. See ECMA spec.
// 15.4.4.9 steps 11 through 13.
for (unsigned i = count; i < oldLength; ++i) {
- if ((i >= m_vectorLength) || (!m_vector[i])) {
+ if ((i >= m_vectorLength) || (!m_storage->m_vector[i])) {
PropertySlot slot(this);
JSValue p = prototype();
if ((!p.isNull()) && (asObject(p)->getPropertySlot(exec, i, slot)))
@@ -769,11 +783,11 @@ void JSArray::shiftCount(ExecState* exec, int count)
}
}
- storage = arrayStorage(); // The put() above could have grown the vector and realloc'ed storage.
+ storage = m_storage; // The put() above could have grown the vector and realloc'ed storage.
// Need to decrement numValuesInvector based on number of real entries
for (unsigned i = 0; i < (unsigned)count; ++i)
- if ((i < m_vectorLength) && (m_vector[i]))
+ if ((i < m_vectorLength) && (storage->m_vector[i]))
--storage->m_numValuesInVector;
} else
storage->m_numValuesInVector -= count;
@@ -788,17 +802,16 @@ void JSArray::shiftCount(ExecState* exec, int count)
if (m_vectorLength) {
char* newBaseStorage = reinterpret_cast<char*>(storage) + count * sizeof(JSValue);
memmove(newBaseStorage, storage, storageSize(0));
- storage = reinterpret_cast<ArrayStorage*>(newBaseStorage);
+ m_storage = reinterpret_cast<ArrayStorage*>(newBaseStorage);
m_indexBias += count;
- setArrayStorage(storage);
}
}
}
void JSArray::unshiftCount(ExecState* exec, int count)
{
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
ASSERT(m_indexBias >= 0);
ASSERT(count >= 0);
@@ -811,7 +824,7 @@ void JSArray::unshiftCount(ExecState* exec, int count)
// slots and then fill them with possible properties. See ECMA spec.
// 15.4.4.13 steps 8 through 10.
for (unsigned i = 0; i < length; ++i) {
- if ((i >= m_vectorLength) || (!m_vector[i])) {
+ if ((i >= m_vectorLength) || (!m_storage->m_vector[i])) {
PropertySlot slot(this);
JSValue p = prototype();
if ((!p.isNull()) && (asObject(p)->getPropertySlot(exec, i, slot)))
@@ -820,19 +833,22 @@ void JSArray::unshiftCount(ExecState* exec, int count)
}
}
- storage = arrayStorage(); // The put() above could have grown the vector and realloc'ed storage.
+ storage = m_storage; // The put() above could have grown the vector and realloc'ed storage.
if (m_indexBias >= count) {
m_indexBias -= count;
char* newBaseStorage = reinterpret_cast<char*>(storage) - count * sizeof(JSValue);
memmove(newBaseStorage, storage, storageSize(0));
- storage = reinterpret_cast<ArrayStorage*>(newBaseStorage);
- setArrayStorage(storage);
+ m_storage = reinterpret_cast<ArrayStorage*>(newBaseStorage);
m_vectorLength += count;
- } else if ((!m_indexBias) && (!increaseVectorPrefixLength(m_vectorLength + count))) {
+ } else if (!increaseVectorPrefixLength(m_vectorLength + count)) {
throwOutOfMemoryError(exec);
return;
}
+
+ JSValue* vector = m_storage->m_vector;
+ for (int i = 0; i < count; i++)
+ vector[i] = JSValue();
}
void JSArray::markChildren(MarkStack& markStack)
@@ -858,7 +874,7 @@ static int compareByStringPairForQSort(const void* a, const void* b)
void JSArray::sortNumeric(ExecState* exec, JSValue compareFunction, CallType callType, const CallData& callData)
{
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
unsigned lengthNotIncludingUndefined = compactForSorting();
if (storage->m_sparseValueMap) {
@@ -872,7 +888,7 @@ void JSArray::sortNumeric(ExecState* exec, JSValue compareFunction, CallType cal
bool allValuesAreNumbers = true;
size_t size = storage->m_numValuesInVector;
for (size_t i = 0; i < size; ++i) {
- if (!m_vector[i].isNumber()) {
+ if (!storage->m_vector[i].isNumber()) {
allValuesAreNumbers = false;
break;
}
@@ -884,14 +900,14 @@ void JSArray::sortNumeric(ExecState* exec, JSValue compareFunction, CallType cal
// For numeric comparison, which is fast, qsort is faster than mergesort. We
// also don't require mergesort's stability, since there's no user visible
// side-effect from swapping the order of equal primitive values.
- qsort(m_vector, size, sizeof(JSValue), compareNumbersForQSort);
+ qsort(storage->m_vector, size, sizeof(JSValue), compareNumbersForQSort);
checkConsistency(SortConsistencyCheck);
}
void JSArray::sort(ExecState* exec)
{
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
unsigned lengthNotIncludingUndefined = compactForSorting();
if (storage->m_sparseValueMap) {
@@ -914,7 +930,7 @@ void JSArray::sort(ExecState* exec)
}
for (size_t i = 0; i < lengthNotIncludingUndefined; i++) {
- JSValue value = m_vector[i];
+ JSValue value = storage->m_vector[i];
ASSERT(!value.isUndefined());
values[i].first = value;
}
@@ -946,7 +962,7 @@ void JSArray::sort(ExecState* exec)
// modifying the vector incorrectly.
for (size_t i = 0; i < lengthNotIncludingUndefined; i++)
- m_vector[i] = values[i].first;
+ storage->m_vector[i] = values[i].first;
checkConsistency(SortConsistencyCheck);
}
@@ -1033,7 +1049,7 @@ void JSArray::sort(ExecState* exec, JSValue compareFunction, CallType callType,
{
checkConsistency();
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
// FIXME: This ignores exceptions raised in the compare function or in toNumber.
@@ -1072,14 +1088,14 @@ void JSArray::sort(ExecState* exec, JSValue compareFunction, CallType callType,
// Iterate over the array, ignoring missing values, counting undefined ones, and inserting all other ones into the tree.
for (; numDefined < usedVectorLength; ++numDefined) {
- JSValue v = m_vector[numDefined];
+ JSValue v = storage->m_vector[numDefined];
if (!v || v.isUndefined())
break;
tree.abstractor().m_nodes[numDefined].value = v;
tree.insert(numDefined);
}
for (unsigned i = numDefined; i < usedVectorLength; ++i) {
- JSValue v = m_vector[i];
+ JSValue v = storage->m_vector[i];
if (v) {
if (v.isUndefined())
++numUndefined;
@@ -1103,7 +1119,7 @@ void JSArray::sort(ExecState* exec, JSValue compareFunction, CallType callType,
}
}
- storage = arrayStorage();
+ storage = m_storage;
SparseArrayValueMap::iterator end = map->end();
for (SparseArrayValueMap::iterator it = map->begin(); it != end; ++it) {
@@ -1125,17 +1141,17 @@ void JSArray::sort(ExecState* exec, JSValue compareFunction, CallType callType,
AVLTree<AVLTreeAbstractorForArrayCompare, 44>::Iterator iter;
iter.start_iter_least(tree);
for (unsigned i = 0; i < numDefined; ++i) {
- m_vector[i] = tree.abstractor().m_nodes[*iter].value;
+ storage->m_vector[i] = tree.abstractor().m_nodes[*iter].value;
++iter;
}
// Put undefined values back in.
for (unsigned i = numDefined; i < newUsedVectorLength; ++i)
- m_vector[i] = jsUndefined();
+ storage->m_vector[i] = jsUndefined();
// Ensure that unused values in the vector are zeroed out.
for (unsigned i = newUsedVectorLength; i < usedVectorLength; ++i)
- m_vector[i] = JSValue();
+ storage->m_vector[i] = JSValue();
storage->m_numValuesInVector = newUsedVectorLength;
@@ -1144,7 +1160,7 @@ void JSArray::sort(ExecState* exec, JSValue compareFunction, CallType callType,
void JSArray::fillArgList(ExecState* exec, MarkedArgumentBuffer& args)
{
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
JSValue* vector = storage->m_vector;
unsigned vectorEnd = min(storage->m_length, m_vectorLength);
@@ -1162,9 +1178,9 @@ void JSArray::fillArgList(ExecState* exec, MarkedArgumentBuffer& args)
void JSArray::copyToRegisters(ExecState* exec, Register* buffer, uint32_t maxSize)
{
- ASSERT(arrayStorage()->m_length >= maxSize);
+ ASSERT(m_storage->m_length >= maxSize);
UNUSED_PARAM(maxSize);
- JSValue* vector = m_vector;
+ JSValue* vector = m_storage->m_vector;
unsigned vectorEnd = min(maxSize, m_vectorLength);
unsigned i = 0;
for (; i < vectorEnd; ++i) {
@@ -1182,7 +1198,7 @@ unsigned JSArray::compactForSorting()
{
checkConsistency();
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
unsigned usedVectorLength = min(storage->m_length, m_vectorLength);
@@ -1190,17 +1206,17 @@ unsigned JSArray::compactForSorting()
unsigned numUndefined = 0;
for (; numDefined < usedVectorLength; ++numDefined) {
- JSValue v = m_vector[numDefined];
+ JSValue v = storage->m_vector[numDefined];
if (!v || v.isUndefined())
break;
}
for (unsigned i = numDefined; i < usedVectorLength; ++i) {
- JSValue v = m_vector[i];
+ JSValue v = storage->m_vector[i];
if (v) {
if (v.isUndefined())
++numUndefined;
else
- m_vector[numDefined++] = v;
+ storage->m_vector[numDefined++] = v;
}
}
@@ -1214,21 +1230,21 @@ unsigned JSArray::compactForSorting()
if ((newUsedVectorLength > MAX_STORAGE_VECTOR_LENGTH) || !increaseVectorLength(newUsedVectorLength))
return 0;
- storage = arrayStorage();
+ storage = m_storage;
}
SparseArrayValueMap::iterator end = map->end();
for (SparseArrayValueMap::iterator it = map->begin(); it != end; ++it)
- m_vector[numDefined++] = it->second;
+ storage->m_vector[numDefined++] = it->second;
delete map;
storage->m_sparseValueMap = 0;
}
for (unsigned i = numDefined; i < newUsedVectorLength; ++i)
- m_vector[i] = jsUndefined();
+ storage->m_vector[i] = jsUndefined();
for (unsigned i = newUsedVectorLength; i < usedVectorLength; ++i)
- m_vector[i] = JSValue();
+ storage->m_vector[i] = JSValue();
storage->m_numValuesInVector = newUsedVectorLength;
@@ -1239,19 +1255,19 @@ unsigned JSArray::compactForSorting()
void* JSArray::subclassData() const
{
- return arrayStorage()->subclassData;
+ return m_storage->subclassData;
}
void JSArray::setSubclassData(void* d)
{
- arrayStorage()->subclassData = d;
+ m_storage->subclassData = d;
}
#if CHECK_ARRAY_CONSISTENCY
void JSArray::checkConsistency(ConsistencyCheckType type)
{
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
ASSERT(storage);
if (type == SortConsistencyCheck)
@@ -1259,7 +1275,7 @@ void JSArray::checkConsistency(ConsistencyCheckType type)
unsigned numValuesInVector = 0;
for (unsigned i = 0; i < m_vectorLength; ++i) {
- if (JSValue value = m_vector[i]) {
+ if (JSValue value = storage->m_vector[i]) {
ASSERT(i < storage->m_length);
if (type != DestructorConsistencyCheck)
value.isUndefined(); // Likely to crash if the object was deallocated.
@@ -1277,7 +1293,7 @@ void JSArray::checkConsistency(ConsistencyCheckType type)
for (SparseArrayValueMap::iterator it = storage->m_sparseValueMap->begin(); it != end; ++it) {
unsigned index = it->first;
ASSERT(index < storage->m_length);
         ASSERT(index >= m_vectorLength);
ASSERT(index <= MAX_ARRAY_INDEX);
ASSERT(it->second);
if (type != DestructorConsistencyCheck)
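
The edits in this file are mechanical (arrayStorage() and the cached m_vector are gone; all access goes through m_storage), but the layout they rely on deserves spelling out: the ArrayStorage header sits m_indexBias value-slots past the block malloc() returned, and m_allocBase remembers the true base so fastFree() and tryFastRealloc() get the right pointer. A stand-in sketch of that invariant:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    typedef unsigned long long Value;   // stand-in for JSValue

    struct Storage {                    // stand-in for ArrayStorage
        void* allocBase;                // the pointer malloc() actually returned
        unsigned length;
        Value vector[1];                // values grow off the end of the header
    };

    static std::size_t storageSize(unsigned vectorLength)
    {
        return sizeof(Storage) + (vectorLength - 1) * sizeof(Value);
    }

    int main()
    {
        unsigned indexBias = 4, vectorLength = 8;
        void* base = std::malloc(storageSize(vectorLength + indexBias));

        // The header starts indexBias slots past the base; the reserved
        // slots in front are what make shiftCount()/unshiftCount() cheap.
        Storage* storage = reinterpret_cast<Storage*>(
            static_cast<char*>(base) + indexBias * sizeof(Value));
        storage->allocBase = base;
        storage->length = 0;

        std::printf("header sits %zu bytes past the allocation base\n",
            std::size_t(reinterpret_cast<char*>(storage) - static_cast<char*>(base)));

        std::free(storage->allocBase);  // must free the base, not the header
        return 0;
    }
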
diff --git a/JavaScriptCore/runtime/JSArray.h b/JavaScriptCore/runtime/JSArray.h
index a7ce328..f718d7e 100644
--- a/JavaScriptCore/runtime/JSArray.h
+++ b/JavaScriptCore/runtime/JSArray.h
@@ -39,6 +39,7 @@ namespace JSC {
unsigned m_numValuesInVector;
SparseArrayValueMap* m_sparseValueMap;
void* subclassData; // A JSArray subclass can use this to fill the vector lazily.
+        void* m_allocBase; // Pointer to base address returned by malloc(). Keeping this pointer eliminates false positives from the leak detector.
size_t reportedMapCapacity;
#if CHECK_ARRAY_CONSISTENCY
bool m_inCompactInitialization;
@@ -61,6 +62,9 @@ namespace JSC {
friend class Walker;
public:
+ enum VPtrStealingHackType { VPtrStealingHack };
+ JSArray(VPtrStealingHackType);
+
explicit JSArray(NonNullPassRefPtr<Structure>);
JSArray(NonNullPassRefPtr<Structure>, unsigned initialLength, ArrayCreationMode);
JSArray(NonNullPassRefPtr<Structure>, const ArgList& initialValues);
@@ -73,7 +77,7 @@ namespace JSC {
static JS_EXPORTDATA const ClassInfo info;
- unsigned length() const { return arrayStorage()->m_length; }
+ unsigned length() const { return m_storage->m_length; }
void setLength(unsigned); // OK to use on new arrays, but not if it might be a RegExpMatchArray.
void sort(ExecState*);
@@ -86,11 +90,11 @@ namespace JSC {
void shiftCount(ExecState*, int count);
void unshiftCount(ExecState*, int count);
- bool canGetIndex(unsigned i) { return i < m_vectorLength && m_vector[i]; }
+ bool canGetIndex(unsigned i) { return i < m_vectorLength && m_storage->m_vector[i]; }
JSValue getIndex(unsigned i)
{
ASSERT(canGetIndex(i));
- return m_vector[i];
+ return m_storage->m_vector[i];
}
bool canSetIndex(unsigned i) { return i < m_vectorLength; }
@@ -98,9 +102,9 @@ namespace JSC {
{
ASSERT(canSetIndex(i));
- JSValue& x = m_vector[i];
+ JSValue& x = m_storage->m_vector[i];
if (!x) {
- ArrayStorage *storage = arrayStorage();
+ ArrayStorage *storage = m_storage;
++storage->m_numValuesInVector;
if (i >= storage->m_length)
storage->m_length = i + 1;
@@ -111,7 +115,7 @@ namespace JSC {
void uncheckedSetIndex(unsigned i, JSValue v)
{
ASSERT(canSetIndex(i));
- ArrayStorage *storage = arrayStorage();
+ ArrayStorage *storage = m_storage;
#if CHECK_ARRAY_CONSISTENCY
ASSERT(storage->m_inCompactInitialization);
#endif
@@ -139,16 +143,6 @@ namespace JSC {
void* subclassData() const;
void setSubclassData(void*);
- inline ArrayStorage *arrayStorage() const
- {
- return reinterpret_cast<ArrayStorage*>(reinterpret_cast<char*>(m_vector) - (sizeof(ArrayStorage) - sizeof(JSValue)));
- }
-
- inline void setArrayStorage(ArrayStorage *storage)
- {
- m_vector = &storage->m_vector[0];
- }
-
private:
virtual const ClassInfo* classInfo() const { return &info; }
@@ -166,7 +160,7 @@ namespace JSC {
unsigned m_vectorLength; // The valid length of m_vector
int m_indexBias; // The number of JSValue sized blocks before ArrayStorage.
- JSValue* m_vector; // Copy of ArrayStorage.m_vector. Used for quick vector access and to materialize ArrayStorage ptr.
+ ArrayStorage *m_storage;
};
JSArray* asArray(JSValue);
@@ -192,7 +186,7 @@ namespace JSC {
{
JSObject::markChildrenDirect(markStack);
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
unsigned usedVectorLength = std::min(storage->m_length, m_vectorLength);
markStack.appendValues(storage->m_vector, usedVectorLength, MayContainNullValues);
diff --git a/JavaScriptCore/runtime/JSGlobalData.cpp b/JavaScriptCore/runtime/JSGlobalData.cpp
index 065cbe1..abb2db2 100644
--- a/JavaScriptCore/runtime/JSGlobalData.cpp
+++ b/JavaScriptCore/runtime/JSGlobalData.cpp
@@ -85,7 +85,7 @@ void JSGlobalData::storeVPtrs()
void* storage = &cell;
COMPILE_ASSERT(sizeof(JSArray) <= sizeof(CollectorCell), sizeof_JSArray_must_be_less_than_CollectorCell);
- JSCell* jsArray = new (storage) JSArray(JSArray::createStructure(jsNull()));
+ JSCell* jsArray = new (storage) JSArray(JSArray::VPtrStealingHack);
JSGlobalData::jsArrayVPtr = jsArray->vptr();
jsArray->~JSCell();
diff --git a/JavaScriptCore/runtime/JSGlobalData.h b/JavaScriptCore/runtime/JSGlobalData.h
index 63f9ad8..1928f77 100644
--- a/JavaScriptCore/runtime/JSGlobalData.h
+++ b/JavaScriptCore/runtime/JSGlobalData.h
@@ -220,7 +220,9 @@ namespace JSC {
RegExpCache* m_regExpCache;
+#if ENABLE(YARR)
BumpPointerAllocator m_regexAllocator;
+#endif
#ifndef NDEBUG
ThreadIdentifier exclusiveThread;
diff --git a/JavaScriptCore/runtime/JSLock.cpp b/JavaScriptCore/runtime/JSLock.cpp
index a1cffbd..10f4f3f 100644
--- a/JavaScriptCore/runtime/JSLock.cpp
+++ b/JavaScriptCore/runtime/JSLock.cpp
@@ -65,6 +65,12 @@ JSLock::JSLock(ExecState* exec)
lock(m_lockBehavior);
}
+JSLock::JSLock(JSGlobalData* globalData)
+ : m_lockBehavior(globalData->isSharedInstance() ? LockForReal : SilenceAssertionsOnly)
+{
+ lock(m_lockBehavior);
+}
+
void JSLock::lock(JSLockBehavior lockBehavior)
{
#ifdef NDEBUG
diff --git a/JavaScriptCore/runtime/JSLock.h b/JavaScriptCore/runtime/JSLock.h
index 8b015c4..05b388c 100644
--- a/JavaScriptCore/runtime/JSLock.h
+++ b/JavaScriptCore/runtime/JSLock.h
@@ -49,12 +49,14 @@ namespace JSC {
// assertions working, so that clients that use the shared context don't break.
class ExecState;
+ class JSGlobalData;
enum JSLockBehavior { SilenceAssertionsOnly, LockForReal };
class JSLock : public Noncopyable {
public:
JSLock(ExecState*);
+ JSLock(JSGlobalData*);
JSLock(JSLockBehavior lockBehavior)
: m_lockBehavior(lockBehavior)
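
The new constructor lets code that holds only a JSGlobalData, with no ExecState in reach, still follow the locking discipline; the CF activity callback earlier in this change is its first client. A hypothetical helper showing the intended use (collectAllGarbageLocked() is illustrative, not a JSC function, and the sketch assumes the JSC headers):

    #include "Collector.h"
    #include "JSLock.h"

    // Illustrative only: take the API lock straight from the heap's global
    // data and collect; the lock is released when 'lock' leaves scope.
    static void collectAllGarbageLocked(JSC::Heap* heap)
    {
        JSC::JSLock lock(heap->globalData());
        heap->collectAllGarbage();
    }
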
diff --git a/JavaScriptCore/runtime/UStringImpl.h b/JavaScriptCore/runtime/UStringImpl.h
index 08f1fa5..6401d3b 100644
--- a/JavaScriptCore/runtime/UStringImpl.h
+++ b/JavaScriptCore/runtime/UStringImpl.h
@@ -25,6 +25,6 @@
// FIXME: Remove this redundant name!
#include <wtf/text/StringImpl.h>
-namespace JSC { typedef WebCore::StringImpl UStringImpl; }
+namespace JSC { typedef StringImpl UStringImpl; }
#endif
diff --git a/JavaScriptCore/runtime/WeakGCPtr.h b/JavaScriptCore/runtime/WeakGCPtr.h
index 9dce858..ac77cf3 100644
--- a/JavaScriptCore/runtime/WeakGCPtr.h
+++ b/JavaScriptCore/runtime/WeakGCPtr.h
@@ -27,6 +27,7 @@
#define WeakGCPtr_h
#include "Collector.h"
+#include "GCHandle.h"
#include <wtf/Noncopyable.h>
namespace JSC {
@@ -34,23 +35,34 @@ namespace JSC {
// A smart pointer whose get() function returns 0 for cells awaiting destruction.
template <typename T> class WeakGCPtr : Noncopyable {
public:
- WeakGCPtr() : m_ptr(0) { }
+ WeakGCPtr()
+ : m_ptr(0)
+ {
+ }
+
WeakGCPtr(T* ptr) { assign(ptr); }
+ ~WeakGCPtr()
+ {
+ if (m_ptr)
+ m_ptr->pool()->free(m_ptr);
+ }
+
T* get() const
{
- if (!m_ptr || !Heap::isCellMarked(m_ptr))
- return 0;
- return m_ptr;
+ if (m_ptr && m_ptr->isValidPtr())
+ return static_cast<T*>(m_ptr->get());
+ return 0;
}
- bool clear(JSCell* ptr)
+ bool clear(JSCell* p)
{
- if (ptr == m_ptr) {
- m_ptr = 0;
- return true;
- }
- return false;
+ if (!m_ptr || m_ptr->get() != p)
+ return false;
+
+ m_ptr->pool()->free(m_ptr);
+ m_ptr = 0;
+ return true;
}
T& operator*() const { return *get(); }
@@ -62,7 +74,7 @@ public:
#if COMPILER(WINSCW)
operator bool() const { return m_ptr; }
#else
- typedef T* WeakGCPtr::*UnspecifiedBoolType;
+ typedef WeakGCHandle* WeakGCPtr::*UnspecifiedBoolType;
operator UnspecifiedBoolType() const { return get() ? &WeakGCPtr::m_ptr : 0; }
#endif
@@ -73,14 +85,16 @@ public:
#endif
private:
- void assign(T* ptr)
+ void assign(JSCell* ptr)
{
ASSERT(ptr);
- Heap::markCell(ptr);
- m_ptr = ptr;
+ if (m_ptr)
+ m_ptr->set(ptr);
+ else
+ m_ptr = Heap::heap(ptr)->addWeakGCHandle(ptr);
}
- T* m_ptr;
+ WeakGCHandle* m_ptr;
};
template <typename T> inline WeakGCPtr<T>& WeakGCPtr<T>::operator=(T* optr)
@@ -129,7 +143,7 @@ template <typename T, typename U> inline WeakGCPtr<T> const_pointer_cast(const W
return WeakGCPtr<T>(const_cast<T*>(p.get()));
}
-template <typename T> inline T* getPtr(const WeakGCPtr<T>& p)
+template <typename T> inline T* get(const WeakGCPtr<T>& p)
{
return p.get();
}
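
The WeakGCPtr change is behavioral as well as mechanical: get() no longer consults mark bits but a pooled handle that the collector invalidates, so a collected cell reads back as null, and the destructor returns the handle slot to its pool. A reduced model of that contract with stand-in types:

    #include <cstdio>

    struct FakeHandle {                 // stand-in for WeakGCHandle
        void* cell;
        bool valid;
        void* get() const { return valid ? cell : nullptr; }
    };

    template<typename T>
    class WeakRef {                     // stand-in for WeakGCPtr<T>
    public:
        explicit WeakRef(FakeHandle* handle) : m_handle(handle) {}
        T* get() const { return m_handle ? static_cast<T*>(m_handle->get()) : nullptr; }
    private:
        FakeHandle* m_handle;           // the real class frees this back to its pool
    };

    int main()
    {
        int cell = 7;
        FakeHandle slot = { &cell, true };
        WeakRef<int> weak(&slot);
        std::printf("before collection: %p\n", static_cast<void*>(weak.get()));
        slot.valid = false;             // the collector invalidates the handle
        std::printf("after collection:  %p\n", static_cast<void*>(weak.get()));
    }
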
diff --git a/JavaScriptCore/wscript b/JavaScriptCore/wscript
index 4f9d868..8f62349 100644
--- a/JavaScriptCore/wscript
+++ b/JavaScriptCore/wscript
@@ -29,7 +29,7 @@ import commands
from settings import *
-jscore_excludes = ['jsc.cpp', 'ucptable.cpp','ProfilerServer.mm']
+jscore_excludes = ['jsc.cpp', 'ucptable.cpp','ProfilerServer.mm', 'ExecutableAllocatorPosix.cpp']
jscore_excludes.extend(get_excludes(jscore_dir, ['*Brew.cpp', '*CF.cpp', '*Symbian.cpp']))
sources = []
@@ -37,7 +37,7 @@ sources = []
jscore_excludes.extend(get_excludes(jscore_dir, ['*None.cpp']))
if building_on_win32:
- jscore_excludes += ['ExecutableAllocatorPosix.cpp', 'MarkStackPosix.cpp', 'ThreadingPthreads.cpp']
+ jscore_excludes += ['MarkStackPosix.cpp', 'ThreadingPthreads.cpp']
sources += ['runtime/MarkStackWin.cpp']
else:
jscore_excludes.append('JSStringRefBSTR.cpp')
diff --git a/JavaScriptCore/wtf/Bitmap.h b/JavaScriptCore/wtf/Bitmap.h
new file mode 100644
index 0000000..4dd88f6
--- /dev/null
+++ b/JavaScriptCore/wtf/Bitmap.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+#ifndef Bitmap_h
+#define Bitmap_h
+
+#include "FixedArray.h"
+#include "StdLibExtras.h"
+
+#include <stdint.h>
+
+namespace WTF {
+
+template<size_t size>
+class Bitmap {
+private:
+ typedef uint32_t WordType;
+
+public:
+ Bitmap();
+
+ bool get(size_t) const;
+ void set(size_t);
+ void clear(size_t);
+ void clearAll();
+ void advanceToNextFreeBit(size_t&) const;
+ size_t count(size_t = 0) const;
+    bool isEmpty() const;
+    bool isFull() const;
+
+private:
+ static const WordType wordSize = sizeof(WordType) * 8;
+ static const WordType words = (size + wordSize - 1) / wordSize;
+
+    // The literal '1' is of type signed int. We want to use an unsigned
+    // version of the correct size when doing the calculations, because if
+    // WordType is larger than int, '1 << 31' will first be sign extended
+    // and then cast to unsigned, meaning that set(31) when WordType is a
+    // 64 bit unsigned int would give 0xffffffff80000000.
+ static const WordType one = 1;
+
+ FixedArray<WordType, words> bits;
+};
+
+template<size_t size>
+inline Bitmap<size>::Bitmap()
+{
+ clearAll();
+}
+
+template<size_t size>
+inline bool Bitmap<size>::get(size_t n) const
+{
+ return !!(bits[n / wordSize] & (one << (n % wordSize)));
+}
+
+template<size_t size>
+inline void Bitmap<size>::set(size_t n)
+{
+ bits[n / wordSize] |= (one << (n % wordSize));
+}
+
+template<size_t size>
+inline void Bitmap<size>::clear(size_t n)
+{
+ bits[n / wordSize] &= ~(one << (n % wordSize));
+}
+
+template<size_t size>
+inline void Bitmap<size>::clearAll()
+{
+ memset(bits.data(), 0, sizeof(bits));
+}
+
+template<size_t size>
+inline void Bitmap<size>::advanceToNextFreeBit(size_t& start) const
+{
+ if (!~bits[start / wordSize])
+ start = ((start / wordSize) + 1) * wordSize;
+ else
+ ++start;
+}
+
+template<size_t size>
+inline size_t Bitmap<size>::count(size_t start) const
+{
+ size_t result = 0;
+ for ( ; (start % wordSize); ++start) {
+ if (get(start))
+ ++result;
+ }
+ for (size_t i = start / wordSize; i < words; ++i)
+ result += WTF::bitCount(bits[i]);
+ return result;
+}
+
+template<size_t size>
+inline bool Bitmap<size>::isEmpty() const
+{
+ for (size_t i = 0; i < words; ++i)
+ if (bits[i])
+ return false;
+ return true;
+}
+
+template<size_t size>
+inline bool Bitmap<size>::isFull() const
+{
+ for (size_t i = 0; i < words; ++i)
+ if (~bits[i])
+ return false;
+ return true;
+}
+
+}
+#endif
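
Bitmap addresses bit n as word n / wordSize, bit position n % wordSize, and, per the comment above, shifts an unsigned value of the word's own width. A standalone illustration of the arithmetic:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    typedef uint32_t WordType;
    static const WordType one = 1;      // unsigned and word-sized: '1 << 31' on a
                                        // signed int would be undefined behavior
    static const size_t wordSize = sizeof(WordType) * 8;

    int main()
    {
        WordType bits[2] = { 0, 0 };                     // a 64-bit bitmap
        size_t n = 37;
        bits[n / wordSize] |= one << (n % wordSize);     // set(37): word 1, bit 5
        bool isSet = bits[n / wordSize] & (one << (n % wordSize));
        std::printf("bit %zu -> word %zu, set=%d\n", n, n / wordSize, int(isSet));
        bits[n / wordSize] &= ~(one << (n % wordSize));  // clear(37)
        std::printf("after clear: set=%d\n",
            int((bits[n / wordSize] >> (n % wordSize)) & 1));
    }
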
diff --git a/JavaScriptCore/wtf/FastMalloc.cpp b/JavaScriptCore/wtf/FastMalloc.cpp
index 9dfbc6b..c440417 100644
--- a/JavaScriptCore/wtf/FastMalloc.cpp
+++ b/JavaScriptCore/wtf/FastMalloc.cpp
@@ -4454,10 +4454,10 @@ extern "C" {
malloc_introspection_t jscore_fastmalloc_introspection = { &FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print,
&FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics
-#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) && !OS(IPHONE_OS)
+#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD)
, 0 // zone_locked will not be called on the zone unless it advertises itself as version five or higher.
#endif
-#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) && !defined(BUILDING_ON_SNOW_LEOPARD) && !OS(IPHONE_OS)
+#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) && !defined(BUILDING_ON_SNOW_LEOPARD)
, 0, 0, 0, 0 // These members will not be used unless the zone advertises itself as version seven or higher.
#endif
diff --git a/JavaScriptCore/wtf/Forward.h b/JavaScriptCore/wtf/Forward.h
index 448de7d..a2cc75b 100644
--- a/JavaScriptCore/wtf/Forward.h
+++ b/JavaScriptCore/wtf/Forward.h
@@ -31,6 +31,12 @@ namespace WTF {
template<typename T> class PassRefPtr;
template<typename T> class RefPtr;
template<typename T, size_t inlineCapacity> class Vector;
+
+ class AtomicString;
+ class AtomicStringImpl;
+ class String;
+ class StringBuffer;
+ class StringImpl;
}
using WTF::ListRefPtr;
@@ -41,4 +47,10 @@ using WTF::PassRefPtr;
using WTF::RefPtr;
using WTF::Vector;
+using WTF::AtomicString;
+using WTF::AtomicStringImpl;
+using WTF::String;
+using WTF::StringBuffer;
+using WTF::StringImpl;
+
#endif // WTF_Forward_h
diff --git a/JavaScriptCore/wtf/PageAllocation.cpp b/JavaScriptCore/wtf/PageAllocation.cpp
index 4cf2ea9..f3fe997 100644
--- a/JavaScriptCore/wtf/PageAllocation.cpp
+++ b/JavaScriptCore/wtf/PageAllocation.cpp
@@ -26,206 +26,23 @@
#include "config.h"
#include "PageAllocation.h"
-
-#if HAVE(ERRNO_H)
-#include <errno.h>
-#endif
-
-#if HAVE(MMAP)
-#include <sys/mman.h>
-#include <unistd.h>
-#endif
-
-#if OS(WINDOWS)
-#include "windows.h"
-#endif
-
-#if OS(SYMBIAN)
-#include <e32hal.h>
-#endif
+#include "PageReservation.h"
namespace WTF {
-#if HAVE(MMAP)
+size_t PageAllocation::s_pageSize = 0;
-bool PageAllocation::commit(void* start, size_t size, bool, bool) const
-{
-#if HAVE(MADV_FREE_REUSE)
- while (madvise(start, size, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
-#else
- UNUSED_PARAM(start);
- UNUSED_PARAM(size);
-#endif
- return true;
-}
+#ifndef NDEBUG
-void PageAllocation::decommit(void* start, size_t size) const
+int PageAllocation::lastError()
{
-#if HAVE(MADV_FREE_REUSE)
- while (madvise(start, size, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
-#elif HAVE(MADV_FREE)
- while (madvise(start, size, MADV_FREE) == -1 && errno == EAGAIN) { }
-#elif HAVE(MADV_DONTNEED)
- while (madvise(start, size, MADV_DONTNEED) == -1 && errno == EAGAIN) { }
+#if OS(WINCE)
+ return GetLastError();
#else
- UNUSED_PARAM(start);
- UNUSED_PARAM(size);
+ return errno;
#endif
}
-PageAllocation PageAllocation::allocate(size_t size, Usage usage, bool writable, bool executable)
-{
- return allocateAt(0, false, size, usage, writable, executable);
-}
-
-PageAllocation PageAllocation::reserve(size_t size, Usage usage, bool writable, bool executable)
-{
- return reserveAt(0, false, size, usage, writable, executable);
-}
-
-PageAllocation PageAllocation::allocateAt(void* address, bool fixed, size_t size, Usage usage, bool writable, bool executable)
-{
- int flags = MAP_PRIVATE | MAP_ANON;
- if (fixed)
- flags |= MAP_FIXED;
-
- int protection = PROT_READ;
- if (writable)
- protection |= PROT_WRITE;
- if (executable)
- protection |= PROT_EXEC;
-
- void* base = mmap(address, size, protection, flags, usage, 0);
- if (base == MAP_FAILED)
- base = 0;
-
- return PageAllocation(base, size);
-}
-
-PageAllocation PageAllocation::reserveAt(void* address, bool fixed, size_t size, Usage usage, bool writable, bool executable)
-{
- PageAllocation result = allocateAt(address, fixed, size, usage, writable, executable);
- if (!!result)
- result.decommit(result.base(), size);
- return result;
-}
-
-void PageAllocation::deallocate()
-{
- int result = munmap(m_base, m_size);
- ASSERT_UNUSED(result, !result);
- m_base = 0;
-}
-
-size_t PageAllocation::pagesize()
-{
- static size_t size = 0;
- if (!size)
- size = getpagesize();
- return size;
-}
-
-#elif HAVE(VIRTUALALLOC)
-
-static DWORD protection(bool writable, bool executable)
-{
- if (executable)
- return writable ?PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ;
- return writable ?PAGE_READWRITE : PAGE_READONLY;
-}
-
-bool PageAllocation::commit(void* start, size_t size, bool writable, bool executable) const
-{
- return VirtualAlloc(start, size, MEM_COMMIT, protection(writable, executable)) == start;
-}
-
-void PageAllocation::decommit(void* start, size_t size) const
-{
- VirtualFree(start, size, MEM_DECOMMIT);
-}
-
-PageAllocation PageAllocation::allocate(size_t size, Usage, bool writable, bool executable)
-{
- return PageAllocation(VirtualAlloc(0, size, MEM_COMMIT | MEM_RESERVE, protection(writable, executable)), size);
-}
-
-PageAllocation PageAllocation::reserve(size_t size, Usage usage, bool writable, bool executable)
-{
- return PageAllocation(VirtualAlloc(0, size, MEM_RESERVE, protection(writable, executable)), size);
-}
-
-void PageAllocation::deallocate()
-{
- VirtualFree(m_base, 0, MEM_RELEASE);
- m_base = 0;
-}
-
-size_t PageAllocation::pagesize()
-{
- static size_t size = 0;
- if (!size) {
- SYSTEM_INFO system_info;
- GetSystemInfo(&system_info);
- size = system_info.dwPageSize;
- }
- return size;
-}
-
-#elif OS(SYMBIAN)
-
-bool PageAllocation::commit(void* start, size_t size, bool writable, bool executable) const
-{
- if (m_chunk) {
- intptr_t offset = reinterpret_cast<intptr_t>(base()) - reinterpret_cast<intptr_t>(start);
- m_chunk->Commit(offset, size);
- }
- return true;
-}
-
-void PageAllocation::decommit(void* start, size_t size) const
-{
- if (m_chunk) {
- intptr_t offset = reinterpret_cast<intptr_t>(base()) - reinterpret_cast<intptr_t>(start);
- m_chunk->Decommit(offset, size);
- }
-}
-
-PageAllocation PageAllocation::allocate(size_t size, Usage usage, bool writable, bool executable)
-{
- if (!executable)
- return PageAllocation(fastMalloc(size), size, 0);
- RChunk* rchunk = new RChunk();
- TInt errorCode = rchunk->CreateLocalCode(size, size);
- return PageAllocation(rchunk->Base(), size, rchunk);
-}
-
-PageAllocation PageAllocation::reserve(size_t size, Usage usage, bool writable, bool executable)
-{
- if (!executable)
- return PageAllocation(fastMalloc(size), size, 0);
- RChunk* rchunk = new RChunk();
- TInt errorCode = rchunk->CreateLocalCode(0, size);
- return PageAllocation(rchunk->Base(), size, rchunk);
-}
-
-void PageAllocation::deallocate()
-{
- if (m_chunk) {
- m_chunk->Close();
- delete m_chunk;
- } else
- fastFree(m_base);
- m_base = 0;
-}
-
-size_t PageAllocation::pagesize()
-{
- static TInt page_size = 0;
- if (!page_size)
- UserHal::PageSizeInBytes(page_size);
- return page_size;
-}
-
#endif
}
diff --git a/JavaScriptCore/wtf/PageAllocation.h b/JavaScriptCore/wtf/PageAllocation.h
index b846482..26d53a5 100644
--- a/JavaScriptCore/wtf/PageAllocation.h
+++ b/JavaScriptCore/wtf/PageAllocation.h
@@ -26,21 +26,63 @@
#ifndef PageAllocation_h
#define PageAllocation_h
+#include <wtf/Assertions.h>
#include <wtf/UnusedParam.h>
#include <wtf/VMTags.h>
+#if OS(DARWIN)
+#include <mach/mach_init.h>
+#include <mach/vm_map.h>
+#endif
+
+#if OS(HAIKU)
+#include <OS.h>
+#endif
+
+#if OS(WINDOWS)
+#include <malloc.h>
+#include <windows.h>
+#endif
+
#if OS(SYMBIAN)
+#include <e32hal.h>
#include <e32std.h>
#endif
+#if HAVE(ERRNO_H)
+#include <errno.h>
+#endif
+
#if HAVE(MMAP)
-#define PAGE_ALLOCATION_ALLOCATE_AT 1
-#else
-#define PAGE_ALLOCATION_ALLOCATE_AT 0
+#include <sys/mman.h>
+#include <unistd.h>
#endif
namespace WTF {
+/*
+ PageAllocation
+
+ The PageAllocation class provides a cross-platform memory allocation interface
+ with similar capabilities to posix mmap/munmap. Memory is allocated by calling
+ PageAllocation::allocate, and deallocated by calling deallocate on the
+ PageAllocation object. The PageAllocation holds the allocation's base pointer
+ and size.
+
+ The allocate method is passed the size required (which must be a multiple of
+ the system page size, which can be accessed using PageAllocation::pageSize).
+ Callers may also optionally provide a flag indicating the usage (for use by
+ system memory usage tracking tools, where implemented), and boolean values
+ specifying the required protection (defaulting to writable, non-executable).
+
+ Where HAVE(PAGE_ALLOCATE_AT) and HAVE(PAGE_ALLOCATE_ALIGNED) are available,
+ memory may also be allocated at a specified address or with a specified
+ alignment, respectively. PageAllocation::allocateAt takes an address to try
+ to allocate at, and a boolean indicating whether this behaviour is strictly
+ required (that is, if the address is unavailable, whether memory at another
+ address should be allocated instead). PageAllocation::allocateAligned
+ requires that the size is a power of two that is >= the system page size.
+*/
class PageAllocation {
public:
enum Usage {
@@ -60,42 +102,56 @@ public:
{
}
- // Create a PageAllocation object representing a sub-region of an existing allocation;
- // deallocate should never be called on an object represnting a subregion, only on the
- // initial allocation.
- PageAllocation(void* base, size_t size, const PageAllocation& parent)
- : m_base(base)
- , m_size(size)
-#if OS(SYMBIAN)
- , m_chunk(parent.m_chunk)
-#endif
+ bool operator!() const { return !m_base; }
+ void* base() const { return m_base; }
+ size_t size() const { return m_size; }
+
+ static PageAllocation allocate(size_t size, Usage usage = UnknownUsage, bool writable = true, bool executable = false)
{
-#if defined(NDEBUG) && !OS(SYMBIAN)
- UNUSED_PARAM(parent);
-#endif
- ASSERT(base >= parent.m_base);
- ASSERT(size <= parent.m_size);
- ASSERT(static_cast<char*>(base) + size <= static_cast<char*>(parent.m_base) + parent.m_size);
+ ASSERT(isPageAligned(size));
+ return systemAllocate(size, usage, writable, executable);
}
- void* base() const { return m_base; }
- size_t size() const { return m_size; }
+#if HAVE(PAGE_ALLOCATE_AT)
+ static PageAllocation allocateAt(void* address, bool fixed, size_t size, Usage usage = UnknownUsage, bool writable = true, bool executable = false)
+ {
+ ASSERT(isPageAligned(address));
+ ASSERT(isPageAligned(size));
+ return systemAllocateAt(address, fixed, size, usage, writable, executable);
+ }
+#endif
- bool operator!() const { return !m_base; }
+#if HAVE(PAGE_ALLOCATE_ALIGNED)
+ static PageAllocation allocateAligned(size_t size, Usage usage = UnknownUsage)
+ {
+ ASSERT(isPageAligned(size));
+ ASSERT(isPowerOfTwo(size));
+ return systemAllocateAligned(size, usage);
+ }
+#endif
+
+ void deallocate()
+ {
+ ASSERT(m_base);
+ systemDeallocate(true);
+ }
- bool commit(void*, size_t, bool writable = true, bool executable = false) const;
- void decommit(void*, size_t) const;
- void deallocate();
+ static size_t pageSize()
+ {
+ if (!s_pageSize)
+ s_pageSize = systemPageSize();
+ ASSERT(isPowerOfTwo(s_pageSize));
+ return s_pageSize;
+ }
- static PageAllocation allocate(size_t, Usage = UnknownUsage, bool writable = true, bool executable = false);
- static PageAllocation reserve(size_t, Usage = UnknownUsage, bool writable = true, bool executable = false);
-#if PAGE_ALLOCATION_ALLOCATE_AT
- static PageAllocation allocateAt(void* address, bool fixed, size_t, Usage = UnknownUsage, bool writable = true, bool executable = false);
- static PageAllocation reserveAt(void* address, bool fixed, size_t, Usage = UnknownUsage, bool writable = true, bool executable = false);
+#ifndef NDEBUG
+ static bool isPageAligned(void* address) { return !(reinterpret_cast<intptr_t>(address) & (pageSize() - 1)); }
+ static bool isPageAligned(size_t size) { return !(size & (pageSize() - 1)); }
+ static bool isPowerOfTwo(size_t size) { return !(size & (size - 1)); }
+ static int lastError();
#endif
- static size_t pagesize();
-private:
+protected:
#if OS(SYMBIAN)
PageAllocation(void* base, size_t size, RChunk* chunk)
: m_base(base)
@@ -111,13 +167,193 @@ private:
}
#endif
+ static PageAllocation systemAllocate(size_t, Usage, bool, bool);
+#if HAVE(PAGE_ALLOCATE_AT)
+ static PageAllocation systemAllocateAt(void*, bool, size_t, Usage, bool, bool);
+#endif
+#if HAVE(PAGE_ALLOCATE_ALIGNED)
+ static PageAllocation systemAllocateAligned(size_t, Usage);
+#endif
+ // systemDeallocate takes a parameter indicating whether memory is currently committed
+ // (this should always be true for PageAllocation, false for PageReservation).
+ void systemDeallocate(bool committed);
+ static size_t systemPageSize();
+
void* m_base;
size_t m_size;
#if OS(SYMBIAN)
RChunk* m_chunk;
#endif
+
+ static JS_EXPORTDATA size_t s_pageSize;
};
+
+#if HAVE(MMAP)
+
+
+inline PageAllocation PageAllocation::systemAllocate(size_t size, Usage usage, bool writable, bool executable)
+{
+ return systemAllocateAt(0, false, size, usage, writable, executable);
+}
+
+inline PageAllocation PageAllocation::systemAllocateAt(void* address, bool fixed, size_t size, Usage usage, bool writable, bool executable)
+{
+ int protection = PROT_READ;
+ if (writable)
+ protection |= PROT_WRITE;
+ if (executable)
+ protection |= PROT_EXEC;
+
+ int flags = MAP_PRIVATE | MAP_ANON;
+ if (fixed)
+ flags |= MAP_FIXED;
+
+#if OS(DARWIN) && !defined(BUILDING_ON_TIGER)
+ int fd = usage;
+#else
+ int fd = -1;
+#endif
+
+ void* base = mmap(address, size, protection, flags, fd, 0);
+ if (base == MAP_FAILED)
+ base = 0;
+ return PageAllocation(base, size);
+}
+
+inline PageAllocation PageAllocation::systemAllocateAligned(size_t size, Usage usage)
+{
+#if OS(DARWIN)
+ vm_address_t address = 0;
+ int flags = VM_FLAGS_ANYWHERE;
+ if (usage != -1)
+ flags |= usage;
+ vm_map(current_task(), &address, size, (size - 1), flags, MEMORY_OBJECT_NULL, 0, FALSE, PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE | PROT_EXEC, VM_INHERIT_DEFAULT);
+ return PageAllocation(reinterpret_cast<void*>(address), size);
+#elif HAVE(POSIX_MEMALIGN)
+ void* address;
+ posix_memalign(&address, size, size);
+ return PageAllocation(address, size);
+#else
+ size_t extra = size - pageSize();
+
+ // Check for overflow.
+ if ((size + extra) < size)
+ return PageAllocation(0, size);
+
+#if OS(DARWIN) && !defined(BUILDING_ON_TIGER)
+ int fd = usage;
+#else
+ int fd = -1;
+#endif
+ void* mmapResult = mmap(0, size + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, fd, 0);
+ if (mmapResult == MAP_FAILED)
+ return PageAllocation(0, size);
+ uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);
+
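+ // The mapping was over-allocated by 'extra' bytes; unmap the misaligned head
+ // ('adjust' bytes) and the unused tail so exactly 'size' aligned bytes remain.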
+ size_t adjust = 0;
+ if ((address & (size - 1)))
+ adjust = size - (address & (size - 1));
+ if (adjust > 0)
+ munmap(reinterpret_cast<char*>(address), adjust);
+ if (adjust < extra)
+ munmap(reinterpret_cast<char*>(address + adjust + size), extra - adjust);
+ address += adjust;
+
+ return PageAllocation(reinterpret_cast<void*>(address), size);
+#endif
+}
+
+inline void PageAllocation::systemDeallocate(bool)
+{
+ int result = munmap(m_base, m_size);
+ ASSERT_UNUSED(result, !result);
+ m_base = 0;
+}
+
+inline size_t PageAllocation::systemPageSize()
+{
+ return getpagesize();
+}
+
+
+#elif HAVE(VIRTUALALLOC)
+
+
+inline PageAllocation PageAllocation::systemAllocate(size_t size, Usage, bool writable, bool executable)
+{
+ DWORD protection = executable ?
+ (writable ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ) :
+ (writable ? PAGE_READWRITE : PAGE_READONLY);
+ return PageAllocation(VirtualAlloc(0, size, MEM_COMMIT | MEM_RESERVE, protection), size);
+}
+
+#if HAVE(ALIGNED_MALLOC)
+inline PageAllocation PageAllocation::systemAllocateAligned(size_t size, Usage usage)
+{
+#if COMPILER(MINGW) && !COMPILER(MINGW64)
+ void* address = __mingw_aligned_malloc(size, size);
+#else
+ void* address = _aligned_malloc(size, size);
+#endif
+ memset(address, 0, size);
+ return PageAllocation(address, size);
+}
+#endif
+
+inline void PageAllocation::systemDeallocate(bool committed)
+{
+#if OS(WINCE)
+ if (committed)
+ VirtualFree(m_base, m_size, MEM_DECOMMIT);
+#else
+ UNUSED_PARAM(committed);
+#endif
+ VirtualFree(m_base, 0, MEM_RELEASE);
+ m_base = 0;
+}
+
+inline size_t PageAllocation::systemPageSize()
+{
+ static size_t size = 0;
+ SYSTEM_INFO system_info;
+ GetSystemInfo(&system_info);
+ size = system_info.dwPageSize;
+ return size;
+}
+
+
+#elif OS(SYMBIAN)
+
+
+inline PageAllocation PageAllocation::systemAllocate(size_t size, Usage usage, bool writable, bool executable)
+{
+ RChunk* rchunk = new RChunk();
+ if (executable)
+ rchunk->CreateLocalCode(size, size);
+ else
+ rchunk->CreateLocal(size, size);
+ return PageAllocation(rchunk->Base(), size, rchunk);
+}
+
+inline void PageAllocation::systemDeallocate(bool)
+{
+ m_chunk->Close();
+ delete m_chunk;
+ m_base = 0;
+}
+
+inline size_t PageAllocation::systemPageSize()
+{
+ static TInt page_size = 0;
+ UserHal::PageSizeInBytes(page_size);
+ return page_size;
+}
+
+
+#endif
+
+
}
using WTF::PageAllocation;
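(A sketch of the allocate/deallocate flow described by the header comment above — illustrative only; the function name and the four-page size are arbitrary.)

#include <string.h>
#include <wtf/PageAllocation.h>

static void allocationExample()
{
    size_t size = WTF::PageAllocation::pageSize() * 4; // size must be page-aligned

    // Committed, writable, non-executable memory (the defaults).
    WTF::PageAllocation allocation = WTF::PageAllocation::allocate(size);
    if (!allocation)
        return; // the system allocation failed

    memset(allocation.base(), 0, allocation.size());
    allocation.deallocate();
}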
diff --git a/JavaScriptCore/wtf/PageReservation.h b/JavaScriptCore/wtf/PageReservation.h
new file mode 100644
index 0000000..906b5a4
--- /dev/null
+++ b/JavaScriptCore/wtf/PageReservation.h
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PageReservation_h
+#define PageReservation_h
+
+#include <wtf/PageAllocation.h>
+
+namespace WTF {
+
+/*
+ PageReservation
+
+ Like PageAllocation, the PageReservation class provides a cross-platform memory
+ allocation interface, but with a set of capabilities more similar to that of
+ VirtualAlloc than posix mmap. PageReservation can be used to allocate virtual
+ memory without committing physical memory pages using PageReservation::reserve.
+ Following a call to reserve, all memory in the region is in a decommitted state,
+ in which the memory should not be used (accessing the memory may cause a fault).
+
+ Before using memory it must be committed by calling commit, which is passed start
+ and size values (both of which require system page size granularity). Once the
+ committed memory is no longer needed, 'decommit' may be called to return the
+ memory to its decommitted state. Commit should only be called on memory that is
+ currently decommitted, and decommit should only be called on memory regions that
+ are currently committed. All memory should be decommitted before the reservation
+ is deallocated. Values in memory may not be retained across a pair of calls if
+ the region of memory is decommitted and then committed again.
+
+ Where HAVE(PAGE_ALLOCATE_AT) is available a PageReservation::reserveAt method
+ also exists, with behaviour mirroring PageAllocation::allocateAt.
+
+ Memory protection should not be changed on decommitted memory, and if protection
+ is changed on memory while it is committed, it should be returned to the original
+ protection before decommit is called.
+
+ Note: Inherits from PageAllocation privately to prevent clients accidentally
+ calling PageAllocation::deallocate on a PageReservation.
+*/
+class PageReservation : private PageAllocation {
+public:
+ PageReservation()
+ {
+ }
+
+ using PageAllocation::operator!;
+ using PageAllocation::base;
+ using PageAllocation::size;
+
+ bool commit(void* start, size_t size)
+ {
+ ASSERT(m_base);
+ ASSERT(isPageAligned(start));
+ ASSERT(isPageAligned(size));
+
+ bool committed = systemCommit(start, size);
+#ifndef NDEBUG
+ if (committed)
+ m_committed += size;
+#endif
+ return committed;
+ }
+ void decommit(void* start, size_t size)
+ {
+ ASSERT(m_base);
+ ASSERT(isPageAligned(start));
+ ASSERT(isPageAligned(size));
+
+#ifndef NDEBUG
+ m_committed -= size;
+#endif
+ systemDecommit(start, size);
+ }
+
+ static PageReservation reserve(size_t size, Usage usage = UnknownUsage, bool writable = true, bool executable = false)
+ {
+ ASSERT(isPageAligned(size));
+ return systemReserve(size, usage, writable, executable);
+ }
+
+#if HAVE(PAGE_ALLOCATE_AT)
+ static PageReservation reserveAt(void* address, bool fixed, size_t size, Usage usage = UnknownUsage, bool writable = true, bool executable = false)
+ {
+ ASSERT(isPageAligned(address));
+ ASSERT(isPageAligned(size));
+ return systemReserveAt(address, fixed, size, usage, writable, executable);
+ }
+#endif
+
+ void deallocate()
+ {
+ ASSERT(m_base);
+ ASSERT(!m_committed);
+ systemDeallocate(false);
+ }
+
+#ifndef NDEBUG
+ using PageAllocation::lastError;
+#endif
+
+private:
+#if OS(SYMBIAN)
+ PageReservation(void* base, size_t size, RChunk* chunk)
+ : PageAllocation(base, size, chunk)
+#else
+ PageReservation(void* base, size_t size)
+ : PageAllocation(base, size)
+#endif
+#ifndef NDEBUG
+ , m_committed(0)
+#endif
+ {
+ }
+
+ bool systemCommit(void*, size_t);
+ void systemDecommit(void*, size_t);
+ static PageReservation systemReserve(size_t, Usage, bool, bool);
+#if HAVE(PAGE_ALLOCATE_AT)
+ static PageReservation systemReserveAt(void*, bool, size_t, Usage, bool, bool);
+#endif
+
+#if HAVE(VIRTUALALLOC)
+ DWORD m_protection;
+#endif
+#ifndef NDEBUG
+ size_t m_committed;
+#endif
+};
+
+
+#if HAVE(MMAP)
+
+
+inline bool PageReservation::systemCommit(void* start, size_t size)
+{
+#if HAVE(MADV_FREE_REUSE)
+ while (madvise(start, size, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
+#else
+ UNUSED_PARAM(start);
+ UNUSED_PARAM(size);
+#endif
+ return true;
+}
+
+inline void PageReservation::systemDecommit(void* start, size_t size)
+{
+#if HAVE(MADV_FREE_REUSE)
+ while (madvise(start, size, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
+#elif HAVE(MADV_FREE)
+ while (madvise(start, size, MADV_FREE) == -1 && errno == EAGAIN) { }
+#elif HAVE(MADV_DONTNEED)
+ while (madvise(start, size, MADV_DONTNEED) == -1 && errno == EAGAIN) { }
+#else
+ UNUSED_PARAM(start);
+ UNUSED_PARAM(size);
+#endif
+}
+
+inline PageReservation PageReservation::systemReserve(size_t size, Usage usage, bool writable, bool executable)
+{
+ return systemReserveAt(0, false, size, usage, writable, executable);
+}
+
+inline PageReservation PageReservation::systemReserveAt(void* address, bool fixed, size_t size, Usage usage, bool writable, bool executable)
+{
+ void* base = systemAllocateAt(address, fixed, size, usage, writable, executable).base();
+#if HAVE(MADV_FREE_REUSE)
+ // When using MADV_FREE_REUSE we keep all decommitted memory marked as REUSABLE.
+ // We call REUSE on commit, and REUSABLE on decommit.
+ if (base)
+ while (madvise(base, size, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
+#endif
+ return PageReservation(base, size);
+}
+
+
+#elif HAVE(VIRTUALALLOC)
+
+
+inline bool PageReservation::systemCommit(void* start, size_t size)
+{
+ return VirtualAlloc(start, size, MEM_COMMIT, m_protection) == start;
+}
+
+inline void PageReservation::systemDecommit(void* start, size_t size)
+{
+ VirtualFree(start, size, MEM_DECOMMIT);
+}
+
+inline PageReservation PageReservation::systemReserve(size_t size, Usage usage, bool writable, bool executable)
+{
+ // Record the protection for use during commit.
+ DWORD protection = executable ?
+ (writable ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ) :
+ (writable ? PAGE_READWRITE : PAGE_READONLY);
+ PageReservation reservation(VirtualAlloc(0, size, MEM_RESERVE, protection), size);
+ reservation.m_protection = protection;
+ return reservation;
+}
+
+
+#elif OS(SYMBIAN)
+
+
+inline bool PageReservation::systemCommit(void* start, size_t size)
+{
+ intptr_t offset = reinterpret_cast<intptr_t>(m_base) - reinterpret_cast<intptr_t>(start);
+ m_chunk->Commit(offset, size);
+ return true;
+}
+
+inline void PageReservation::systemDecommit(void* start, size_t size)
+{
+ intptr_t offset = reinterpret_cast<intptr_t>(m_base) - reinterpret_cast<intptr_t>(start);
+ m_chunk->Decommit(offset, size);
+}
+
+inline PageReservation PageReservation::systemReserve(size_t size, Usage usage, bool writable, bool executable)
+{
+ RChunk* rchunk = new RChunk();
+ if (executable)
+ rchunk->CreateLocalCode(0, size);
+ else
+ rchunk->CreateDisconnectedLocal(0, 0, size);
+ return PageReservation(rchunk->Base(), size, rchunk);
+}
+
+
+#endif
+
+
+}
+
+using WTF::PageReservation;
+
+#endif // PageReservation_h
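(A sketch of the reserve/commit/decommit lifecycle documented above — illustrative only; the sixteen-page reservation is an arbitrary example.)

#include <string.h>
#include <wtf/PageReservation.h>

static void reservationExample()
{
    size_t page = WTF::PageAllocation::pageSize();

    // Reserve address space without committing physical pages.
    WTF::PageReservation reservation = WTF::PageReservation::reserve(page * 16);
    if (!reservation)
        return;

    // Commit a page before touching it; decommit it once it is no longer needed.
    void* firstPage = reservation.base();
    if (reservation.commit(firstPage, page)) {
        memset(firstPage, 0, page);
        reservation.decommit(firstPage, page);
    }

    // Everything must be decommitted before the reservation is deallocated.
    reservation.deallocate();
}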
diff --git a/JavaScriptCore/wtf/Platform.h b/JavaScriptCore/wtf/Platform.h
index eca4248..95eb67f 100644
--- a/JavaScriptCore/wtf/Platform.h
+++ b/JavaScriptCore/wtf/Platform.h
@@ -142,6 +142,8 @@
#define WTF_MIPS_ARCH_REV __mips_isa_rev
#define WTF_MIPS_ISA_REV(v) (defined WTF_MIPS_ARCH_REV && WTF_MIPS_ARCH_REV == v)
#define WTF_MIPS_DOUBLE_FLOAT (defined __mips_hard_float && !defined __mips_single_float)
+/* MIPS requires allocators to use aligned memory */
+#define WTF_USE_ARENA_ALLOC_ALIGNMENT_INTEGER 1
#endif /* MIPS */
/* CPU(PPC) - PowerPC 32-bit */
@@ -368,12 +370,12 @@
#endif
-/* OS(IPHONE_OS) - iPhone OS */
-/* OS(MAC_OS_X) - Mac OS X (not including iPhone OS) */
+/* OS(IOS) - iOS */
+/* OS(MAC_OS_X) - Mac OS X (not including iOS) */
#if OS(DARWIN) && ((defined(TARGET_OS_EMBEDDED) && TARGET_OS_EMBEDDED) \
|| (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE) \
|| (defined(TARGET_IPHONE_SIMULATOR) && TARGET_IPHONE_SIMULATOR))
-#define WTF_OS_IPHONE_OS 1
+#define WTF_OS_IOS 1
#elif OS(DARWIN) && defined(TARGET_OS_MAC) && TARGET_OS_MAC
#define WTF_OS_MAC_OS_X 1
#endif
@@ -481,22 +483,22 @@
#define WTF_PLATFORM_WIN 1
#endif
-/* PLATFORM(IPHONE) */
+/* PLATFORM(IOS) */
/* FIXME: this is sometimes used as an OS switch and sometimes for higher-level things */
#if (defined(TARGET_OS_EMBEDDED) && TARGET_OS_EMBEDDED) || (defined(TARGET_OS_IPHONE) && TARGET_OS_IPHONE)
-#define WTF_PLATFORM_IPHONE 1
+#define WTF_PLATFORM_IOS 1
#endif
-/* PLATFORM(IPHONE_SIMULATOR) */
+/* PLATFORM(IOS_SIMULATOR) */
#if defined(TARGET_IPHONE_SIMULATOR) && TARGET_IPHONE_SIMULATOR
-#define WTF_PLATFORM_IPHONE 1
-#define WTF_PLATFORM_IPHONE_SIMULATOR 1
+#define WTF_PLATFORM_IOS 1
+#define WTF_PLATFORM_IOS_SIMULATOR 1
#else
-#define WTF_PLATFORM_IPHONE_SIMULATOR 0
+#define WTF_PLATFORM_IOS_SIMULATOR 0
#endif
-#if !defined(WTF_PLATFORM_IPHONE)
-#define WTF_PLATFORM_IPHONE 0
+#if !defined(WTF_PLATFORM_IOS)
+#define WTF_PLATFORM_IOS 0
#endif
/* PLATFORM(ANDROID) */
@@ -509,10 +511,10 @@
/* Graphics engines */
/* PLATFORM(CG) and PLATFORM(CI) */
-#if PLATFORM(MAC) || PLATFORM(IPHONE)
+#if PLATFORM(MAC) || PLATFORM(IOS)
#define WTF_PLATFORM_CG 1
#endif
-#if PLATFORM(MAC) && !PLATFORM(IPHONE)
+#if PLATFORM(MAC) && !PLATFORM(IOS)
#define WTF_PLATFORM_CI 1
#endif
@@ -543,7 +545,7 @@
#include <ce_time.h>
#endif
-#if (PLATFORM(IPHONE) || PLATFORM(MAC) || PLATFORM(WIN) || (PLATFORM(QT) && OS(DARWIN) && !ENABLE(SINGLE_THREADED))) && !defined(ENABLE_JSC_MULTIPLE_THREADS)
+#if (PLATFORM(IOS) || PLATFORM(MAC) || PLATFORM(WIN) || (PLATFORM(QT) && OS(DARWIN) && !ENABLE(SINGLE_THREADED))) && !defined(ENABLE_JSC_MULTIPLE_THREADS)
#define ENABLE_JSC_MULTIPLE_THREADS 1
#endif
@@ -579,7 +581,7 @@
#define WTF_USE_ICU_UNICODE 1
#endif
-#if PLATFORM(MAC) && !PLATFORM(IPHONE)
+#if PLATFORM(MAC) && !PLATFORM(IOS)
#define WTF_PLATFORM_CF 1
#define WTF_USE_PTHREADS 1
#define HAVE_PTHREAD_RWLOCK 1
@@ -594,7 +596,7 @@
#endif
#define HAVE_READLINE 1
#define HAVE_RUNLOOP_TIMER 1
-#endif /* PLATFORM(MAC) && !PLATFORM(IPHONE) */
+#endif /* PLATFORM(MAC) && !PLATFORM(IOS) */
#if PLATFORM(MAC)
#define WTF_USE_CARBON_SECURE_INPUT_MODE 1
@@ -615,7 +617,7 @@
#define WTF_PLATFORM_CF 1
#endif
-#if PLATFORM(IPHONE)
+#if PLATFORM(IOS)
#define ENABLE_CONTEXT_MENUS 0
#define ENABLE_DRAG_SUPPORT 0
#define ENABLE_FTPDIR 1
@@ -649,7 +651,8 @@
#endif
#if PLATFORM(WIN)
-#define WTF_USE_WININET 1
+#define WTF_PLATFORM_CF 1
+#define WTF_USE_PTHREADS 0
#endif
#if PLATFORM(WX)
@@ -686,7 +689,7 @@
#endif
#if !defined(HAVE_ACCESSIBILITY)
-#if PLATFORM(IPHONE) || PLATFORM(MAC) || PLATFORM(WIN) || PLATFORM(GTK) || PLATFORM(CHROMIUM)
+#if PLATFORM(IOS) || PLATFORM(MAC) || PLATFORM(WIN) || PLATFORM(GTK) || PLATFORM(CHROMIUM)
#define HAVE_ACCESSIBILITY 1
#endif
#endif /* !defined(HAVE_ACCESSIBILITY) */
@@ -720,7 +723,7 @@
#define HAVE_DISPATCH_H 1
#define HAVE_HOSTED_CORE_ANIMATION 1
-#if !PLATFORM(IPHONE)
+#if !PLATFORM(IOS)
#define HAVE_MADV_FREE_REUSE 1
#define HAVE_MADV_FREE 1
#define HAVE_PTHREAD_SETNAME_NP 1
@@ -728,7 +731,7 @@
#endif
-#if PLATFORM(IPHONE)
+#if PLATFORM(IOS)
#define HAVE_MADV_FREE 1
#endif
@@ -738,6 +741,7 @@
#define HAVE_ERRNO_H 0
#else
#define HAVE_SYS_TIMEB_H 1
+#define HAVE_ALIGNED_MALLOC 1
#endif
#define HAVE_VIRTUALALLOC 1
@@ -794,6 +798,13 @@
#endif
+#if HAVE(MMAP) || (HAVE(VIRTUALALLOC) && HAVE(ALIGNED_MALLOC))
+#define HAVE_PAGE_ALLOCATE_ALIGNED 1
+#endif
+#if HAVE(MMAP)
+#define HAVE_PAGE_ALLOCATE_AT 1
+#endif
+
/* ENABLE macro defaults */
#if PLATFORM(QT)
@@ -883,7 +894,7 @@
#define ENABLE_NOTIFICATIONS 0
#endif
-#if PLATFORM(IPHONE)
+#if PLATFORM(IOS)
#define ENABLE_TEXT_CARET 0
#endif
@@ -910,9 +921,10 @@
#if (CPU(X86_64) && (OS(UNIX) || OS(WINDOWS))) \
|| (CPU(IA64) && !CPU(IA64_32)) \
|| CPU(ALPHA) \
- || CPU(SPARC64)
+ || CPU(SPARC64) \
+ || CPU(PPC64)
#define WTF_USE_JSVALUE64 1
-#elif CPU(ARM_TRADITIONAL) || CPU(PPC64) || CPU(MIPS)
+#elif CPU(MIPS) || (CPU(ARM_TRADITIONAL) && COMPILER(MSVC))
#define WTF_USE_JSVALUE32 1
#elif OS(WINDOWS) && COMPILER(MINGW)
/* Using JSVALUE32_64 causes padding/alignment issues for JITStubArg
@@ -1005,7 +1017,7 @@ on MinGW. See https://bugs.webkit.org/show_bug.cgi?id=29268 */
#endif
/* Setting this flag prevents the assembler from using RWX memory; this may improve
security but currently comes at a significant performance cost. */
-#if PLATFORM(IPHONE)
+#if PLATFORM(IOS)
#define ENABLE_ASSEMBLER_WX_EXCLUSIVE 1
#else
#define ENABLE_ASSEMBLER_WX_EXCLUSIVE 0
@@ -1032,8 +1044,17 @@ on MinGW. See https://bugs.webkit.org/show_bug.cgi?id=29268 */
#define WTF_USE_QXMLQUERY 1
#endif
-/* Accelerated compositing */
#if PLATFORM(MAC)
+/* Complex text framework */
+#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD)
+#define WTF_USE_ATSUI 0
+#define WTF_USE_CORE_TEXT 1
+#else
+#define WTF_USE_ATSUI 1
+#define WTF_USE_CORE_TEXT 0
+#endif
+
+/* Accelerated compositing */
#if !defined(BUILDING_ON_TIGER)
#define WTF_USE_ACCELERATED_COMPOSITING 1
#endif
@@ -1043,7 +1064,7 @@ on MinGW. See https://bugs.webkit.org/show_bug.cgi?id=29268 */
#define WTF_USE_ACCELERATED_COMPOSITING 1
#endif
-#if PLATFORM(IPHONE)
+#if PLATFORM(IOS)
#define WTF_USE_ACCELERATED_COMPOSITING 1
#endif
@@ -1058,7 +1079,7 @@ on MinGW. See https://bugs.webkit.org/show_bug.cgi?id=29268 */
#endif
#endif
-#if (PLATFORM(MAC) && !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD)) || PLATFORM(IPHONE)
+#if (PLATFORM(MAC) && !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD)) || PLATFORM(IOS)
#define WTF_USE_PROTECTION_SPACE_AUTH_CALLBACK 1
#endif
@@ -1078,7 +1099,7 @@ on MinGW. See https://bugs.webkit.org/show_bug.cgi?id=29268 */
#define ENABLE_JSC_ZOMBIES 0
/* FIXME: Eventually we should enable this for all platforms and get rid of the define. */
-#if PLATFORM(MAC)
+#if PLATFORM(MAC) || PLATFORM(WIN)
#define WTF_USE_PLATFORM_STRATEGIES 1
#endif
@@ -1089,4 +1110,8 @@ on MinGW. See https://bugs.webkit.org/show_bug.cgi?id=29268 */
#define WTF_USE_PREEMPT_GEOLOCATION_PERMISSION 1
#endif
+#if CPU(ARM_THUMB2)
+#define ENABLE_BRANCH_COMPACTION 1
+#endif
+
#endif /* WTF_Platform_h */
diff --git a/JavaScriptCore/wtf/WTFThreadData.cpp b/JavaScriptCore/wtf/WTFThreadData.cpp
index 0716dc9..729b48e 100644
--- a/JavaScriptCore/wtf/WTFThreadData.cpp
+++ b/JavaScriptCore/wtf/WTFThreadData.cpp
@@ -54,4 +54,4 @@ WTFThreadData::~WTFThreadData()
#endif
}
-} // namespace WebCore
+}
diff --git a/JavaScriptCore/wtf/WTFThreadData.h b/JavaScriptCore/wtf/WTFThreadData.h
index d2c379b..c596260 100644
--- a/JavaScriptCore/wtf/WTFThreadData.h
+++ b/JavaScriptCore/wtf/WTFThreadData.h
@@ -45,15 +45,6 @@
#include <wtf/Threading.h>
#endif
-// FIXME: This is a temporary layering violation while we move more string code to WTF.
-namespace WebCore {
-class AtomicStringTable;
-class StringImpl;
-}
-using WebCore::StringImpl;
-
-typedef void (*AtomicStringTableDestructor)(WebCore::AtomicStringTable*);
-
#if USE(JSC)
// FIXME: This is a temporary layering violation while we move more string code to WTF.
namespace JSC {
@@ -82,12 +73,16 @@ private:
namespace WTF {
+class AtomicStringTable;
+
+typedef void (*AtomicStringTableDestructor)(AtomicStringTable*);
+
class WTFThreadData : public Noncopyable {
public:
WTFThreadData();
~WTFThreadData();
- WebCore::AtomicStringTable* atomicStringTable()
+ AtomicStringTable* atomicStringTable()
{
return m_atomicStringTable;
}
@@ -118,7 +113,7 @@ public:
#endif
private:
- WebCore::AtomicStringTable* m_atomicStringTable;
+ AtomicStringTable* m_atomicStringTable;
AtomicStringTableDestructor m_atomicStringTableDestructor;
#if USE(JSC)
@@ -132,7 +127,7 @@ private:
static JS_EXPORTDATA WTFThreadData* staticData;
#endif
friend WTFThreadData& wtfThreadData();
- friend class WebCore::AtomicStringTable;
+ friend class AtomicStringTable;
};
inline WTFThreadData& wtfThreadData()
diff --git a/JavaScriptCore/wtf/dtoa.cpp b/JavaScriptCore/wtf/dtoa.cpp
index 9edc2a0..2c478a0 100644
--- a/JavaScriptCore/wtf/dtoa.cpp
+++ b/JavaScriptCore/wtf/dtoa.cpp
@@ -167,6 +167,7 @@
#endif
#define INFNAN_CHECK
+#define No_Hex_NaN
#if defined(IEEE_8087) + defined(IEEE_MC68k) + defined(IEEE_ARM) != 1
Exactly one of IEEE_8087, IEEE_ARM or IEEE_MC68k should be defined.
diff --git a/JavaScriptCore/wtf/gobject/GOwnPtr.h b/JavaScriptCore/wtf/gobject/GOwnPtr.h
index 40c0bf4..731326e 100644
--- a/JavaScriptCore/wtf/gobject/GOwnPtr.h
+++ b/JavaScriptCore/wtf/gobject/GOwnPtr.h
@@ -34,7 +34,6 @@ typedef struct _GCond GCond;
typedef struct _GMutex GMutex;
typedef struct _GPatternSpec GPatternSpec;
typedef struct _GDir GDir;
-typedef struct _GHashTable GHashTable;
typedef struct _GFile GFile;
extern "C" void g_free(void*);
@@ -47,7 +46,6 @@ template<> void freeOwnedGPtr<GCond>(GCond*);
template<> void freeOwnedGPtr<GMutex>(GMutex*);
template<> void freeOwnedGPtr<GPatternSpec>(GPatternSpec*);
template<> void freeOwnedGPtr<GDir>(GDir*);
-template<> void freeOwnedGPtr<GHashTable>(GHashTable*);
template<> void freeOwnedGPtr<GFile>(GFile*);
template <typename T> class GOwnPtr : public Noncopyable {
diff --git a/JavaScriptCore/wtf/qt/StringQt.cpp b/JavaScriptCore/wtf/qt/StringQt.cpp
index b2c621a..c02505a 100644
--- a/JavaScriptCore/wtf/qt/StringQt.cpp
+++ b/JavaScriptCore/wtf/qt/StringQt.cpp
@@ -29,7 +29,7 @@
#include <QString>
-namespace WebCore {
+namespace WTF {
// String conversions
String::String(const QString& qstr)
diff --git a/JavaScriptCore/wtf/symbian/BlockAllocatorSymbian.cpp b/JavaScriptCore/wtf/symbian/BlockAllocatorSymbian.cpp
deleted file mode 100644
index 6a28e9e..0000000
--- a/JavaScriptCore/wtf/symbian/BlockAllocatorSymbian.cpp
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies)
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- * its contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-
-#if OS(SYMBIAN)
-
-#include "BlockAllocatorSymbian.h"
-
-
-namespace WTF {
-
-/** Efficiently allocates blocks of size blockSize with blockSize alignment.
- * Primarly designed for JSC Collector's needs.
- * Not thread-safe.
- */
-AlignedBlockAllocator::AlignedBlockAllocator(TUint32 reservationSize, TUint32 blockSize )
- : m_reservation(reservationSize),
- m_blockSize(blockSize)
-{
-
- // Get system's page size value.
- SYMBIAN_PAGESIZE(m_pageSize);
-
- // We only accept multiples of system page size for both initial reservation and the alignment/block size
- m_reservation = SYMBIAN_ROUNDUPTOMULTIPLE(m_reservation, m_pageSize);
- __ASSERT_ALWAYS(SYMBIAN_ROUNDUPTOMULTIPLE(m_blockSize, m_pageSize), User::Panic(_L("AlignedBlockAllocator1"), KErrArgument));
-
- // Calculate max. bit flags we need to carve a reservationSize range into blockSize-sized blocks
- m_map.numBits = m_reservation / m_blockSize;
- const TUint32 bitsPerWord = 8*sizeof(TUint32);
- const TUint32 numWords = (m_map.numBits + bitsPerWord -1) / bitsPerWord;
-
- m_map.bits = new TUint32[numWords];
- __ASSERT_ALWAYS(m_map.bits, User::Panic(_L("AlignedBlockAllocator2"), KErrNoMemory));
- m_map.clearAll();
-
- // Open a Symbian RChunk, and reserve requested virtual address range
- // Any thread in this process can operate this rchunk due to EOwnerProcess access rights.
- TInt ret = m_chunk.CreateDisconnectedLocal(0 , 0, (TInt)m_reservation , EOwnerProcess);
- if (ret != KErrNone)
- User::Panic(_L("AlignedBlockAllocator3"), ret);
-
- // This is the offset to m_chunk.Base() required to make it m_blockSize-aligned
- m_offset = SYMBIAN_ROUNDUPTOMULTIPLE(TUint32(m_chunk.Base()), m_blockSize) - TUint(m_chunk.Base());
-
-}
-
-void* AlignedBlockAllocator::alloc()
-{
-
- TInt freeRam = 0;
- void* address = 0;
-
- // Look up first free slot in bit map
- const TInt freeIdx = m_map.findFree();
-
- // Pseudo OOM: We ate up the address space we reserved..
- // ..even though the device may have free RAM left
- if (freeIdx < 0)
- return 0;
-
- TInt ret = m_chunk.Commit(m_offset + (m_blockSize * freeIdx), m_blockSize);
- if (ret != KErrNone)
- return 0; // True OOM: Device didn't have physical RAM to spare
-
- // Updated bit to mark region as in use.
- m_map.set(freeIdx);
-
- // Calculate address of committed region (block)
- address = (void*)( (m_chunk.Base() + m_offset) + (TUint)(m_blockSize * freeIdx) );
-
- return address;
-}
-
-void AlignedBlockAllocator::free(void* block)
-{
- // Calculate index of block to be freed
- TInt idx = TUint(static_cast<TUint8*>(block) - m_chunk.Base() - m_offset) / m_blockSize;
-
- __ASSERT_DEBUG(idx >= 0 && idx < m_map.numBits, User::Panic(_L("AlignedBlockAllocator4"), KErrCorrupt)); // valid index check
- __ASSERT_DEBUG(m_map.get(idx), User::Panic(_L("AlignedBlockAllocator5"), KErrCorrupt)); // in-use flag check
-
- // Return committed region to system RAM pool (the physical RAM becomes usable by others)
- TInt ret = m_chunk.Decommit(m_offset + m_blockSize * idx, m_blockSize);
-
- // mark this available again
- m_map.clear(idx);
-}
-
-void AlignedBlockAllocator::destroy()
-{
- // release everything!
- m_chunk.Decommit(0, m_chunk.MaxSize());
- m_map.clearAll();
-}
-
-AlignedBlockAllocator::~AlignedBlockAllocator()
-{
- destroy();
- m_chunk.Close();
- delete [] m_map.bits;
-}
-
-} // end of namespace
-
-#endif // SYMBIAN
diff --git a/JavaScriptCore/wtf/symbian/BlockAllocatorSymbian.h b/JavaScriptCore/wtf/symbian/BlockAllocatorSymbian.h
deleted file mode 100644
index 21422f6..0000000
--- a/JavaScriptCore/wtf/symbian/BlockAllocatorSymbian.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies)
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- * its contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef BlockAllocatorSymbian_h
-#define BlockAllocatorSymbian_h
-
-#include <e32cmn.h>
-#include <e32std.h>
-#include <hal.h>
-
-
-#define SYMBIAN_PAGESIZE(x) (HAL::Get(HALData::EMemoryPageSize, x));
-#define SYMBIAN_FREERAM(x) (HAL::Get(HALData::EMemoryRAMFree, x));
-#define SYMBIAN_ROUNDUPTOMULTIPLE(x, multipleof) ( (x + multipleof - 1) & ~(multipleof - 1) )
-
-// Set sane defaults if -D<flagname=value> wasn't provided via compiler args
-#ifndef JSCCOLLECTOR_VIRTUALMEM_RESERVATION
-#if defined(__WINS__)
- // Emulator has limited virtual address space
- #define JSCCOLLECTOR_VIRTUALMEM_RESERVATION (4*1024*1024)
-#else
- // HW has plenty of virtual addresses
- #define JSCCOLLECTOR_VIRTUALMEM_RESERVATION (128*1024*1024)
-#endif
-#endif
-
-namespace WTF {
-
-/**
- * Allocates contiguous region of size blockSize with blockSize-aligned address.
- * blockSize must be a multiple of system page size (typically 4K on Symbian/ARM)
- *
- * @param reservationSize Virtual address range to be reserved upon creation of chunk (bytes).
- * @param blockSize Size of a single allocation. Returned address will also be blockSize-aligned.
- */
-class AlignedBlockAllocator {
- public:
- AlignedBlockAllocator(TUint32 reservationSize, TUint32 blockSize);
- ~AlignedBlockAllocator();
- void destroy();
- void* alloc();
- void free(void* data);
-
- private:
- RChunk m_chunk; // Symbian chunk that lets us reserve/commit/decommit
- TUint m_offset; // offset of first committed region from base
- TInt m_pageSize; // cached value of system page size, typically 4K on Symbian
- TUint32 m_reservation;
- TUint32 m_blockSize;
-
- // Tracks comitted/decommitted state of a blockSize region
- struct {
-
- TUint32 *bits; // array of bit flags
- TUint32 numBits; // number of regions to keep track of
-
- bool get(TUint32 n) const
- {
- return !!(bits[n >> 5] & (1 << (n & 0x1F)));
- }
-
- void set(TUint32 n)
- {
- bits[n >> 5] |= (1 << (n & 0x1F));
- }
-
- void clear(TUint32 n)
- {
- bits[n >> 5] &= ~(1 << (n & 0x1F));
- }
-
- void clearAll()
- {
- for (TUint32 i = 0; i < numBits; i++)
- clear(i);
- }
-
- TInt findFree() const
- {
- for (TUint32 i = 0; i < numBits; i++) {
- if (!get(i))
- return i;
- }
- return -1;
- }
-
- } m_map;
-
-};
-
-}
-
-#endif // end of BlockAllocatorSymbian_h
-
-
diff --git a/JavaScriptCore/wtf/text/AtomicString.cpp b/JavaScriptCore/wtf/text/AtomicString.cpp
index 0547b8c..6e95292 100644
--- a/JavaScriptCore/wtf/text/AtomicString.cpp
+++ b/JavaScriptCore/wtf/text/AtomicString.cpp
@@ -27,7 +27,7 @@
#include <wtf/Threading.h>
#include <wtf/WTFThreadData.h>
-namespace WebCore {
+namespace WTF {
COMPILE_ASSERT(sizeof(AtomicString) == sizeof(String), atomic_string_and_string_must_be_same_size);
@@ -164,7 +164,7 @@ struct UCharBufferTranslator {
static bool equal(StringImpl* const& str, const UCharBuffer& buf)
{
- return WebCore::equal(str, buf.s, buf.length);
+ return WTF::equal(str, buf.s, buf.length);
}
static void translate(StringImpl*& location, const UCharBuffer& buf, unsigned hash)
@@ -190,7 +190,7 @@ struct HashAndCharactersTranslator {
static bool equal(StringImpl* const& string, const HashAndCharacters& buffer)
{
- return WebCore::equal(string, buffer.characters, buffer.length);
+ return WTF::equal(string, buffer.characters, buffer.length);
}
static void translate(StringImpl*& location, const HashAndCharacters& buffer, unsigned hash)
diff --git a/JavaScriptCore/wtf/text/AtomicString.h b/JavaScriptCore/wtf/text/AtomicString.h
index 5bb2cf9..d29981a 100644
--- a/JavaScriptCore/wtf/text/AtomicString.h
+++ b/JavaScriptCore/wtf/text/AtomicString.h
@@ -32,11 +32,13 @@
#define ATOMICSTRING_CONVERSION
#endif
-// FIXME: This is a temporary layering violation while we move string code to WTF.
-// Landing the file moves in one patch, will follow on with patches to change the namespaces.
+// FIXME: this should be in WTF, too!
namespace WebCore {
-
struct AtomicStringHash;
+}
+using WebCore::AtomicStringHash;
+
+namespace WTF {
class AtomicString {
public:
@@ -156,17 +158,23 @@ inline bool equalIgnoringCase(const String& a, const AtomicString& b) { return e
extern const JS_EXPORTDATA AtomicString xmlnsAtom;
#endif
-} // namespace WebCore
-
-
-namespace WTF {
-
// AtomicStringHash is the default hash for AtomicString
template<typename T> struct DefaultHash;
- template<> struct DefaultHash<WebCore::AtomicString> {
- typedef WebCore::AtomicStringHash Hash;
+ template<> struct DefaultHash<AtomicString> {
+ typedef AtomicStringHash Hash;
};
} // namespace WTF
+#ifndef ATOMICSTRING_HIDE_GLOBALS
+using WTF::AtomicString;
+using WTF::nullAtom;
+using WTF::emptyAtom;
+using WTF::textAtom;
+using WTF::commentAtom;
+using WTF::starAtom;
+using WTF::xmlAtom;
+using WTF::xmlnsAtom;
+#endif
+
#endif // AtomicString_h
diff --git a/JavaScriptCore/wtf/text/AtomicStringImpl.h b/JavaScriptCore/wtf/text/AtomicStringImpl.h
index 4b813f8..3f0c376 100644
--- a/JavaScriptCore/wtf/text/AtomicStringImpl.h
+++ b/JavaScriptCore/wtf/text/AtomicStringImpl.h
@@ -23,9 +23,7 @@
#include "StringImpl.h"
-// FIXME: This is a temporary layering violation while we move string code to WTF.
-// Landing the file moves in one patch, will follow on with patches to change the namespaces.
-namespace WebCore {
+namespace WTF {
class AtomicStringImpl : public StringImpl
{
@@ -35,4 +33,6 @@ public:
}
+using WTF::AtomicStringImpl;
+
#endif
diff --git a/JavaScriptCore/wtf/text/StringBuffer.h b/JavaScriptCore/wtf/text/StringBuffer.h
index 353a44a..c29dd79 100644
--- a/JavaScriptCore/wtf/text/StringBuffer.h
+++ b/JavaScriptCore/wtf/text/StringBuffer.h
@@ -33,7 +33,7 @@
#include <wtf/Noncopyable.h>
#include <wtf/unicode/Unicode.h>
-namespace WebCore {
+namespace WTF {
class StringBuffer : public Noncopyable {
public:
@@ -74,4 +74,6 @@ private:
}
+using WTF::StringBuffer;
+
#endif
diff --git a/JavaScriptCore/wtf/text/StringHash.h b/JavaScriptCore/wtf/text/StringHash.h
index b820004..8872fb3 100644
--- a/JavaScriptCore/wtf/text/StringHash.h
+++ b/JavaScriptCore/wtf/text/StringHash.h
@@ -24,13 +24,12 @@
#include "AtomicString.h"
#include "WTFString.h"
+#include <wtf/Forward.h>
#include <wtf/HashTraits.h>
#include <wtf/StringHashFunctions.h>
#include <wtf/unicode/Unicode.h>
-// FIXME: This is a temporary layering violation while we move string code to WTF.
-// Landing the file moves in one patch, will follow on with patches to change the namespaces.
-namespace WebCore {
+namespace WTF {
// The hash() functions on StringHash and CaseFoldingHash do not support
// null strings. get(), contains(), and add() on HashMap<String,..., StringHash>
@@ -253,16 +252,16 @@ namespace WebCore {
}
};
-}
-
-namespace WTF {
-
- template<> struct HashTraits<WebCore::String> : GenericHashTraits<WebCore::String> {
+ template<> struct HashTraits<String> : GenericHashTraits<String> {
static const bool emptyValueIsZero = true;
- static void constructDeletedValue(WebCore::String& slot) { new (&slot) WebCore::String(HashTableDeletedValue); }
- static bool isDeletedValue(const WebCore::String& slot) { return slot.isHashTableDeletedValue(); }
+ static void constructDeletedValue(String& slot) { new (&slot) String(HashTableDeletedValue); }
+ static bool isDeletedValue(const String& slot) { return slot.isHashTableDeletedValue(); }
};
}
+using WTF::StringHash;
+using WTF::CaseFoldingHash;
+using WTF::AlreadyHashed;
+
#endif
diff --git a/JavaScriptCore/wtf/text/StringImpl.cpp b/JavaScriptCore/wtf/text/StringImpl.cpp
index 698cab9..3669628 100644
--- a/JavaScriptCore/wtf/text/StringImpl.cpp
+++ b/JavaScriptCore/wtf/text/StringImpl.cpp
@@ -31,10 +31,9 @@
#include <wtf/StdLibExtras.h>
#include <wtf/WTFThreadData.h>
-using namespace WTF;
-using namespace Unicode;
+namespace WTF {
-namespace WebCore {
+using namespace Unicode;
static const unsigned minLengthToShare = 20;
@@ -535,12 +534,12 @@ int StringImpl::find(const char* chs, int index, bool caseSensitive)
int StringImpl::find(UChar c, int start)
{
- return WebCore::find(m_data, m_length, c, start);
+ return WTF::find(m_data, m_length, c, start);
}
int StringImpl::find(CharacterMatchFunctionPtr matchFunction, int start)
{
- return WebCore::find(m_data, m_length, matchFunction, start);
+ return WTF::find(m_data, m_length, matchFunction, start);
}
int StringImpl::find(StringImpl* str, int index, bool caseSensitive)
@@ -601,7 +600,7 @@ int StringImpl::find(StringImpl* str, int index, bool caseSensitive)
int StringImpl::reverseFind(UChar c, int index)
{
- return WebCore::reverseFind(m_data, m_length, c, index);
+ return WTF::reverseFind(m_data, m_length, c, index);
}
int StringImpl::reverseFind(StringImpl* str, int index, bool caseSensitive)
@@ -961,4 +960,4 @@ PassRefPtr<StringImpl> StringImpl::crossThreadString()
return threadsafeCopy();
}
-} // namespace WebCore
+} // namespace WTF
diff --git a/JavaScriptCore/wtf/text/StringImpl.h b/JavaScriptCore/wtf/text/StringImpl.h
index 244009f..6080474 100644
--- a/JavaScriptCore/wtf/text/StringImpl.h
+++ b/JavaScriptCore/wtf/text/StringImpl.h
@@ -26,6 +26,7 @@
#include <limits.h>
#include <wtf/ASCIICType.h>
#include <wtf/CrossThreadRefCounted.h>
+#include <wtf/Forward.h>
#include <wtf/OwnFastMallocPtr.h>
#include <wtf/StdLibExtras.h>
#include <wtf/StringHashFunctions.h>
@@ -44,21 +45,14 @@ typedef const struct __CFString * CFStringRef;
// FIXME: This is a temporary layering violation while we move string code to WTF.
// Landing the file moves in one patch, will follow on with patches to change the namespaces.
namespace JSC {
-
struct IdentifierCStringTranslator;
struct IdentifierUCharBufferTranslator;
-
}
-// FIXME: This is a temporary layering violation while we move string code to WTF.
-// Landing the file moves in one patch, will follow on with patches to change the namespaces.
-namespace WebCore {
-
-class StringBuffer;
+namespace WTF {
struct CStringTranslator;
struct HashAndCharactersTranslator;
-struct StringHash;
struct UCharBufferTranslator;
enum TextCaseSensitivity { TextCaseSensitive, TextCaseInsensitive };
@@ -70,9 +64,9 @@ typedef bool (*CharacterMatchFunctionPtr)(UChar);
class StringImpl : public StringImplBase {
friend struct JSC::IdentifierCStringTranslator;
friend struct JSC::IdentifierUCharBufferTranslator;
- friend struct CStringTranslator;
- friend struct HashAndCharactersTranslator;
- friend struct UCharBufferTranslator;
+ friend struct WTF::CStringTranslator;
+ friend struct WTF::HashAndCharactersTranslator;
+ friend struct WTF::UCharBufferTranslator;
friend class AtomicStringImpl;
private:
// Used to construct static strings, which have an special refCount that can never hit zero.
@@ -384,21 +378,23 @@ inline PassRefPtr<StringImpl> StringImpl::createStrippingNullCharacters(const UC
return StringImpl::createStrippingNullCharactersSlowCase(characters, length);
}
-}
-
-using WebCore::equal;
-
-namespace WTF {
+struct StringHash;
- // WebCore::StringHash is the default hash for StringImpl* and RefPtr<StringImpl>
- template<typename T> struct DefaultHash;
- template<> struct DefaultHash<WebCore::StringImpl*> {
- typedef WebCore::StringHash Hash;
- };
- template<> struct DefaultHash<RefPtr<WebCore::StringImpl> > {
- typedef WebCore::StringHash Hash;
- };
+// StringHash is the default hash for StringImpl* and RefPtr<StringImpl>
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<StringImpl*> {
+ typedef StringHash Hash;
+};
+template<> struct DefaultHash<RefPtr<StringImpl> > {
+ typedef StringHash Hash;
+};
}
+using WTF::StringImpl;
+using WTF::equal;
+using WTF::TextCaseSensitivity;
+using WTF::TextCaseSensitive;
+using WTF::TextCaseInsensitive;
+
#endif
diff --git a/JavaScriptCore/wtf/text/StringStatics.cpp b/JavaScriptCore/wtf/text/StringStatics.cpp
index 4a23a16..5654044 100644
--- a/JavaScriptCore/wtf/text/StringStatics.cpp
+++ b/JavaScriptCore/wtf/text/StringStatics.cpp
@@ -33,7 +33,7 @@
#include "StaticConstructors.h"
#include "StringImpl.h"
-namespace WebCore {
+namespace WTF {
StringImpl* StringImpl::empty()
{
diff --git a/JavaScriptCore/wtf/text/WTFString.cpp b/JavaScriptCore/wtf/text/WTFString.cpp
index 2d4417f..6c4de6e 100644
--- a/JavaScriptCore/wtf/text/WTFString.cpp
+++ b/JavaScriptCore/wtf/text/WTFString.cpp
@@ -32,10 +32,9 @@
#include <wtf/unicode/UTF8.h>
#include <wtf/unicode/Unicode.h>
-using namespace WTF;
-using namespace WTF::Unicode;
+namespace WTF {
-namespace WebCore {
+using namespace Unicode;
String::String(const UChar* str)
{
@@ -905,14 +904,14 @@ float charactersToFloat(const UChar* data, size_t length, bool* ok)
return static_cast<float>(charactersToDouble(data, length, ok));
}
-} // namespace WebCore
+} // namespace WTF
#ifndef NDEBUG
// For use in the debugger - leaks memory
-WebCore::String* string(const char*);
+String* string(const char*);
-WebCore::String* string(const char* s)
+String* string(const char* s)
{
- return new WebCore::String(s);
+ return new String(s);
}
#endif
diff --git a/JavaScriptCore/wtf/text/WTFString.h b/JavaScriptCore/wtf/text/WTFString.h
index 90d9a71..6af519c 100644
--- a/JavaScriptCore/wtf/text/WTFString.h
+++ b/JavaScriptCore/wtf/text/WTFString.h
@@ -51,16 +51,8 @@ class BString;
#endif
namespace WTF {
-class CString;
-}
-using WTF::CString;
-// FIXME: This is a temporary layering violation while we move string code to WTF.
-// Landing the file moves in one patch, will follow on with patches to change the namespaces.
-namespace WebCore {
-
-class SharedBuffer;
-struct StringHash;
+class CString;
// Declarations of string operations
@@ -417,16 +409,30 @@ inline void appendNumber(Vector<UChar>& vector, unsigned char number)
}
}
-} // namespace WebCore
-
-namespace WTF {
+struct StringHash;
- // StringHash is the default hash for String
- template<typename T> struct DefaultHash;
- template<> struct DefaultHash<WebCore::String> {
- typedef WebCore::StringHash Hash;
- };
+// StringHash is the default hash for String
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<String> {
+ typedef StringHash Hash;
+};
}
+using WTF::CString;
+using WTF::String;
+
+using WTF::isSpaceOrNewline;
+using WTF::find;
+using WTF::reverseFind;
+using WTF::append;
+using WTF::appendNumber;
+using WTF::equal;
+using WTF::equalIgnoringCase;
+using WTF::charactersAreAllASCII;
+using WTF::charactersToInt;
+using WTF::charactersToFloat;
+using WTF::charactersToDouble;
+using WTF::operator+;
+
#endif
diff --git a/JavaScriptCore/wtf/unicode/icu/CollatorICU.cpp b/JavaScriptCore/wtf/unicode/icu/CollatorICU.cpp
index 5112de5..805b114 100644
--- a/JavaScriptCore/wtf/unicode/icu/CollatorICU.cpp
+++ b/JavaScriptCore/wtf/unicode/icu/CollatorICU.cpp
@@ -61,7 +61,7 @@ PassOwnPtr<Collator> Collator::userDefault()
{
#if OS(DARWIN) && PLATFORM(CF)
// Mac OS X doesn't set UNIX locale to match user-selected one, so ICU default doesn't work.
-#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) && !OS(IPHONE_OS)
+#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) && !OS(IOS)
RetainPtr<CFLocaleRef> currentLocale(AdoptCF, CFLocaleCopyCurrent());
CFStringRef collationOrder = (CFStringRef)CFLocaleGetValue(currentLocale.get(), kCFLocaleCollatorIdentifier);
#else
diff --git a/JavaScriptCore/yarr/RegexJIT.cpp b/JavaScriptCore/yarr/RegexJIT.cpp
index 9eff75a..5a53ced 100644
--- a/JavaScriptCore/yarr/RegexJIT.cpp
+++ b/JavaScriptCore/yarr/RegexJIT.cpp
@@ -1466,7 +1466,17 @@ public:
{
generate();
- LinkBuffer patchBuffer(this, globalData->executableAllocator.poolForSize(size()));
+ RefPtr<ExecutablePool> executablePool = globalData->executableAllocator.poolForSize(size());
+ if (!executablePool) {
+ m_shouldFallBack = true;
+ return;
+ }
+
+ LinkBuffer patchBuffer(this, executablePool.release(), 0);
+ if (!patchBuffer.allocationSuccessful()) {
+ m_shouldFallBack = true;
+ return;
+ }
for (unsigned i = 0; i < m_backtrackRecords.size(); ++i)
patchBuffer.patch(m_backtrackRecords[i].dataLabel, patchBuffer.locationOf(m_backtrackRecords[i].backtrackLocation));