author     The Android Open Source Project <initial-contribution@android.com>  2009-03-05 14:34:32 -0800
committer  The Android Open Source Project <initial-contribution@android.com>  2009-03-05 14:34:32 -0800
commit     635860845790a19bf50bbc51ba8fb66a96dde068 (patch)
tree       ef6ad9ff73a5b57f65249d4232a202fa77e6a140 /JavaScriptCore/bytecompiler
parent     8e35f3cfc7fba1d1c829dc557ebad6409cbe16a2 (diff)
download   external_webkit-635860845790a19bf50bbc51ba8fb66a96dde068.zip
           external_webkit-635860845790a19bf50bbc51ba8fb66a96dde068.tar.gz
           external_webkit-635860845790a19bf50bbc51ba8fb66a96dde068.tar.bz2
auto import from //depot/cupcake/@136594
Diffstat (limited to 'JavaScriptCore/bytecompiler')
-rw-r--r--  JavaScriptCore/bytecompiler/BytecodeGenerator.cpp  1792
-rw-r--r--  JavaScriptCore/bytecompiler/BytecodeGenerator.h     480
-rw-r--r--  JavaScriptCore/bytecompiler/Label.h                  92
-rw-r--r--  JavaScriptCore/bytecompiler/LabelScope.h             79
-rw-r--r--  JavaScriptCore/bytecompiler/RegisterID.h            121
-rw-r--r--  JavaScriptCore/bytecompiler/SegmentedVector.h       170
6 files changed, 2734 insertions, 0 deletions
diff --git a/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp b/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
new file mode 100644
index 0000000..cd89c1e
--- /dev/null
+++ b/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
@@ -0,0 +1,1792 @@
+/*
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "BytecodeGenerator.h"
+
+#include "BatchedTransitionOptimizer.h"
+#include "JSFunction.h"
+#include "Interpreter.h"
+#include "UString.h"
+
+using namespace std;
+
+namespace JSC {
+
+/*
+ The layout of a register frame looks like this:
+
+ For
+
+ function f(x, y) {
+ var v1;
+ function g() { }
+ var v2;
+ return (x) * (y);
+ }
+
+ assuming (x) and (y) generated temporaries t1 and t2, you would have
+
+ ------------------------------------
+ |  x |  y |  g | v2 | v1 | t1 | t2 | <-- value held
+ ------------------------------------
+ | -5 | -4 | -3 | -2 | -1 | +0 | +1 | <-- register index
+ ------------------------------------
+ | params->|<-locals      | temps->
+
+ Because temporary registers are allocated in a stack-like fashion, we
+ can reclaim them with a simple popping algorithm. The same goes for labels.
+ (We never reclaim parameter or local registers, because parameters and
+ locals are DontDelete.)
+
+ The register layout before a function call looks like this:
+
+ For
+
+ function f(x, y)
+ {
+ }
+
+ f(1);
+
+ >                        <------------------------------
+ <                        > reserved: call frame   |  1 | <-- value held
+ >         >snip<         <------------------------------
+ <                        > +0 | +1 | +2 | +3 | +4 | +5 | <-- register index
+ >                        <------------------------------
+ | params->|<-locals      | temps->
+
+ The call instruction fills in the "call frame" registers. It also pads
+ missing arguments at the end of the call:
+
+ >                        <-----------------------------------
+ <                        > reserved: call frame   |  1 |  ? | <-- value held ("?" stands for "undefined")
+ >         >snip<         <-----------------------------------
+ <                        > +0 | +1 | +2 | +3 | +4 | +5 | +6 | <-- register index
+ >                        <-----------------------------------
+ | params->|<-locals      | temps->
+
+ After filling in missing arguments, the call instruction sets up the new
+ stack frame to overlap the end of the old stack frame:
+
+ |----------------------------------->                        <
+ | reserved: call frame   |  1 |  ?  <                        > <-- value held ("?" stands for "undefined")
+ |----------------------------------->         >snip<         <
+ | -7 | -6 | -5 | -4 | -3 | -2 | -1  <                        > <-- register index
+ |----------------------------------->                        <
+ |                       | params->|<-locals | temps->
+
+ That way, arguments are "copied" into the callee's stack frame for free.
+
+ If the caller supplies too many arguments, this trick doesn't work. The
+ extra arguments protrude into space reserved for locals and temporaries.
+ In that case, the call instruction makes a real copy of the call frame header,
+ along with just the arguments expected by the callee, leaving the original
+ call frame header and arguments behind. (The call instruction can't just discard
+ extra arguments, because the "arguments" object may access them later.)
+ This copying strategy ensures that all named values will be at the indices
+ expected by the callee.
+*/
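
To make the too-many-arguments case in the comment concrete, here is an illustrative JavaScript sketch (the names are invented for illustration, not taken from this patch):

    function f(x, y) {
        return arguments[2];   // the extra argument must stay reachable
    }
    f(1, 2, 3);                // three arguments for two parameters: the call
                               // instruction copies the frame header plus the
                               // two expected arguments into a fresh frame,
                               // leaving 3 behind for the "arguments" object
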
+
+#ifndef NDEBUG
+static bool s_dumpsGeneratedCode = false;
+#endif
+
+void BytecodeGenerator::setDumpsGeneratedCode(bool dumpsGeneratedCode)
+{
+#ifndef NDEBUG
+ s_dumpsGeneratedCode = dumpsGeneratedCode;
+#else
+ UNUSED_PARAM(dumpsGeneratedCode);
+#endif
+}
+
+bool BytecodeGenerator::dumpsGeneratedCode()
+{
+#ifndef NDEBUG
+ return s_dumpsGeneratedCode;
+#else
+ return false;
+#endif
+}
+
+void BytecodeGenerator::generate()
+{
+ m_codeBlock->setThisRegister(m_thisRegister.index());
+
+ m_scopeNode->emitBytecode(*this);
+
+#ifndef NDEBUG
+ m_codeBlock->setInstructionCount(m_codeBlock->instructions().size());
+
+ if (s_dumpsGeneratedCode)
+ m_codeBlock->dump(m_scopeChain->globalObject()->globalExec());
+#endif
+
+ if ((m_codeType == FunctionCode && !m_codeBlock->needsFullScopeChain() && !m_codeBlock->usesArguments()) || m_codeType == EvalCode)
+ symbolTable().clear();
+
+ m_codeBlock->setIsNumericCompareFunction(instructions() == m_globalData->numericCompareFunction(m_scopeChain->globalObject()->globalExec()));
+
+#if !ENABLE(OPCODE_SAMPLING)
+ if (!m_regeneratingForExceptionInfo && (m_codeType == FunctionCode || m_codeType == EvalCode))
+ m_codeBlock->clearExceptionInfo();
+#endif
+
+ m_codeBlock->shrinkToFit();
+}
+
+bool BytecodeGenerator::addVar(const Identifier& ident, bool isConstant, RegisterID*& r0)
+{
+ int index = m_calleeRegisters.size();
+ SymbolTableEntry newEntry(index, isConstant ? ReadOnly : 0);
+ pair<SymbolTable::iterator, bool> result = symbolTable().add(ident.ustring().rep(), newEntry);
+
+ if (!result.second) {
+ r0 = &registerFor(result.first->second.getIndex());
+ return false;
+ }
+
+ ++m_codeBlock->m_numVars;
+ r0 = newRegister();
+ return true;
+}
+
+bool BytecodeGenerator::addGlobalVar(const Identifier& ident, bool isConstant, RegisterID*& r0)
+{
+ int index = m_nextGlobalIndex;
+ SymbolTableEntry newEntry(index, isConstant ? ReadOnly : 0);
+ pair<SymbolTable::iterator, bool> result = symbolTable().add(ident.ustring().rep(), newEntry);
+
+ if (!result.second)
+ index = result.first->second.getIndex();
+ else {
+ --m_nextGlobalIndex;
+ m_globals.append(index + m_globalVarStorageOffset);
+ }
+
+ r0 = &registerFor(index);
+ return result.second;
+}
+
+void BytecodeGenerator::allocateConstants(size_t count)
+{
+ m_codeBlock->m_numConstants = count;
+ if (!count)
+ return;
+
+ m_nextConstantIndex = m_calleeRegisters.size();
+
+ for (size_t i = 0; i < count; ++i)
+ newRegister();
+ m_lastConstant = &m_calleeRegisters.last();
+}
+
+BytecodeGenerator::BytecodeGenerator(ProgramNode* programNode, const Debugger* debugger, const ScopeChain& scopeChain, SymbolTable* symbolTable, ProgramCodeBlock* codeBlock)
+ : m_shouldEmitDebugHooks(!!debugger)
+ , m_shouldEmitProfileHooks(scopeChain.globalObject()->supportsProfiling())
+ , m_scopeChain(&scopeChain)
+ , m_symbolTable(symbolTable)
+ , m_scopeNode(programNode)
+ , m_codeBlock(codeBlock)
+ , m_thisRegister(RegisterFile::ProgramCodeThisRegister)
+ , m_finallyDepth(0)
+ , m_dynamicScopeDepth(0)
+ , m_baseScopeDepth(0)
+ , m_codeType(GlobalCode)
+ , m_nextGlobalIndex(-1)
+ , m_globalData(&scopeChain.globalObject()->globalExec()->globalData())
+ , m_lastOpcodeID(op_end)
+ , m_emitNodeDepth(0)
+ , m_regeneratingForExceptionInfo(false)
+ , m_codeBlockBeingRegeneratedFrom(0)
+{
+ if (m_shouldEmitDebugHooks)
+ m_codeBlock->setNeedsFullScopeChain(true);
+
+ emitOpcode(op_enter);
+ codeBlock->setGlobalData(m_globalData);
+
+ // FIXME: Move code that modifies the global object to Interpreter::execute.
+
+ m_codeBlock->m_numParameters = 1; // Allocate space for "this"
+
+ JSGlobalObject* globalObject = scopeChain.globalObject();
+ ExecState* exec = globalObject->globalExec();
+ RegisterFile* registerFile = &exec->globalData().interpreter->registerFile();
+
+ // Shift register indexes in generated code to elide registers allocated by intermediate stack frames.
+ m_globalVarStorageOffset = -RegisterFile::CallFrameHeaderSize - m_codeBlock->m_numParameters - registerFile->size();
+
+ // Add previously defined symbols to bookkeeping.
+ m_globals.grow(symbolTable->size());
+ SymbolTable::iterator end = symbolTable->end();
+ for (SymbolTable::iterator it = symbolTable->begin(); it != end; ++it)
+ registerFor(it->second.getIndex()).setIndex(it->second.getIndex() + m_globalVarStorageOffset);
+
+ BatchedTransitionOptimizer optimizer(globalObject);
+
+ const VarStack& varStack = programNode->varStack();
+ const FunctionStack& functionStack = programNode->functionStack();
+ bool canOptimizeNewGlobals = symbolTable->size() + functionStack.size() + varStack.size() < registerFile->maxGlobals();
+ if (canOptimizeNewGlobals) {
+ // Shift new symbols so they get stored prior to existing symbols.
+ m_nextGlobalIndex -= symbolTable->size();
+
+ for (size_t i = 0; i < functionStack.size(); ++i) {
+ FuncDeclNode* funcDecl = functionStack[i].get();
+ globalObject->removeDirect(funcDecl->m_ident); // Make sure our new function is not shadowed by an old property.
+ emitNewFunction(addGlobalVar(funcDecl->m_ident, false), funcDecl);
+ }
+
+ Vector<RegisterID*, 32> newVars;
+ for (size_t i = 0; i < varStack.size(); ++i)
+ if (!globalObject->hasProperty(exec, varStack[i].first))
+ newVars.append(addGlobalVar(varStack[i].first, varStack[i].second & DeclarationStacks::IsConstant));
+
+ allocateConstants(programNode->neededConstants());
+
+ for (size_t i = 0; i < newVars.size(); ++i)
+ emitLoad(newVars[i], jsUndefined());
+ } else {
+ for (size_t i = 0; i < functionStack.size(); ++i) {
+ FuncDeclNode* funcDecl = functionStack[i].get();
+ globalObject->putWithAttributes(exec, funcDecl->m_ident, funcDecl->makeFunction(exec, scopeChain.node()), DontDelete);
+ }
+ for (size_t i = 0; i < varStack.size(); ++i) {
+ if (globalObject->hasProperty(exec, varStack[i].first))
+ continue;
+ int attributes = DontDelete;
+ if (varStack[i].second & DeclarationStacks::IsConstant)
+ attributes |= ReadOnly;
+ globalObject->putWithAttributes(exec, varStack[i].first, jsUndefined(), attributes);
+ }
+
+ allocateConstants(programNode->neededConstants());
+ }
+}
+
+BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, const Debugger* debugger, const ScopeChain& scopeChain, SymbolTable* symbolTable, CodeBlock* codeBlock)
+ : m_shouldEmitDebugHooks(!!debugger)
+ , m_shouldEmitProfileHooks(scopeChain.globalObject()->supportsProfiling())
+ , m_scopeChain(&scopeChain)
+ , m_symbolTable(symbolTable)
+ , m_scopeNode(functionBody)
+ , m_codeBlock(codeBlock)
+ , m_finallyDepth(0)
+ , m_dynamicScopeDepth(0)
+ , m_baseScopeDepth(0)
+ , m_codeType(FunctionCode)
+ , m_globalData(&scopeChain.globalObject()->globalExec()->globalData())
+ , m_lastOpcodeID(op_end)
+ , m_emitNodeDepth(0)
+ , m_regeneratingForExceptionInfo(false)
+ , m_codeBlockBeingRegeneratedFrom(0)
+{
+ if (m_shouldEmitDebugHooks)
+ m_codeBlock->setNeedsFullScopeChain(true);
+
+ codeBlock->setGlobalData(m_globalData);
+
+ bool usesArguments = functionBody->usesArguments();
+ codeBlock->setUsesArguments(usesArguments);
+ if (usesArguments) {
+ m_argumentsRegister.setIndex(RegisterFile::OptionalCalleeArguments);
+ addVar(propertyNames().arguments, false);
+ }
+
+ if (m_codeBlock->needsFullScopeChain()) {
+ ++m_codeBlock->m_numVars;
+ m_activationRegisterIndex = newRegister()->index();
+ emitOpcode(op_enter_with_activation);
+ instructions().append(m_activationRegisterIndex);
+ } else
+ emitOpcode(op_enter);
+
+ if (usesArguments)
+ emitOpcode(op_create_arguments);
+
+ const DeclarationStacks::FunctionStack& functionStack = functionBody->functionStack();
+ for (size_t i = 0; i < functionStack.size(); ++i) {
+ FuncDeclNode* funcDecl = functionStack[i].get();
+ const Identifier& ident = funcDecl->m_ident;
+ m_functions.add(ident.ustring().rep());
+ emitNewFunction(addVar(ident, false), funcDecl);
+ }
+
+ const DeclarationStacks::VarStack& varStack = functionBody->varStack();
+ for (size_t i = 0; i < varStack.size(); ++i)
+ addVar(varStack[i].first, varStack[i].second & DeclarationStacks::IsConstant);
+
+ const Identifier* parameters = functionBody->parameters();
+ size_t parameterCount = functionBody->parameterCount();
+ m_nextParameterIndex = -RegisterFile::CallFrameHeaderSize - parameterCount - 1;
+ m_parameters.grow(1 + parameterCount); // reserve space for "this"
+
+ // Add "this" as a parameter
+ m_thisRegister.setIndex(m_nextParameterIndex);
+ ++m_nextParameterIndex;
+ ++m_codeBlock->m_numParameters;
+
+ if (functionBody->usesThis() || m_shouldEmitDebugHooks) {
+ emitOpcode(op_convert_this);
+ instructions().append(m_thisRegister.index());
+ }
+
+ for (size_t i = 0; i < parameterCount; ++i)
+ addParameter(parameters[i]);
+
+ allocateConstants(functionBody->neededConstants());
+}
+
+BytecodeGenerator::BytecodeGenerator(EvalNode* evalNode, const Debugger* debugger, const ScopeChain& scopeChain, SymbolTable* symbolTable, EvalCodeBlock* codeBlock)
+ : m_shouldEmitDebugHooks(!!debugger)
+ , m_shouldEmitProfileHooks(scopeChain.globalObject()->supportsProfiling())
+ , m_scopeChain(&scopeChain)
+ , m_symbolTable(symbolTable)
+ , m_scopeNode(evalNode)
+ , m_codeBlock(codeBlock)
+ , m_thisRegister(RegisterFile::ProgramCodeThisRegister)
+ , m_finallyDepth(0)
+ , m_dynamicScopeDepth(0)
+ , m_baseScopeDepth(codeBlock->baseScopeDepth())
+ , m_codeType(EvalCode)
+ , m_globalData(&scopeChain.globalObject()->globalExec()->globalData())
+ , m_lastOpcodeID(op_end)
+ , m_emitNodeDepth(0)
+ , m_regeneratingForExceptionInfo(false)
+ , m_codeBlockBeingRegeneratedFrom(0)
+{
+ if (m_shouldEmitDebugHooks || m_baseScopeDepth)
+ m_codeBlock->setNeedsFullScopeChain(true);
+
+ emitOpcode(op_enter);
+ codeBlock->setGlobalData(m_globalData);
+ m_codeBlock->m_numParameters = 1; // Allocate space for "this"
+
+ allocateConstants(evalNode->neededConstants());
+}
+
+RegisterID* BytecodeGenerator::addParameter(const Identifier& ident)
+{
+ // Parameters overwrite var declarations, but not function declarations.
+ RegisterID* result = 0;
+ UString::Rep* rep = ident.ustring().rep();
+ if (!m_functions.contains(rep)) {
+ symbolTable().set(rep, m_nextParameterIndex);
+ RegisterID& parameter = registerFor(m_nextParameterIndex);
+ parameter.setIndex(m_nextParameterIndex);
+ result = &parameter;
+ }
+
+ // To maintain the calling convention, we have to allocate unique space for
+ // each parameter, even if the parameter doesn't make it into the symbol table.
+ ++m_nextParameterIndex;
+ ++m_codeBlock->m_numParameters;
+ return result;
+}
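
The rule in the comment above ("parameters overwrite var declarations, but not function declarations") corresponds to JavaScript like this illustrative sketch:

    function f(x) { var x; return x; }            // var x is a no-op; x stays the parameter
    function g(x) { function x() {} return x; }   // the declaration wins, so addParameter
                                                  // skips the symbol-table entry for x
    f(1);   // returns 1
    g(1);   // returns the inner function, not 1
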
+
+RegisterID* BytecodeGenerator::registerFor(const Identifier& ident)
+{
+ if (ident == propertyNames().thisIdentifier)
+ return &m_thisRegister;
+
+ if (!shouldOptimizeLocals())
+ return 0;
+
+ SymbolTableEntry entry = symbolTable().get(ident.ustring().rep());
+ if (entry.isNull())
+ return 0;
+
+ return &registerFor(entry.getIndex());
+}
+
+RegisterID* BytecodeGenerator::constRegisterFor(const Identifier& ident)
+{
+ if (m_codeType == EvalCode)
+ return 0;
+
+ SymbolTableEntry entry = symbolTable().get(ident.ustring().rep());
+ ASSERT(!entry.isNull());
+
+ return &registerFor(entry.getIndex());
+}
+
+bool BytecodeGenerator::isLocal(const Identifier& ident)
+{
+ if (ident == propertyNames().thisIdentifier)
+ return true;
+
+ return shouldOptimizeLocals() && symbolTable().contains(ident.ustring().rep());
+}
+
+bool BytecodeGenerator::isLocalConstant(const Identifier& ident)
+{
+ return symbolTable().get(ident.ustring().rep()).isReadOnly();
+}
+
+RegisterID* BytecodeGenerator::newRegister()
+{
+ m_calleeRegisters.append(m_calleeRegisters.size());
+ m_codeBlock->m_numCalleeRegisters = max<int>(m_codeBlock->m_numCalleeRegisters, m_calleeRegisters.size());
+ return &m_calleeRegisters.last();
+}
+
+RegisterID* BytecodeGenerator::newTemporary()
+{
+ // Reclaim free register IDs.
+ while (m_calleeRegisters.size() && !m_calleeRegisters.last().refCount())
+ m_calleeRegisters.removeLast();
+
+ RegisterID* result = newRegister();
+ result->setTemporary();
+ return result;
+}
+
+RegisterID* BytecodeGenerator::highestUsedRegister()
+{
+ size_t count = m_codeBlock->m_numCalleeRegisters;
+ while (m_calleeRegisters.size() < count)
+ newRegister();
+ return &m_calleeRegisters.last();
+}
+
+PassRefPtr<LabelScope> BytecodeGenerator::newLabelScope(LabelScope::Type type, const Identifier* name)
+{
+ // Reclaim free label scopes.
+ while (m_labelScopes.size() && !m_labelScopes.last().refCount())
+ m_labelScopes.removeLast();
+
+ // Allocate new label scope.
+ LabelScope scope(type, name, scopeDepth(), newLabel(), type == LabelScope::Loop ? newLabel() : 0); // Only loops have continue targets.
+ m_labelScopes.append(scope);
+ return &m_labelScopes.last();
+}
+
+PassRefPtr<Label> BytecodeGenerator::newLabel()
+{
+ // Reclaim free label IDs.
+ while (m_labels.size() && !m_labels.last().refCount())
+ m_labels.removeLast();
+
+ // Allocate new label ID.
+ m_labels.append(m_codeBlock);
+ return &m_labels.last();
+}
+
+PassRefPtr<Label> BytecodeGenerator::emitLabel(Label* l0)
+{
+ unsigned newLabelIndex = instructions().size();
+ l0->setLocation(newLabelIndex);
+
+ if (m_codeBlock->numberOfJumpTargets()) {
+ unsigned lastLabelIndex = m_codeBlock->lastJumpTarget();
+ ASSERT(lastLabelIndex <= newLabelIndex);
+ if (newLabelIndex == lastLabelIndex) {
+ // Peephole optimizations have already been disabled by emitting the last label
+ return l0;
+ }
+ }
+
+ m_codeBlock->addJumpTarget(newLabelIndex);
+
+ // This disables peephole optimizations when an instruction is a jump target
+ m_lastOpcodeID = op_end;
+ return l0;
+}
+
+void BytecodeGenerator::emitOpcode(OpcodeID opcodeID)
+{
+ instructions().append(globalData()->interpreter->getOpcode(opcodeID));
+ m_lastOpcodeID = opcodeID;
+}
+
+void BytecodeGenerator::retrieveLastBinaryOp(int& dstIndex, int& src1Index, int& src2Index)
+{
+ ASSERT(instructions().size() >= 4);
+ size_t size = instructions().size();
+ dstIndex = instructions().at(size - 3).u.operand;
+ src1Index = instructions().at(size - 2).u.operand;
+ src2Index = instructions().at(size - 1).u.operand;
+}
+
+void BytecodeGenerator::retrieveLastUnaryOp(int& dstIndex, int& srcIndex)
+{
+ ASSERT(instructions().size() >= 3);
+ size_t size = instructions().size();
+ dstIndex = instructions().at(size - 2).u.operand;
+ srcIndex = instructions().at(size - 1).u.operand;
+}
+
+void ALWAYS_INLINE BytecodeGenerator::rewindBinaryOp()
+{
+ ASSERT(instructions().size() >= 4);
+ instructions().shrink(instructions().size() - 4);
+}
+
+void ALWAYS_INLINE BytecodeGenerator::rewindUnaryOp()
+{
+ ASSERT(instructions().size() >= 3);
+ instructions().shrink(instructions().size() - 3);
+}
+
+PassRefPtr<Label> BytecodeGenerator::emitJump(Label* target)
+{
+ emitOpcode(target->isForward() ? op_jmp : op_loop);
+ instructions().append(target->offsetFrom(instructions().size()));
+ return target;
+}
+
+PassRefPtr<Label> BytecodeGenerator::emitJumpIfTrue(RegisterID* cond, Label* target)
+{
+ if (m_lastOpcodeID == op_less && !target->isForward()) {
+ int dstIndex;
+ int src1Index;
+ int src2Index;
+
+ retrieveLastBinaryOp(dstIndex, src1Index, src2Index);
+
+ if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
+ rewindBinaryOp();
+ emitOpcode(op_loop_if_less);
+ instructions().append(src1Index);
+ instructions().append(src2Index);
+ instructions().append(target->offsetFrom(instructions().size()));
+ return target;
+ }
+ } else if (m_lastOpcodeID == op_lesseq && !target->isForward()) {
+ int dstIndex;
+ int src1Index;
+ int src2Index;
+
+ retrieveLastBinaryOp(dstIndex, src1Index, src2Index);
+
+ if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
+ rewindBinaryOp();
+ emitOpcode(op_loop_if_lesseq);
+ instructions().append(src1Index);
+ instructions().append(src2Index);
+ instructions().append(target->offsetFrom(instructions().size()));
+ return target;
+ }
+ } else if (m_lastOpcodeID == op_eq_null && target->isForward()) {
+ int dstIndex;
+ int srcIndex;
+
+ retrieveLastUnaryOp(dstIndex, srcIndex);
+
+ if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
+ rewindUnaryOp();
+ emitOpcode(op_jeq_null);
+ instructions().append(srcIndex);
+ instructions().append(target->offsetFrom(instructions().size()));
+ return target;
+ }
+ } else if (m_lastOpcodeID == op_neq_null && target->isForward()) {
+ int dstIndex;
+ int srcIndex;
+
+ retrieveLastUnaryOp(dstIndex, srcIndex);
+
+ if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
+ rewindUnaryOp();
+ emitOpcode(op_jneq_null);
+ instructions().append(srcIndex);
+ instructions().append(target->offsetFrom(instructions().size()));
+ return target;
+ }
+ }
+
+ emitOpcode(target->isForward() ? op_jtrue : op_loop_if_true);
+ instructions().append(cond->index());
+ instructions().append(target->offsetFrom(instructions().size()));
+ return target;
+}
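
As an illustration of the peephole above: a backward branch on a fresh op_less result is fused into a single opcode (the JavaScript and the bytecode listing are a sketch, not emitted text):

    for (var i = 0; i < n; ++i) { }
    // without the peephole:  less t0, i, n ; loop_if_true t0, target
    // with the peephole:     loop_if_less i, n, target   (no temporary, one dispatch)
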
+
+PassRefPtr<Label> BytecodeGenerator::emitJumpIfFalse(RegisterID* cond, Label* target)
+{
+ ASSERT(target->isForward());
+
+ if (m_lastOpcodeID == op_less) {
+ int dstIndex;
+ int src1Index;
+ int src2Index;
+
+ retrieveLastBinaryOp(dstIndex, src1Index, src2Index);
+
+ if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
+ rewindBinaryOp();
+ emitOpcode(op_jnless);
+ instructions().append(src1Index);
+ instructions().append(src2Index);
+ instructions().append(target->offsetFrom(instructions().size()));
+ return target;
+ }
+ } else if (m_lastOpcodeID == op_not) {
+ int dstIndex;
+ int srcIndex;
+
+ retrieveLastUnaryOp(dstIndex, srcIndex);
+
+ if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
+ rewindUnaryOp();
+ emitOpcode(op_jtrue);
+ instructions().append(srcIndex);
+ instructions().append(target->offsetFrom(instructions().size()));
+ return target;
+ }
+ } else if (m_lastOpcodeID == op_eq_null) {
+ int dstIndex;
+ int srcIndex;
+
+ retrieveLastUnaryOp(dstIndex, srcIndex);
+
+ if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
+ rewindUnaryOp();
+ emitOpcode(op_jneq_null);
+ instructions().append(srcIndex);
+ instructions().append(target->offsetFrom(instructions().size()));
+ return target;
+ }
+ } else if (m_lastOpcodeID == op_neq_null) {
+ int dstIndex;
+ int srcIndex;
+
+ retrieveLastUnaryOp(dstIndex, srcIndex);
+
+ if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {
+ rewindUnaryOp();
+ emitOpcode(op_jeq_null);
+ instructions().append(srcIndex);
+ instructions().append(target->offsetFrom(instructions().size()));
+ return target;
+ }
+ }
+
+ emitOpcode(op_jfalse);
+ instructions().append(cond->index());
+ instructions().append(target->offsetFrom(instructions().size()));
+ return target;
+}
+
+unsigned BytecodeGenerator::addConstant(FuncDeclNode* n)
+{
+ // No need to explicitly unique function body nodes -- they're unique already.
+ return m_codeBlock->addFunction(n);
+}
+
+unsigned BytecodeGenerator::addConstant(FuncExprNode* n)
+{
+ // No need to explicitly unique function expression nodes -- they're unique already.
+ return m_codeBlock->addFunctionExpression(n);
+}
+
+unsigned BytecodeGenerator::addConstant(const Identifier& ident)
+{
+ UString::Rep* rep = ident.ustring().rep();
+ pair<IdentifierMap::iterator, bool> result = m_identifierMap.add(rep, m_codeBlock->numberOfIdentifiers());
+ if (result.second) // new entry
+ m_codeBlock->addIdentifier(Identifier(m_globalData, rep));
+
+ return result.first->second;
+}
+
+RegisterID* BytecodeGenerator::addConstant(JSValuePtr v)
+{
+ pair<JSValueMap::iterator, bool> result = m_jsValueMap.add(JSValuePtr::encode(v), m_nextConstantIndex);
+ if (result.second) {
+ RegisterID& constant = m_calleeRegisters[m_nextConstantIndex];
+
+ ++m_nextConstantIndex;
+
+ m_codeBlock->addConstantRegister(JSValuePtr(v));
+ return &constant;
+ }
+
+ return &registerFor(result.first->second);
+}
+
+unsigned BytecodeGenerator::addUnexpectedConstant(JSValuePtr v)
+{
+ return m_codeBlock->addUnexpectedConstant(v);
+}
+
+unsigned BytecodeGenerator::addRegExp(RegExp* r)
+{
+ return m_codeBlock->addRegExp(r);
+}
+
+RegisterID* BytecodeGenerator::emitMove(RegisterID* dst, RegisterID* src)
+{
+ emitOpcode(op_mov);
+ instructions().append(dst->index());
+ instructions().append(src->index());
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitUnaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src)
+{
+ emitOpcode(opcodeID);
+ instructions().append(dst->index());
+ instructions().append(src->index());
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitPreInc(RegisterID* srcDst)
+{
+ emitOpcode(op_pre_inc);
+ instructions().append(srcDst->index());
+ return srcDst;
+}
+
+RegisterID* BytecodeGenerator::emitPreDec(RegisterID* srcDst)
+{
+ emitOpcode(op_pre_dec);
+ instructions().append(srcDst->index());
+ return srcDst;
+}
+
+RegisterID* BytecodeGenerator::emitPostInc(RegisterID* dst, RegisterID* srcDst)
+{
+ emitOpcode(op_post_inc);
+ instructions().append(dst->index());
+ instructions().append(srcDst->index());
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitPostDec(RegisterID* dst, RegisterID* srcDst)
+{
+ emitOpcode(op_post_dec);
+ instructions().append(dst->index());
+ instructions().append(srcDst->index());
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitBinaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes types)
+{
+ emitOpcode(opcodeID);
+ instructions().append(dst->index());
+ instructions().append(src1->index());
+ instructions().append(src2->index());
+
+ if (opcodeID == op_bitor || opcodeID == op_bitand || opcodeID == op_bitxor ||
+ opcodeID == op_add || opcodeID == op_mul || opcodeID == op_sub) {
+ instructions().append(types.toInt());
+ }
+
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitEqualityOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2)
+{
+ if (m_lastOpcodeID == op_typeof) {
+ int dstIndex;
+ int srcIndex;
+
+ retrieveLastUnaryOp(dstIndex, srcIndex);
+
+ if (src1->index() == dstIndex
+ && src1->isTemporary()
+ && m_codeBlock->isConstantRegisterIndex(src2->index())
+ && m_codeBlock->constantRegister(src2->index() - m_codeBlock->m_numVars).jsValue(m_scopeChain->globalObject()->globalExec()).isString()) {
+ const UString& value = asString(m_codeBlock->constantRegister(src2->index() - m_codeBlock->m_numVars).jsValue(m_scopeChain->globalObject()->globalExec()))->value();
+ if (value == "undefined") {
+ rewindUnaryOp();
+ emitOpcode(op_is_undefined);
+ instructions().append(dst->index());
+ instructions().append(srcIndex);
+ return dst;
+ }
+ if (value == "boolean") {
+ rewindUnaryOp();
+ emitOpcode(op_is_boolean);
+ instructions().append(dst->index());
+ instructions().append(srcIndex);
+ return dst;
+ }
+ if (value == "number") {
+ rewindUnaryOp();
+ emitOpcode(op_is_number);
+ instructions().append(dst->index());
+ instructions().append(srcIndex);
+ return dst;
+ }
+ if (value == "string") {
+ rewindUnaryOp();
+ emitOpcode(op_is_string);
+ instructions().append(dst->index());
+ instructions().append(srcIndex);
+ return dst;
+ }
+ if (value == "object") {
+ rewindUnaryOp();
+ emitOpcode(op_is_object);
+ instructions().append(dst->index());
+ instructions().append(srcIndex);
+ return dst;
+ }
+ if (value == "function") {
+ rewindUnaryOp();
+ emitOpcode(op_is_function);
+ instructions().append(dst->index());
+ instructions().append(srcIndex);
+ return dst;
+ }
+ }
+ }
+
+ emitOpcode(opcodeID);
+ instructions().append(dst->index());
+ instructions().append(src1->index());
+ instructions().append(src2->index());
+ return dst;
+}
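
The typeof rewrite above replaces a string comparison with a direct type check. An illustrative trigger:

    if (typeof x == "undefined") { }
    // typeof t0, x ; eq t1, t0, "undefined"   becomes   is_undefined t1, x
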
+
+RegisterID* BytecodeGenerator::emitLoad(RegisterID* dst, bool b)
+{
+ return emitLoad(dst, jsBoolean(b));
+}
+
+RegisterID* BytecodeGenerator::emitLoad(RegisterID* dst, double number)
+{
+ // FIXME: Our hash tables won't hold infinity, so we make a new JSNumberCell each time.
+ // Later we can do the extra work to handle that like the other cases.
+ if (number == HashTraits<double>::emptyValue() || HashTraits<double>::isDeletedValue(number))
+ return emitLoad(dst, jsNumber(globalData(), number));
+ JSValuePtr& valueInMap = m_numberMap.add(number, noValue()).first->second;
+ if (!valueInMap)
+ valueInMap = jsNumber(globalData(), number);
+ return emitLoad(dst, valueInMap);
+}
+
+RegisterID* BytecodeGenerator::emitLoad(RegisterID* dst, const Identifier& identifier)
+{
+ JSString*& stringInMap = m_stringMap.add(identifier.ustring().rep(), 0).first->second;
+ if (!stringInMap)
+ stringInMap = jsOwnedString(globalData(), identifier.ustring());
+ return emitLoad(dst, JSValuePtr(stringInMap));
+}
+
+RegisterID* BytecodeGenerator::emitLoad(RegisterID* dst, JSValuePtr v)
+{
+ RegisterID* constantID = addConstant(v);
+ if (dst)
+ return emitMove(dst, constantID);
+ return constantID;
+}
+
+RegisterID* BytecodeGenerator::emitUnexpectedLoad(RegisterID* dst, bool b)
+{
+ emitOpcode(op_unexpected_load);
+ instructions().append(dst->index());
+ instructions().append(addUnexpectedConstant(jsBoolean(b)));
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitUnexpectedLoad(RegisterID* dst, double d)
+{
+ emitOpcode(op_unexpected_load);
+ instructions().append(dst->index());
+ instructions().append(addUnexpectedConstant(jsNumber(globalData(), d)));
+ return dst;
+}
+
+bool BytecodeGenerator::findScopedProperty(const Identifier& property, int& index, size_t& stackDepth, bool forWriting, JSObject*& globalObject)
+{
+ // Cases where we cannot statically optimize the lookup.
+ if (property == propertyNames().arguments || !canOptimizeNonLocals()) {
+ stackDepth = 0;
+ index = missingSymbolMarker();
+
+ if (shouldOptimizeLocals() && m_codeType == GlobalCode) {
+ ScopeChainIterator iter = m_scopeChain->begin();
+ globalObject = *iter;
+ ASSERT((++iter) == m_scopeChain->end());
+ }
+ return false;
+ }
+
+ size_t depth = 0;
+
+ ScopeChainIterator iter = m_scopeChain->begin();
+ ScopeChainIterator end = m_scopeChain->end();
+ for (; iter != end; ++iter, ++depth) {
+ JSObject* currentScope = *iter;
+ if (!currentScope->isVariableObject())
+ break;
+ JSVariableObject* currentVariableObject = static_cast<JSVariableObject*>(currentScope);
+ SymbolTableEntry entry = currentVariableObject->symbolTable().get(property.ustring().rep());
+
+ // Found the property
+ if (!entry.isNull()) {
+ if (entry.isReadOnly() && forWriting) {
+ stackDepth = 0;
+ index = missingSymbolMarker();
+ if (++iter == end)
+ globalObject = currentVariableObject;
+ return false;
+ }
+ stackDepth = depth;
+ index = entry.getIndex();
+ if (++iter == end)
+ globalObject = currentVariableObject;
+ return true;
+ }
+ if (currentVariableObject->isDynamicScope())
+ break;
+ }
+
+ // Can't locate the property but we're able to avoid a few lookups.
+ stackDepth = depth;
+ index = missingSymbolMarker();
+ JSObject* scope = *iter;
+ if (++iter == end)
+ globalObject = scope;
+ return true;
+}
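
For illustration (a simplified sketch of how this plays out): a variable found in an ordinary activation up the chain resolves to a fixed (stackDepth, index) pair, while a non-variable object such as a 'with' scope stops the walk and forces a runtime lookup:

    function f() {
        var a = 1;
        return function g() { return a; };     // a resolves to a known index
    }                                          // one activation up the chain
    function h(obj) {
        with (obj) {
            return function k() { return a; }; // obj may or may not have 'a':
        }                                      // k must resolve 'a' at runtime
    }
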
+
+RegisterID* BytecodeGenerator::emitInstanceOf(RegisterID* dst, RegisterID* value, RegisterID* base, RegisterID* basePrototype)
+{
+ emitOpcode(op_instanceof);
+ instructions().append(dst->index());
+ instructions().append(value->index());
+ instructions().append(base->index());
+ instructions().append(basePrototype->index());
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitResolve(RegisterID* dst, const Identifier& property)
+{
+ size_t depth = 0;
+ int index = 0;
+ JSObject* globalObject = 0;
+ if (!findScopedProperty(property, index, depth, false, globalObject) && !globalObject) {
+ // We can't optimise at all :-(
+ emitOpcode(op_resolve);
+ instructions().append(dst->index());
+ instructions().append(addConstant(property));
+ return dst;
+ }
+
+ if (globalObject) {
+ bool forceGlobalResolve = false;
+ if (m_regeneratingForExceptionInfo) {
+#if ENABLE(JIT)
+ forceGlobalResolve = m_codeBlockBeingRegeneratedFrom->hasGlobalResolveInfoAtBytecodeOffset(instructions().size());
+#else
+ forceGlobalResolve = m_codeBlockBeingRegeneratedFrom->hasGlobalResolveInstructionAtBytecodeOffset(instructions().size());
+#endif
+ }
+
+ if (index != missingSymbolMarker() && !forceGlobalResolve) {
+ // Directly index the property lookup across multiple scopes.
+ return emitGetScopedVar(dst, depth, index, globalObject);
+ }
+
+#if ENABLE(JIT)
+ m_codeBlock->addGlobalResolveInfo(instructions().size());
+#else
+ m_codeBlock->addGlobalResolveInstruction(instructions().size());
+#endif
+ emitOpcode(op_resolve_global);
+ instructions().append(dst->index());
+ instructions().append(globalObject);
+ instructions().append(addConstant(property));
+ instructions().append(0);
+ instructions().append(0);
+ return dst;
+ }
+
+ if (index != missingSymbolMarker()) {
+ // Directly index the property lookup across multiple scopes.
+ return emitGetScopedVar(dst, depth, index, globalObject);
+ }
+
+ // In this case we are at least able to drop a few scope chains from the
+ // lookup chain, although we still need to hash from then on.
+ emitOpcode(op_resolve_skip);
+ instructions().append(dst->index());
+ instructions().append(addConstant(property));
+ instructions().append(depth);
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitGetScopedVar(RegisterID* dst, size_t depth, int index, JSValuePtr globalObject)
+{
+ if (globalObject) {
+ emitOpcode(op_get_global_var);
+ instructions().append(dst->index());
+ instructions().append(asCell(globalObject));
+ instructions().append(index);
+ return dst;
+ }
+
+ emitOpcode(op_get_scoped_var);
+ instructions().append(dst->index());
+ instructions().append(index);
+ instructions().append(depth);
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitPutScopedVar(size_t depth, int index, RegisterID* value, JSValuePtr globalObject)
+{
+ if (globalObject) {
+ emitOpcode(op_put_global_var);
+ instructions().append(asCell(globalObject));
+ instructions().append(index);
+ instructions().append(value->index());
+ return value;
+ }
+ emitOpcode(op_put_scoped_var);
+ instructions().append(index);
+ instructions().append(depth);
+ instructions().append(value->index());
+ return value;
+}
+
+RegisterID* BytecodeGenerator::emitResolveBase(RegisterID* dst, const Identifier& property)
+{
+ emitOpcode(op_resolve_base);
+ instructions().append(dst->index());
+ instructions().append(addConstant(property));
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitResolveWithBase(RegisterID* baseDst, RegisterID* propDst, const Identifier& property)
+{
+ emitOpcode(op_resolve_with_base);
+ instructions().append(baseDst->index());
+ instructions().append(propDst->index());
+ instructions().append(addConstant(property));
+ return baseDst;
+}
+
+RegisterID* BytecodeGenerator::emitResolveFunction(RegisterID* baseDst, RegisterID* funcDst, const Identifier& property)
+{
+ emitOpcode(op_resolve_func);
+ instructions().append(baseDst->index());
+ instructions().append(funcDst->index());
+ instructions().append(addConstant(property));
+ return baseDst;
+}
+
+RegisterID* BytecodeGenerator::emitGetById(RegisterID* dst, RegisterID* base, const Identifier& property)
+{
+#if ENABLE(JIT)
+ m_codeBlock->addStructureStubInfo(StructureStubInfo(op_get_by_id));
+#else
+ m_codeBlock->addPropertyAccessInstruction(instructions().size());
+#endif
+
+ emitOpcode(op_get_by_id);
+ instructions().append(dst->index());
+ instructions().append(base->index());
+ instructions().append(addConstant(property));
+ instructions().append(0);
+ instructions().append(0);
+ instructions().append(0);
+ instructions().append(0);
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitPutById(RegisterID* base, const Identifier& property, RegisterID* value)
+{
+#if ENABLE(JIT)
+ m_codeBlock->addStructureStubInfo(StructureStubInfo(op_put_by_id));
+#else
+ m_codeBlock->addPropertyAccessInstruction(instructions().size());
+#endif
+
+ emitOpcode(op_put_by_id);
+ instructions().append(base->index());
+ instructions().append(addConstant(property));
+ instructions().append(value->index());
+ instructions().append(0);
+ instructions().append(0);
+ instructions().append(0);
+ instructions().append(0);
+ return value;
+}
+
+RegisterID* BytecodeGenerator::emitPutGetter(RegisterID* base, const Identifier& property, RegisterID* value)
+{
+ emitOpcode(op_put_getter);
+ instructions().append(base->index());
+ instructions().append(addConstant(property));
+ instructions().append(value->index());
+ return value;
+}
+
+RegisterID* BytecodeGenerator::emitPutSetter(RegisterID* base, const Identifier& property, RegisterID* value)
+{
+ emitOpcode(op_put_setter);
+ instructions().append(base->index());
+ instructions().append(addConstant(property));
+ instructions().append(value->index());
+ return value;
+}
+
+RegisterID* BytecodeGenerator::emitDeleteById(RegisterID* dst, RegisterID* base, const Identifier& property)
+{
+ emitOpcode(op_del_by_id);
+ instructions().append(dst->index());
+ instructions().append(base->index());
+ instructions().append(addConstant(property));
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitGetByVal(RegisterID* dst, RegisterID* base, RegisterID* property)
+{
+ emitOpcode(op_get_by_val);
+ instructions().append(dst->index());
+ instructions().append(base->index());
+ instructions().append(property->index());
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitPutByVal(RegisterID* base, RegisterID* property, RegisterID* value)
+{
+ emitOpcode(op_put_by_val);
+ instructions().append(base->index());
+ instructions().append(property->index());
+ instructions().append(value->index());
+ return value;
+}
+
+RegisterID* BytecodeGenerator::emitDeleteByVal(RegisterID* dst, RegisterID* base, RegisterID* property)
+{
+ emitOpcode(op_del_by_val);
+ instructions().append(dst->index());
+ instructions().append(base->index());
+ instructions().append(property->index());
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitPutByIndex(RegisterID* base, unsigned index, RegisterID* value)
+{
+ emitOpcode(op_put_by_index);
+ instructions().append(base->index());
+ instructions().append(index);
+ instructions().append(value->index());
+ return value;
+}
+
+RegisterID* BytecodeGenerator::emitNewObject(RegisterID* dst)
+{
+ emitOpcode(op_new_object);
+ instructions().append(dst->index());
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitNewArray(RegisterID* dst, ElementNode* elements)
+{
+ Vector<RefPtr<RegisterID>, 16> argv;
+ for (ElementNode* n = elements; n; n = n->next()) {
+ if (n->elision())
+ break;
+ argv.append(newTemporary());
+ // op_new_array requires the initial values to be a sequential range of registers
+ ASSERT(argv.size() == 1 || argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() + 1);
+ emitNode(argv.last().get(), n->value());
+ }
+ emitOpcode(op_new_array);
+ instructions().append(dst->index());
+ instructions().append(argv.size() ? argv[0]->index() : 0); // argv
+ instructions().append(argv.size()); // argc
+ return dst;
+}
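
Note how the loop above stops at the first elision: op_new_array can only initialize a dense leading run held in consecutive registers. Illustrative array literals:

    [1, 2, 3]   // all three values are evaluated into sequential temporaries for op_new_array
    [1, , 3]    // the hole ends the run: op_new_array receives only [1], and the array
                // literal's codegen stores the later elements individually afterwards
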
+
+RegisterID* BytecodeGenerator::emitNewFunction(RegisterID* dst, FuncDeclNode* n)
+{
+ emitOpcode(op_new_func);
+ instructions().append(dst->index());
+ instructions().append(addConstant(n));
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitNewRegExp(RegisterID* dst, RegExp* regExp)
+{
+ emitOpcode(op_new_regexp);
+ instructions().append(dst->index());
+ instructions().append(addRegExp(regExp));
+ return dst;
+}
+
+
+RegisterID* BytecodeGenerator::emitNewFunctionExpression(RegisterID* r0, FuncExprNode* n)
+{
+ emitOpcode(op_new_func_exp);
+ instructions().append(r0->index());
+ instructions().append(addConstant(n));
+ return r0;
+}
+
+RegisterID* BytecodeGenerator::emitCall(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, ArgumentsNode* argumentsNode, unsigned divot, unsigned startOffset, unsigned endOffset)
+{
+ return emitCall(op_call, dst, func, thisRegister, argumentsNode, divot, startOffset, endOffset);
+}
+
+RegisterID* BytecodeGenerator::emitCallEval(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, ArgumentsNode* argumentsNode, unsigned divot, unsigned startOffset, unsigned endOffset)
+{
+ return emitCall(op_call_eval, dst, func, thisRegister, argumentsNode, divot, startOffset, endOffset);
+}
+
+RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, RegisterID* func, RegisterID* thisRegister, ArgumentsNode* argumentsNode, unsigned divot, unsigned startOffset, unsigned endOffset)
+{
+ ASSERT(opcodeID == op_call || opcodeID == op_call_eval);
+ ASSERT(func->refCount());
+ ASSERT(thisRegister->refCount());
+
+ RegisterID* originalFunc = func;
+ if (m_shouldEmitProfileHooks) {
+ // If codegen decided to recycle func as this call's destination register,
+ // we need to undo that optimization here so that func will still be around
+ // for the sake of op_profile_did_call.
+ if (dst == func) {
+ RefPtr<RegisterID> movedThisRegister = emitMove(newTemporary(), thisRegister);
+ RefPtr<RegisterID> movedFunc = emitMove(thisRegister, func);
+
+ thisRegister = movedThisRegister.release().releaseRef();
+ func = movedFunc.release().releaseRef();
+ }
+ }
+
+ // Generate code for arguments.
+ Vector<RefPtr<RegisterID>, 16> argv;
+ argv.append(thisRegister);
+ for (ArgumentListNode* n = argumentsNode->m_listNode.get(); n; n = n->m_next.get()) {
+ argv.append(newTemporary());
+ // op_call requires the arguments to be a sequential range of registers
+ ASSERT(argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() + 1);
+ emitNode(argv.last().get(), n);
+ }
+
+ // Reserve space for call frame.
+ Vector<RefPtr<RegisterID>, RegisterFile::CallFrameHeaderSize> callFrame;
+ for (int i = 0; i < RegisterFile::CallFrameHeaderSize; ++i)
+ callFrame.append(newTemporary());
+
+ if (m_shouldEmitProfileHooks) {
+ emitOpcode(op_profile_will_call);
+ instructions().append(func->index());
+
+#if ENABLE(JIT)
+ m_codeBlock->addFunctionRegisterInfo(instructions().size(), func->index());
+#endif
+ }
+
+ emitExpressionInfo(divot, startOffset, endOffset);
+
+#if ENABLE(JIT)
+ m_codeBlock->addCallLinkInfo();
+#endif
+
+ // Emit call.
+ emitOpcode(opcodeID);
+ instructions().append(dst->index()); // dst
+ instructions().append(func->index()); // func
+ instructions().append(argv.size()); // argCount
+ instructions().append(argv[0]->index() + argv.size() + RegisterFile::CallFrameHeaderSize); // registerOffset
+
+ if (m_shouldEmitProfileHooks) {
+ emitOpcode(op_profile_did_call);
+ instructions().append(func->index());
+
+ if (dst == originalFunc) {
+ thisRegister->deref();
+ func->deref();
+ }
+ }
+
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitReturn(RegisterID* src)
+{
+ if (m_codeBlock->needsFullScopeChain()) {
+ emitOpcode(op_tear_off_activation);
+ instructions().append(m_activationRegisterIndex);
+ } else if (m_codeBlock->usesArguments() && m_codeBlock->m_numParameters > 1)
+ emitOpcode(op_tear_off_arguments);
+
+ return emitUnaryNoDstOp(op_ret, src);
+}
+
+RegisterID* BytecodeGenerator::emitUnaryNoDstOp(OpcodeID opcodeID, RegisterID* src)
+{
+ emitOpcode(opcodeID);
+ instructions().append(src->index());
+ return src;
+}
+
+RegisterID* BytecodeGenerator::emitConstruct(RegisterID* dst, RegisterID* func, ArgumentsNode* argumentsNode, unsigned divot, unsigned startOffset, unsigned endOffset)
+{
+ ASSERT(func->refCount());
+
+ RegisterID* originalFunc = func;
+ if (m_shouldEmitProfileHooks) {
+ // If codegen decided to recycle func as this call's destination register,
+ // we need to undo that optimization here so that func will still be around
+ // for the sake of op_profile_did_call.
+ if (dst == func) {
+ RefPtr<RegisterID> movedFunc = emitMove(newTemporary(), func);
+ func = movedFunc.release().releaseRef();
+ }
+ }
+
+ RefPtr<RegisterID> funcProto = newTemporary();
+
+ // Generate code for arguments.
+ Vector<RefPtr<RegisterID>, 16> argv;
+ argv.append(newTemporary()); // reserve space for "this"
+ for (ArgumentListNode* n = argumentsNode ? argumentsNode->m_listNode.get() : 0; n; n = n->m_next.get()) {
+ argv.append(newTemporary());
+ // op_construct requires the arguments to be a sequential range of registers
+ ASSERT(argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() + 1);
+ emitNode(argv.last().get(), n);
+ }
+
+ if (m_shouldEmitProfileHooks) {
+ emitOpcode(op_profile_will_call);
+ instructions().append(func->index());
+ }
+
+ // Load prototype.
+ emitExpressionInfo(divot, startOffset, endOffset);
+ emitGetByIdExceptionInfo(op_construct);
+ emitGetById(funcProto.get(), func, globalData()->propertyNames->prototype);
+
+ // Reserve space for call frame.
+ Vector<RefPtr<RegisterID>, RegisterFile::CallFrameHeaderSize> callFrame;
+ for (int i = 0; i < RegisterFile::CallFrameHeaderSize; ++i)
+ callFrame.append(newTemporary());
+
+ emitExpressionInfo(divot, startOffset, endOffset);
+
+#if ENABLE(JIT)
+ m_codeBlock->addCallLinkInfo();
+#endif
+
+ emitOpcode(op_construct);
+ instructions().append(dst->index()); // dst
+ instructions().append(func->index()); // func
+ instructions().append(argv.size()); // argCount
+ instructions().append(argv[0]->index() + argv.size() + RegisterFile::CallFrameHeaderSize); // registerOffset
+ instructions().append(funcProto->index()); // proto
+ instructions().append(argv[0]->index()); // thisRegister
+
+ emitOpcode(op_construct_verify);
+ instructions().append(dst->index());
+ instructions().append(argv[0]->index());
+
+ if (m_shouldEmitProfileHooks) {
+ emitOpcode(op_profile_did_call);
+ instructions().append(func->index());
+
+ if (dst == originalFunc)
+ func->deref();
+ }
+
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitPushScope(RegisterID* scope)
+{
+ ASSERT(scope->isTemporary());
+ ControlFlowContext context;
+ context.isFinallyBlock = false;
+ m_scopeContextStack.append(context);
+ m_dynamicScopeDepth++;
+
+ return emitUnaryNoDstOp(op_push_scope, scope);
+}
+
+void BytecodeGenerator::emitPopScope()
+{
+ ASSERT(m_scopeContextStack.size());
+ ASSERT(!m_scopeContextStack.last().isFinallyBlock);
+
+ emitOpcode(op_pop_scope);
+
+ m_scopeContextStack.removeLast();
+ m_dynamicScopeDepth--;
+}
+
+void BytecodeGenerator::emitDebugHook(DebugHookID debugHookID, int firstLine, int lastLine)
+{
+ if (!m_shouldEmitDebugHooks)
+ return;
+ emitOpcode(op_debug);
+ instructions().append(debugHookID);
+ instructions().append(firstLine);
+ instructions().append(lastLine);
+}
+
+void BytecodeGenerator::pushFinallyContext(Label* target, RegisterID* retAddrDst)
+{
+ ControlFlowContext scope;
+ scope.isFinallyBlock = true;
+ FinallyContext context = { target, retAddrDst };
+ scope.finallyContext = context;
+ m_scopeContextStack.append(scope);
+ m_finallyDepth++;
+}
+
+void BytecodeGenerator::popFinallyContext()
+{
+ ASSERT(m_scopeContextStack.size());
+ ASSERT(m_scopeContextStack.last().isFinallyBlock);
+ ASSERT(m_finallyDepth > 0);
+ m_scopeContextStack.removeLast();
+ m_finallyDepth--;
+}
+
+LabelScope* BytecodeGenerator::breakTarget(const Identifier& name)
+{
+ // Reclaim free label scopes.
+ while (m_labelScopes.size() && !m_labelScopes.last().refCount())
+ m_labelScopes.removeLast();
+
+ if (!m_labelScopes.size())
+ return 0;
+
+ // We special-case the following, which is a syntax error in Firefox:
+ // label:
+ // break;
+ if (name.isEmpty()) {
+ for (int i = m_labelScopes.size() - 1; i >= 0; --i) {
+ LabelScope* scope = &m_labelScopes[i];
+ if (scope->type() != LabelScope::NamedLabel) {
+ ASSERT(scope->breakTarget());
+ return scope;
+ }
+ }
+ return 0;
+ }
+
+ for (int i = m_labelScopes.size() - 1; i >= 0; --i) {
+ LabelScope* scope = &m_labelScopes[i];
+ if (scope->name() && *scope->name() == name) {
+ ASSERT(scope->breakTarget());
+ return scope;
+ }
+ }
+ return 0;
+}
+
+LabelScope* BytecodeGenerator::continueTarget(const Identifier& name)
+{
+ // Reclaim free label scopes.
+ while (m_labelScopes.size() && !m_labelScopes.last().refCount())
+ m_labelScopes.removeLast();
+
+ if (!m_labelScopes.size())
+ return 0;
+
+ if (name.isEmpty()) {
+ for (int i = m_labelScopes.size() - 1; i >= 0; --i) {
+ LabelScope* scope = &m_labelScopes[i];
+ if (scope->type() == LabelScope::Loop) {
+ ASSERT(scope->continueTarget());
+ return scope;
+ }
+ }
+ return 0;
+ }
+
+ // Continue to the loop nested nearest to the label scope that matches
+ // 'name'.
+ LabelScope* result = 0;
+ for (int i = m_labelScopes.size() - 1; i >= 0; --i) {
+ LabelScope* scope = &m_labelScopes[i];
+ if (scope->type() == LabelScope::Loop) {
+ ASSERT(scope->continueTarget());
+ result = scope;
+ }
+ if (scope->name() && *scope->name() == name)
+ return result; // may be 0
+ }
+ return 0;
+}
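
This implements JavaScript's labeled-continue rule: 'continue label' resumes the loop nested nearest to the labeled statement, which need not be the labeled statement itself. An illustrative sketch:

    outer: for (var i = 0; i < 3; ++i) {
        for (var j = 0; j < 3; ++j) {
            continue outer;   // continueTarget finds the loop scope named "outer"
        }
    }
    block: {
        continue block;       // no loop at or inside the label: returns 0,
    }                         // which the caller reports as a syntax error
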
+
+PassRefPtr<Label> BytecodeGenerator::emitComplexJumpScopes(Label* target, ControlFlowContext* topScope, ControlFlowContext* bottomScope)
+{
+ while (topScope > bottomScope) {
+ // First we count the number of dynamic scopes we need to remove to get
+ // to a finally block.
+ int nNormalScopes = 0;
+ while (topScope > bottomScope) {
+ if (topScope->isFinallyBlock)
+ break;
+ ++nNormalScopes;
+ --topScope;
+ }
+
+ if (nNormalScopes) {
+ // We need to remove a number of dynamic scopes to get to the next
+ // finally block
+ emitOpcode(op_jmp_scopes);
+ instructions().append(nNormalScopes);
+
+ // If topScope == bottomScope then there isn't actually a finally block
+ // left to emit, so make the jmp_scopes jump directly to the target label
+ if (topScope == bottomScope) {
+ instructions().append(target->offsetFrom(instructions().size()));
+ return target;
+ }
+
+ // Otherwise we just use jmp_scopes to pop a group of scopes and go
+ // to the next instruction
+ RefPtr<Label> nextInsn = newLabel();
+ instructions().append(nextInsn->offsetFrom(instructions().size()));
+ emitLabel(nextInsn.get());
+ }
+
+ // To get here there must be at least one finally block present
+ do {
+ ASSERT(topScope->isFinallyBlock);
+ emitJumpSubroutine(topScope->finallyContext.retAddrDst, topScope->finallyContext.finallyAddr);
+ --topScope;
+ if (!topScope->isFinallyBlock)
+ break;
+ } while (topScope > bottomScope);
+ }
+ return emitJump(target);
+}
+
+PassRefPtr<Label> BytecodeGenerator::emitJumpScopes(Label* target, int targetScopeDepth)
+{
+ ASSERT(scopeDepth() - targetScopeDepth >= 0);
+ ASSERT(target->isForward());
+
+ size_t scopeDelta = scopeDepth() - targetScopeDepth;
+ ASSERT(scopeDelta <= m_scopeContextStack.size());
+ if (!scopeDelta)
+ return emitJump(target);
+
+ if (m_finallyDepth)
+ return emitComplexJumpScopes(target, &m_scopeContextStack.last(), &m_scopeContextStack.last() - scopeDelta);
+
+ emitOpcode(op_jmp_scopes);
+ instructions().append(scopeDelta);
+ instructions().append(target->offsetFrom(instructions().size()));
+ return target;
+}
+
+RegisterID* BytecodeGenerator::emitNextPropertyName(RegisterID* dst, RegisterID* iter, Label* target)
+{
+ emitOpcode(op_next_pname);
+ instructions().append(dst->index());
+ instructions().append(iter->index());
+ instructions().append(target->offsetFrom(instructions().size()));
+ return dst;
+}
+
+RegisterID* BytecodeGenerator::emitCatch(RegisterID* targetRegister, Label* start, Label* end)
+{
+#if ENABLE(JIT)
+ HandlerInfo info = { start->offsetFrom(0), end->offsetFrom(0), instructions().size(), m_dynamicScopeDepth + m_baseScopeDepth, 0 };
+#else
+ HandlerInfo info = { start->offsetFrom(0), end->offsetFrom(0), instructions().size(), m_dynamicScopeDepth + m_baseScopeDepth };
+#endif
+
+ m_codeBlock->addExceptionHandler(info);
+ emitOpcode(op_catch);
+ instructions().append(targetRegister->index());
+ return targetRegister;
+}
+
+RegisterID* BytecodeGenerator::emitNewError(RegisterID* dst, ErrorType type, JSValuePtr message)
+{
+ emitOpcode(op_new_error);
+ instructions().append(dst->index());
+ instructions().append(static_cast<int>(type));
+ instructions().append(addUnexpectedConstant(message));
+ return dst;
+}
+
+PassRefPtr<Label> BytecodeGenerator::emitJumpSubroutine(RegisterID* retAddrDst, Label* finally)
+{
+ emitOpcode(op_jsr);
+ instructions().append(retAddrDst->index());
+ instructions().append(finally->offsetFrom(instructions().size()));
+ return finally;
+}
+
+void BytecodeGenerator::emitSubroutineReturn(RegisterID* retAddrSrc)
+{
+ emitOpcode(op_sret);
+ instructions().append(retAddrSrc->index());
+}
+
+void BytecodeGenerator::emitPushNewScope(RegisterID* dst, Identifier& property, RegisterID* value)
+{
+ ControlFlowContext context;
+ context.isFinallyBlock = false;
+ m_scopeContextStack.append(context);
+ m_dynamicScopeDepth++;
+
+ emitOpcode(op_push_new_scope);
+ instructions().append(dst->index());
+ instructions().append(addConstant(property));
+ instructions().append(value->index());
+}
+
+void BytecodeGenerator::beginSwitch(RegisterID* scrutineeRegister, SwitchInfo::SwitchType type)
+{
+ SwitchInfo info = { instructions().size(), type };
+ switch (type) {
+ case SwitchInfo::SwitchImmediate:
+ emitOpcode(op_switch_imm);
+ break;
+ case SwitchInfo::SwitchCharacter:
+ emitOpcode(op_switch_char);
+ break;
+ case SwitchInfo::SwitchString:
+ emitOpcode(op_switch_string);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ instructions().append(0); // place holder for table index
+ instructions().append(0); // place holder for default target
+ instructions().append(scrutineeRegister->index());
+ m_switchContextStack.append(info);
+}
+
+static int32_t keyForImmediateSwitch(ExpressionNode* node, int32_t min, int32_t max)
+{
+ UNUSED_PARAM(max);
+ ASSERT(node->isNumber());
+ double value = static_cast<NumberNode*>(node)->value();
+ int32_t key = static_cast<int32_t>(value);
+ ASSERT(JSValuePtr::makeInt32Fast(key) && (JSValuePtr::makeInt32Fast(key).getInt32Fast() == value));
+ ASSERT(key == value);
+ ASSERT(key >= min);
+ ASSERT(key <= max);
+ return key - min;
+}
+
+static void prepareJumpTableForImmediateSwitch(SimpleJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes, int32_t min, int32_t max)
+{
+ jumpTable.min = min;
+ jumpTable.branchOffsets.resize(max - min + 1);
+ jumpTable.branchOffsets.fill(0);
+ for (uint32_t i = 0; i < clauseCount; ++i) {
+ // We're emitting this after the clause labels should have been fixed, so
+ // the labels should not be "forward" references
+ ASSERT(!labels[i]->isForward());
+ jumpTable.add(keyForImmediateSwitch(nodes[i], min, max), labels[i]->offsetFrom(switchAddress));
+ }
+}
+
+static int32_t keyForCharacterSwitch(ExpressionNode* node, int32_t min, int32_t max)
+{
+ UNUSED_PARAM(max);
+ ASSERT(node->isString());
+ UString::Rep* clause = static_cast<StringNode*>(node)->value().ustring().rep();
+ ASSERT(clause->size() == 1);
+
+ int32_t key = clause->data()[0];
+ ASSERT(key >= min);
+ ASSERT(key <= max);
+ return key - min;
+}
+
+static void prepareJumpTableForCharacterSwitch(SimpleJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes, int32_t min, int32_t max)
+{
+ jumpTable.min = min;
+ jumpTable.branchOffsets.resize(max - min + 1);
+ jumpTable.branchOffsets.fill(0);
+ for (uint32_t i = 0; i < clauseCount; ++i) {
+ // We're emitting this after the clause labels should have been fixed, so
+ // the labels should not be "forward" references
+ ASSERT(!labels[i]->isForward());
+ jumpTable.add(keyForCharacterSwitch(nodes[i], min, max), labels[i]->offsetFrom(switchAddress));
+ }
+}
+
+static void prepareJumpTableForStringSwitch(StringJumpTable& jumpTable, int32_t switchAddress, uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes)
+{
+ for (uint32_t i = 0; i < clauseCount; ++i) {
+        // We're emitting this after the clause labels have been fixed, so
+        // they should no longer be "forward" references.
+ ASSERT(!labels[i]->isForward());
+
+ ASSERT(nodes[i]->isString());
+ UString::Rep* clause = static_cast<StringNode*>(nodes[i])->value().ustring().rep();
+ OffsetLocation location;
+ location.branchOffset = labels[i]->offsetFrom(switchAddress);
+#if ENABLE(JIT)
+ location.ctiOffset = 0;
+#endif
+ jumpTable.offsetTable.add(clause, location);
+ }
+}
+
+void BytecodeGenerator::endSwitch(uint32_t clauseCount, RefPtr<Label>* labels, ExpressionNode** nodes, Label* defaultLabel, int32_t min, int32_t max)
+{
+ SwitchInfo switchInfo = m_switchContextStack.last();
+ m_switchContextStack.removeLast();
+ if (switchInfo.switchType == SwitchInfo::SwitchImmediate) {
+ instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfImmediateSwitchJumpTables();
+ instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel->offsetFrom(switchInfo.bytecodeOffset + 3);
+
+ SimpleJumpTable& jumpTable = m_codeBlock->addImmediateSwitchJumpTable();
+ prepareJumpTableForImmediateSwitch(jumpTable, switchInfo.bytecodeOffset + 3, clauseCount, labels, nodes, min, max);
+ } else if (switchInfo.switchType == SwitchInfo::SwitchCharacter) {
+ instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfCharacterSwitchJumpTables();
+ instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel->offsetFrom(switchInfo.bytecodeOffset + 3);
+
+ SimpleJumpTable& jumpTable = m_codeBlock->addCharacterSwitchJumpTable();
+ prepareJumpTableForCharacterSwitch(jumpTable, switchInfo.bytecodeOffset + 3, clauseCount, labels, nodes, min, max);
+ } else {
+ ASSERT(switchInfo.switchType == SwitchInfo::SwitchString);
+ instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfStringSwitchJumpTables();
+ instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel->offsetFrom(switchInfo.bytecodeOffset + 3);
+
+ StringJumpTable& jumpTable = m_codeBlock->addStringSwitchJumpTable();
+ prepareJumpTableForStringSwitch(jumpTable, switchInfo.bytecodeOffset + 3, clauseCount, labels, nodes);
+ }
+}
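+
+// Editorial sketch of the instruction stream this patches, using
+// op_switch_imm as the example (operand layout read off beginSwitch() above):
+//
+//     [offset + 0] op_switch_imm
+//     [offset + 1] jump table index        // 0 until patched by endSwitch()
+//     [offset + 2] default target offset   // 0 until patched by endSwitch()
+//     [offset + 3] scrutinee register
+//
+// Every branch offset, including the default, is stored relative to
+// offset + 3, the switchAddress handed to the prepare functions above.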
+
+RegisterID* BytecodeGenerator::emitThrowExpressionTooDeepException()
+{
+    // It would be nice to do an even better job of identifying exactly where the expression is.
+    // And we could make the caller pass the node pointer in, if there were some way of getting
+    // it from an arbitrary node. However, calling emitExpressionInfo without any useful data
+    // is still good enough to get us an accurate line number.
+ emitExpressionInfo(0, 0, 0);
+ RegisterID* exception = emitNewError(newTemporary(), SyntaxError, jsString(globalData(), "Expression too deep"));
+ emitThrow(exception);
+ return exception;
+}
+
+} // namespace JSC
diff --git a/JavaScriptCore/bytecompiler/BytecodeGenerator.h b/JavaScriptCore/bytecompiler/BytecodeGenerator.h
new file mode 100644
index 0000000..ceb5881
--- /dev/null
+++ b/JavaScriptCore/bytecompiler/BytecodeGenerator.h
@@ -0,0 +1,480 @@
+/*
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef BytecodeGenerator_h
+#define BytecodeGenerator_h
+
+#include "CodeBlock.h"
+#include "HashTraits.h"
+#include "Instruction.h"
+#include "Label.h"
+#include "LabelScope.h"
+#include "Interpreter.h"
+#include "RegisterID.h"
+#include "SegmentedVector.h"
+#include "SymbolTable.h"
+#include "Debugger.h"
+#include "Nodes.h"
+#include <wtf/PassRefPtr.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+ class Identifier;
+ class ScopeChain;
+ class ScopeNode;
+
+ struct FinallyContext {
+ Label* finallyAddr;
+ RegisterID* retAddrDst;
+ };
+
+ struct ControlFlowContext {
+ bool isFinallyBlock;
+ FinallyContext finallyContext;
+ };
+
+ class BytecodeGenerator {
+ public:
+ typedef DeclarationStacks::VarStack VarStack;
+ typedef DeclarationStacks::FunctionStack FunctionStack;
+
+ static void setDumpsGeneratedCode(bool dumpsGeneratedCode);
+ static bool dumpsGeneratedCode();
+
+ BytecodeGenerator(ProgramNode*, const Debugger*, const ScopeChain&, SymbolTable*, ProgramCodeBlock*);
+ BytecodeGenerator(FunctionBodyNode*, const Debugger*, const ScopeChain&, SymbolTable*, CodeBlock*);
+ BytecodeGenerator(EvalNode*, const Debugger*, const ScopeChain&, SymbolTable*, EvalCodeBlock*);
+
+ JSGlobalData* globalData() const { return m_globalData; }
+ const CommonIdentifiers& propertyNames() const { return *m_globalData->propertyNames; }
+
+ void generate();
+
+ // Returns the register corresponding to a local variable, or 0 if no
+ // such register exists. Registers returned by registerFor do not
+ // require explicit reference counting.
+ RegisterID* registerFor(const Identifier&);
+
+        // Behaves like registerFor, but ignores dynamic scope, since dynamic
+        // scope should not interfere with const initialisation.
+ RegisterID* constRegisterFor(const Identifier&);
+
+ // Searches the scope chain in an attempt to statically locate the requested
+ // property. Returns false if for any reason the property cannot be safely
+ // optimised at all. Otherwise it will return the index and depth of the
+ // VariableObject that defines the property. If the property cannot be found
+ // statically, depth will contain the depth of the scope chain where dynamic
+ // lookup must begin.
+ //
+        // NB: depth does _not_ include the local scope. E.g., a depth of 0 refers
+ // to the scope containing this codeblock.
+ bool findScopedProperty(const Identifier&, int& index, size_t& depth, bool forWriting, JSObject*& globalObject);
+
+ // Returns the register storing "this"
+ RegisterID* thisRegister() { return &m_thisRegister; }
+
+ bool isLocal(const Identifier&);
+ bool isLocalConstant(const Identifier&);
+
+ // Returns the next available temporary register. Registers returned by
+ // newTemporary require a modified form of reference counting: any
+ // register with a refcount of 0 is considered "available", meaning that
+ // the next instruction may overwrite it.
+ RegisterID* newTemporary();
+
+ RegisterID* highestUsedRegister();
+
+ // The same as newTemporary(), but this function returns "suggestion" if
+ // "suggestion" is a temporary. This function is helpful in situations
+ // where you've put "suggestion" in a RefPtr, but you'd like to allow
+ // the next instruction to overwrite it anyway.
+ RegisterID* newTemporaryOr(RegisterID* suggestion) { return suggestion->isTemporary() ? suggestion : newTemporary(); }
+
+ // Functions for handling of dst register
+
+ RegisterID* ignoredResult() { return &m_ignoredResultRegister; }
+
+ // Returns a place to write intermediate values of an operation
+ // which reuses dst if it is safe to do so.
+ RegisterID* tempDestination(RegisterID* dst)
+ {
+ return (dst && dst != ignoredResult() && dst->isTemporary()) ? dst : newTemporary();
+ }
+
+ // Returns the place to write the final output of an operation.
+ RegisterID* finalDestination(RegisterID* originalDst, RegisterID* tempDst = 0)
+ {
+ if (originalDst && originalDst != ignoredResult())
+ return originalDst;
+ ASSERT(tempDst != ignoredResult());
+ if (tempDst && tempDst->isTemporary())
+ return tempDst;
+ return newTemporary();
+ }
+
+ RegisterID* destinationForAssignResult(RegisterID* dst)
+ {
+ if (dst && dst != ignoredResult() && m_codeBlock->needsFullScopeChain())
+ return dst->isTemporary() ? dst : newTemporary();
+ return 0;
+ }
+
+        // Moves src to dst if dst is not null and differs from src; otherwise just returns src.
+ RegisterID* moveToDestinationIfNeeded(RegisterID* dst, RegisterID* src)
+ {
+ return dst == ignoredResult() ? 0 : (dst && dst != src) ? emitMove(dst, src) : src;
+ }
+
+ PassRefPtr<LabelScope> newLabelScope(LabelScope::Type, const Identifier* = 0);
+ PassRefPtr<Label> newLabel();
+
+        // The emitNode functions are just syntactic sugar for calling
+        // Node::emitBytecode. These functions accept a 0 for the register,
+        // meaning that the node should allocate a register, or ignoredResult(),
+        // meaning that the node need not put the result in a register.
+        // Other emit functions do not accept 0 or ignoredResult().
+ RegisterID* emitNode(RegisterID* dst, Node* n)
+ {
+ // Node::emitCode assumes that dst, if provided, is either a local or a referenced temporary.
+ ASSERT(!dst || dst == ignoredResult() || !dst->isTemporary() || dst->refCount());
+ if (!m_codeBlock->numberOfLineInfos() || m_codeBlock->lastLineInfo().lineNumber != n->lineNo()) {
+ LineInfo info = { instructions().size(), n->lineNo() };
+ m_codeBlock->addLineInfo(info);
+ }
+ if (m_emitNodeDepth >= s_maxEmitNodeDepth)
+ return emitThrowExpressionTooDeepException();
+ ++m_emitNodeDepth;
+ RegisterID* r = n->emitBytecode(*this, dst);
+ --m_emitNodeDepth;
+ return r;
+ }
+
+ RegisterID* emitNode(Node* n)
+ {
+ return emitNode(0, n);
+ }
+
+ void emitExpressionInfo(unsigned divot, unsigned startOffset, unsigned endOffset)
+ {
+ divot -= m_codeBlock->sourceOffset();
+ if (divot > ExpressionRangeInfo::MaxDivot) {
+                // Overflow has occurred; we can only give line-number info for errors in this region.
+ divot = 0;
+ startOffset = 0;
+ endOffset = 0;
+ } else if (startOffset > ExpressionRangeInfo::MaxOffset) {
+                // If the start offset is out of bounds, we clear both offsets
+                // so that we only get the divot marker. The error message will have
+                // to be reduced to a line and column number.
+ startOffset = 0;
+ endOffset = 0;
+ } else if (endOffset > ExpressionRangeInfo::MaxOffset) {
+                // The end offset is only used for additional context, and is much more likely
+                // to overflow (e.g. function call arguments), so we are willing to drop it
+                // without dropping the rest of the range.
+ endOffset = 0;
+ }
+
+ ExpressionRangeInfo info;
+ info.instructionOffset = instructions().size();
+ info.divotPoint = divot;
+ info.startOffset = startOffset;
+ info.endOffset = endOffset;
+ m_codeBlock->addExpressionInfo(info);
+ }
+
+ void emitGetByIdExceptionInfo(OpcodeID opcodeID)
+ {
+ // Only op_construct and op_instanceof need exception info for
+ // a preceding op_get_by_id.
+ ASSERT(opcodeID == op_construct || opcodeID == op_instanceof);
+ GetByIdExceptionInfo info;
+ info.bytecodeOffset = instructions().size();
+ info.isOpConstruct = (opcodeID == op_construct);
+ m_codeBlock->addGetByIdExceptionInfo(info);
+ }
+
+ ALWAYS_INLINE bool leftHandSideNeedsCopy(bool rightHasAssignments, bool rightIsPure)
+ {
+ return (m_codeType != FunctionCode || m_codeBlock->needsFullScopeChain() || rightHasAssignments) && !rightIsPure;
+ }
+
+ ALWAYS_INLINE PassRefPtr<RegisterID> emitNodeForLeftHandSide(ExpressionNode* n, bool rightHasAssignments, bool rightIsPure)
+ {
+ if (leftHandSideNeedsCopy(rightHasAssignments, rightIsPure)) {
+ PassRefPtr<RegisterID> dst = newTemporary();
+ emitNode(dst.get(), n);
+ return dst;
+ }
+
+ return PassRefPtr<RegisterID>(emitNode(n));
+ }
+
+ RegisterID* emitLoad(RegisterID* dst, bool);
+ RegisterID* emitLoad(RegisterID* dst, double);
+ RegisterID* emitLoad(RegisterID* dst, const Identifier&);
+ RegisterID* emitLoad(RegisterID* dst, JSValuePtr);
+ RegisterID* emitUnexpectedLoad(RegisterID* dst, bool);
+ RegisterID* emitUnexpectedLoad(RegisterID* dst, double);
+
+ RegisterID* emitUnaryOp(OpcodeID, RegisterID* dst, RegisterID* src);
+ RegisterID* emitBinaryOp(OpcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes);
+ RegisterID* emitEqualityOp(OpcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2);
+ RegisterID* emitUnaryNoDstOp(OpcodeID, RegisterID* src);
+
+ RegisterID* emitNewObject(RegisterID* dst);
+        RegisterID* emitNewArray(RegisterID* dst, ElementNode*); // stops at the first elision
+
+ RegisterID* emitNewFunction(RegisterID* dst, FuncDeclNode* func);
+ RegisterID* emitNewFunctionExpression(RegisterID* dst, FuncExprNode* func);
+ RegisterID* emitNewRegExp(RegisterID* dst, RegExp* regExp);
+
+ RegisterID* emitMove(RegisterID* dst, RegisterID* src);
+
+ RegisterID* emitToJSNumber(RegisterID* dst, RegisterID* src) { return emitUnaryOp(op_to_jsnumber, dst, src); }
+ RegisterID* emitPreInc(RegisterID* srcDst);
+ RegisterID* emitPreDec(RegisterID* srcDst);
+ RegisterID* emitPostInc(RegisterID* dst, RegisterID* srcDst);
+ RegisterID* emitPostDec(RegisterID* dst, RegisterID* srcDst);
+
+ RegisterID* emitInstanceOf(RegisterID* dst, RegisterID* value, RegisterID* base, RegisterID* basePrototype);
+ RegisterID* emitTypeOf(RegisterID* dst, RegisterID* src) { return emitUnaryOp(op_typeof, dst, src); }
+ RegisterID* emitIn(RegisterID* dst, RegisterID* property, RegisterID* base) { return emitBinaryOp(op_in, dst, property, base, OperandTypes()); }
+
+ RegisterID* emitResolve(RegisterID* dst, const Identifier& property);
+ RegisterID* emitGetScopedVar(RegisterID* dst, size_t skip, int index, JSValuePtr globalObject);
+ RegisterID* emitPutScopedVar(size_t skip, int index, RegisterID* value, JSValuePtr globalObject);
+
+ RegisterID* emitResolveBase(RegisterID* dst, const Identifier& property);
+ RegisterID* emitResolveWithBase(RegisterID* baseDst, RegisterID* propDst, const Identifier& property);
+ RegisterID* emitResolveFunction(RegisterID* baseDst, RegisterID* funcDst, const Identifier& property);
+
+ RegisterID* emitGetById(RegisterID* dst, RegisterID* base, const Identifier& property);
+ RegisterID* emitPutById(RegisterID* base, const Identifier& property, RegisterID* value);
+ RegisterID* emitDeleteById(RegisterID* dst, RegisterID* base, const Identifier&);
+ RegisterID* emitGetByVal(RegisterID* dst, RegisterID* base, RegisterID* property);
+ RegisterID* emitPutByVal(RegisterID* base, RegisterID* property, RegisterID* value);
+ RegisterID* emitDeleteByVal(RegisterID* dst, RegisterID* base, RegisterID* property);
+ RegisterID* emitPutByIndex(RegisterID* base, unsigned index, RegisterID* value);
+ RegisterID* emitPutGetter(RegisterID* base, const Identifier& property, RegisterID* value);
+ RegisterID* emitPutSetter(RegisterID* base, const Identifier& property, RegisterID* value);
+
+ RegisterID* emitCall(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, ArgumentsNode*, unsigned divot, unsigned startOffset, unsigned endOffset);
+ RegisterID* emitCallEval(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, ArgumentsNode*, unsigned divot, unsigned startOffset, unsigned endOffset);
+
+ RegisterID* emitReturn(RegisterID* src);
+ RegisterID* emitEnd(RegisterID* src) { return emitUnaryNoDstOp(op_end, src); }
+
+ RegisterID* emitConstruct(RegisterID* dst, RegisterID* func, ArgumentsNode*, unsigned divot, unsigned startOffset, unsigned endOffset);
+
+ PassRefPtr<Label> emitLabel(Label*);
+ PassRefPtr<Label> emitJump(Label* target);
+ PassRefPtr<Label> emitJumpIfTrue(RegisterID* cond, Label* target);
+ PassRefPtr<Label> emitJumpIfFalse(RegisterID* cond, Label* target);
+ PassRefPtr<Label> emitJumpScopes(Label* target, int targetScopeDepth);
+
+ PassRefPtr<Label> emitJumpSubroutine(RegisterID* retAddrDst, Label*);
+ void emitSubroutineReturn(RegisterID* retAddrSrc);
+
+ RegisterID* emitGetPropertyNames(RegisterID* dst, RegisterID* base) { return emitUnaryOp(op_get_pnames, dst, base); }
+ RegisterID* emitNextPropertyName(RegisterID* dst, RegisterID* iter, Label* target);
+
+ RegisterID* emitCatch(RegisterID*, Label* start, Label* end);
+ void emitThrow(RegisterID* exc) { emitUnaryNoDstOp(op_throw, exc); }
+ RegisterID* emitNewError(RegisterID* dst, ErrorType type, JSValuePtr message);
+ void emitPushNewScope(RegisterID* dst, Identifier& property, RegisterID* value);
+
+ RegisterID* emitPushScope(RegisterID* scope);
+ void emitPopScope();
+
+ void emitDebugHook(DebugHookID, int firstLine, int lastLine);
+
+ int scopeDepth() { return m_dynamicScopeDepth + m_finallyDepth; }
+
+ void pushFinallyContext(Label* target, RegisterID* returnAddrDst);
+ void popFinallyContext();
+
+ LabelScope* breakTarget(const Identifier&);
+ LabelScope* continueTarget(const Identifier&);
+
+ void beginSwitch(RegisterID*, SwitchInfo::SwitchType);
+ void endSwitch(uint32_t clauseCount, RefPtr<Label>*, ExpressionNode**, Label* defaultLabel, int32_t min, int32_t range);
+
+ CodeType codeType() const { return m_codeType; }
+
+ void setRegeneratingForExceptionInfo(CodeBlock* originalCodeBlock)
+ {
+ m_regeneratingForExceptionInfo = true;
+ m_codeBlockBeingRegeneratedFrom = originalCodeBlock;
+ }
+
+ private:
+ void emitOpcode(OpcodeID);
+ void retrieveLastBinaryOp(int& dstIndex, int& src1Index, int& src2Index);
+ void retrieveLastUnaryOp(int& dstIndex, int& srcIndex);
+ void rewindBinaryOp();
+ void rewindUnaryOp();
+
+ PassRefPtr<Label> emitComplexJumpScopes(Label* target, ControlFlowContext* topScope, ControlFlowContext* bottomScope);
+
+ struct JSValueHashTraits : HashTraits<JSValueEncodedAsPointer*> {
+ static void constructDeletedValue(JSValueEncodedAsPointer*& slot) { slot = JSValuePtr::encode(jsImpossibleValue()); }
+ static bool isDeletedValue(JSValueEncodedAsPointer* value) { return value == JSValuePtr::encode(jsImpossibleValue()); }
+ };
+
+ typedef HashMap<JSValueEncodedAsPointer*, unsigned, PtrHash<JSValueEncodedAsPointer*>, JSValueHashTraits> JSValueMap;
+
+ struct IdentifierMapIndexHashTraits {
+ typedef int TraitType;
+ typedef IdentifierMapIndexHashTraits StorageTraits;
+ static int emptyValue() { return std::numeric_limits<int>::max(); }
+ static const bool emptyValueIsZero = false;
+ static const bool needsDestruction = false;
+ static const bool needsRef = false;
+ };
+
+ typedef HashMap<RefPtr<UString::Rep>, int, IdentifierRepHash, HashTraits<RefPtr<UString::Rep> >, IdentifierMapIndexHashTraits> IdentifierMap;
+ typedef HashMap<double, JSValuePtr> NumberMap;
+ typedef HashMap<UString::Rep*, JSString*, IdentifierRepHash> IdentifierStringMap;
+
+ RegisterID* emitCall(OpcodeID, RegisterID* dst, RegisterID* func, RegisterID* thisRegister, ArgumentsNode*, unsigned divot, unsigned startOffset, unsigned endOffset);
+
+ RegisterID* newRegister();
+
+ // Returns the RegisterID corresponding to ident.
+ RegisterID* addVar(const Identifier& ident, bool isConstant)
+ {
+ RegisterID* local;
+ addVar(ident, isConstant, local);
+ return local;
+ }
+ // Returns true if a new RegisterID was added, false if a pre-existing RegisterID was re-used.
+ bool addVar(const Identifier&, bool isConstant, RegisterID*&);
+
+ // Returns the RegisterID corresponding to ident.
+ RegisterID* addGlobalVar(const Identifier& ident, bool isConstant)
+ {
+ RegisterID* local;
+ addGlobalVar(ident, isConstant, local);
+ return local;
+ }
+ // Returns true if a new RegisterID was added, false if a pre-existing RegisterID was re-used.
+ bool addGlobalVar(const Identifier&, bool isConstant, RegisterID*&);
+
+ RegisterID* addParameter(const Identifier&);
+
+ void allocateConstants(size_t);
+
+ RegisterID& registerFor(int index)
+ {
+ if (index >= 0)
+ return m_calleeRegisters[index];
+
+ if (index == RegisterFile::OptionalCalleeArguments)
+ return m_argumentsRegister;
+
+ if (m_parameters.size()) {
+ ASSERT(!m_globals.size());
+ return m_parameters[index + m_parameters.size() + RegisterFile::CallFrameHeaderSize];
+ }
+
+ return m_globals[-index - 1];
+ }
+
+ unsigned addConstant(FuncDeclNode*);
+ unsigned addConstant(FuncExprNode*);
+ unsigned addConstant(const Identifier&);
+ RegisterID* addConstant(JSValuePtr);
+ unsigned addUnexpectedConstant(JSValuePtr);
+ unsigned addRegExp(RegExp*);
+
+ Vector<Instruction>& instructions() { return m_codeBlock->instructions(); }
+ SymbolTable& symbolTable() { return *m_symbolTable; }
+
+ bool shouldOptimizeLocals() { return (m_codeType != EvalCode) && !m_dynamicScopeDepth; }
+ bool canOptimizeNonLocals() { return (m_codeType == FunctionCode) && !m_dynamicScopeDepth && !m_codeBlock->usesEval(); }
+
+ RegisterID* emitThrowExpressionTooDeepException();
+
+ bool m_shouldEmitDebugHooks;
+ bool m_shouldEmitProfileHooks;
+
+ const ScopeChain* m_scopeChain;
+ SymbolTable* m_symbolTable;
+
+ ScopeNode* m_scopeNode;
+ CodeBlock* m_codeBlock;
+
+ HashSet<RefPtr<UString::Rep>, IdentifierRepHash> m_functions;
+ RegisterID m_ignoredResultRegister;
+ RegisterID m_thisRegister;
+ RegisterID m_argumentsRegister;
+ int m_activationRegisterIndex;
+ SegmentedVector<RegisterID, 512> m_calleeRegisters;
+ SegmentedVector<RegisterID, 512> m_parameters;
+ SegmentedVector<RegisterID, 512> m_globals;
+ SegmentedVector<LabelScope, 256> m_labelScopes;
+ SegmentedVector<Label, 256> m_labels;
+ RefPtr<RegisterID> m_lastConstant;
+ int m_finallyDepth;
+ int m_dynamicScopeDepth;
+ int m_baseScopeDepth;
+ CodeType m_codeType;
+
+ Vector<ControlFlowContext> m_scopeContextStack;
+ Vector<SwitchInfo> m_switchContextStack;
+
+ int m_nextGlobalIndex;
+ int m_nextParameterIndex;
+ int m_nextConstantIndex;
+
+ int m_globalVarStorageOffset;
+
+ // Constant pool
+ IdentifierMap m_identifierMap;
+ JSValueMap m_jsValueMap;
+ NumberMap m_numberMap;
+ IdentifierStringMap m_stringMap;
+
+ JSGlobalData* m_globalData;
+
+ OpcodeID m_lastOpcodeID;
+
+ unsigned m_emitNodeDepth;
+
+ bool m_regeneratingForExceptionInfo;
+ CodeBlock* m_codeBlockBeingRegeneratedFrom;
+
+ static const unsigned s_maxEmitNodeDepth = 10000;
+ };
+
+}
+
+#endif // BytecodeGenerator_h
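+
+// Editorial sketch (hedged; the helper name and node plumbing are
+// hypothetical): how a typical binary-expression emitter threads its dst
+// register through the destination helpers declared above.
+#if 0 // illustration only
+static RegisterID* sketchEmitAdd(BytecodeGenerator& generator, RegisterID* dst, ExpressionNode* lhs, ExpressionNode* rhs)
+{
+    // Pin the left operand in a RefPtr so its temporary is not handed out
+    // again while the right operand is being evaluated.
+    RefPtr<RegisterID> src1 = generator.emitNode(lhs);
+    RegisterID* src2 = generator.emitNode(rhs);
+    // finalDestination() writes to dst if the caller supplied a real one;
+    // otherwise it reuses src1's temporary or allocates a fresh register.
+    return generator.emitBinaryOp(op_add, generator.finalDestination(dst, src1.get()), src1.get(), src2, OperandTypes());
+}
+#endif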
diff --git a/JavaScriptCore/bytecompiler/Label.h b/JavaScriptCore/bytecompiler/Label.h
new file mode 100644
index 0000000..0b3d038
--- /dev/null
+++ b/JavaScriptCore/bytecompiler/Label.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Label_h
+#define Label_h
+
+#include "CodeBlock.h"
+#include "Instruction.h"
+#include <wtf/Assertions.h>
+#include <wtf/Vector.h>
+#include <limits.h>
+
+namespace JSC {
+
+ class Label {
+ public:
+ explicit Label(CodeBlock* codeBlock)
+ : m_refCount(0)
+ , m_location(invalidLocation)
+ , m_codeBlock(codeBlock)
+ {
+ }
+
+ void setLocation(unsigned location)
+ {
+ m_location = location;
+
+ unsigned size = m_unresolvedJumps.size();
+ for (unsigned i = 0; i < size; ++i) {
+ unsigned j = m_unresolvedJumps[i];
+ m_codeBlock->instructions()[j].u.operand = m_location - j;
+ }
+ }
+
+ int offsetFrom(int location) const
+ {
+ if (m_location == invalidLocation) {
+ m_unresolvedJumps.append(location);
+ return 0;
+ }
+ return m_location - location;
+ }
+
+ void ref() { ++m_refCount; }
+ void deref()
+ {
+ --m_refCount;
+ ASSERT(m_refCount >= 0);
+ }
+ int refCount() const { return m_refCount; }
+
+ bool isForward() const { return m_location == invalidLocation; }
+
+ private:
+ typedef Vector<int, 8> JumpVector;
+
+ static const unsigned invalidLocation = UINT_MAX;
+
+ int m_refCount;
+ unsigned m_location;
+ CodeBlock* m_codeBlock;
+ mutable JumpVector m_unresolvedJumps;
+ };
+
+} // namespace JSC
+
+#endif // Label_h
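+
+// Editorial walkthrough (hedged) of the backpatching scheme above:
+//
+//     RefPtr<Label> target = generator.newLabel();
+//     generator.emitJumpIfFalse(cond, target.get());
+//         // target is still unbound, so offsetFrom() returns 0 and records
+//         // the operand's bytecode offset in m_unresolvedJumps
+//     // ... emit the conditionally skipped bytecode ...
+//     generator.emitLabel(target.get());
+//         // setLocation() walks m_unresolvedJumps and rewrites each recorded
+//         // operand to (m_location - jumpOffset), resolving the forward jump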
diff --git a/JavaScriptCore/bytecompiler/LabelScope.h b/JavaScriptCore/bytecompiler/LabelScope.h
new file mode 100644
index 0000000..cc21fff
--- /dev/null
+++ b/JavaScriptCore/bytecompiler/LabelScope.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LabelScope_h
+#define LabelScope_h
+
+#include <wtf/PassRefPtr.h>
+#include "Label.h"
+
+namespace JSC {
+
+ class Identifier;
+
+ class LabelScope {
+ public:
+ enum Type { Loop, Switch, NamedLabel };
+
+ LabelScope(Type type, const Identifier* name, int scopeDepth, PassRefPtr<Label> breakTarget, PassRefPtr<Label> continueTarget)
+ : m_refCount(0)
+ , m_type(type)
+ , m_name(name)
+ , m_scopeDepth(scopeDepth)
+ , m_breakTarget(breakTarget)
+ , m_continueTarget(continueTarget)
+ {
+ }
+
+ void ref() { ++m_refCount; }
+ void deref()
+ {
+ --m_refCount;
+ ASSERT(m_refCount >= 0);
+ }
+ int refCount() const { return m_refCount; }
+
+ Label* breakTarget() const { return m_breakTarget.get(); }
+ Label* continueTarget() const { return m_continueTarget.get(); }
+
+ Type type() const { return m_type; }
+ const Identifier* name() const { return m_name; }
+ int scopeDepth() const { return m_scopeDepth; }
+
+ private:
+ int m_refCount;
+ Type m_type;
+ const Identifier* m_name;
+ int m_scopeDepth;
+ RefPtr<Label> m_breakTarget;
+ RefPtr<Label> m_continueTarget;
+ };
+
+} // namespace JSC
+
+#endif // LabelScope_h
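+
+// Editorial sketch (hedged; loosely modelled on a do-while emitter) of how a
+// loop registers a LabelScope so that later break/continue statements can
+// find their targets via BytecodeGenerator::breakTarget()/continueTarget():
+//
+//     RefPtr<LabelScope> scope = generator.newLabelScope(LabelScope::Loop);
+//     RefPtr<Label> top = generator.newLabel();
+//     generator.emitLabel(top.get());
+//     generator.emitNode(loopBody);                    // body may break/continue
+//     generator.emitLabel(scope->continueTarget());    // `continue` lands here
+//     generator.emitJumpIfTrue(generator.emitNode(condition), top.get());
+//     generator.emitLabel(scope->breakTarget());       // `break` lands here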
diff --git a/JavaScriptCore/bytecompiler/RegisterID.h b/JavaScriptCore/bytecompiler/RegisterID.h
new file mode 100644
index 0000000..0223c2a
--- /dev/null
+++ b/JavaScriptCore/bytecompiler/RegisterID.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RegisterID_h
+#define RegisterID_h
+
+#include <wtf/Assertions.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/VectorTraits.h>
+
+namespace JSC {
+
+ class RegisterID : Noncopyable {
+ public:
+ RegisterID()
+ : m_refCount(0)
+ , m_isTemporary(false)
+#ifndef NDEBUG
+ , m_didSetIndex(false)
+#endif
+ {
+ }
+
+ explicit RegisterID(int index)
+ : m_refCount(0)
+ , m_index(index)
+ , m_isTemporary(false)
+#ifndef NDEBUG
+ , m_didSetIndex(true)
+#endif
+ {
+ }
+
+ void setIndex(int index)
+ {
+ ASSERT(!m_refCount);
+#ifndef NDEBUG
+ m_didSetIndex = true;
+#endif
+ m_index = index;
+ }
+
+ void setTemporary()
+ {
+ m_isTemporary = true;
+ }
+
+ int index() const
+ {
+ ASSERT(m_didSetIndex);
+ return m_index;
+ }
+
+ bool isTemporary()
+ {
+ return m_isTemporary;
+ }
+
+ void ref()
+ {
+ ++m_refCount;
+ }
+
+ void deref()
+ {
+ --m_refCount;
+ ASSERT(m_refCount >= 0);
+ }
+
+ int refCount() const
+ {
+ return m_refCount;
+ }
+
+ private:
+
+ int m_refCount;
+ int m_index;
+ bool m_isTemporary;
+#ifndef NDEBUG
+ bool m_didSetIndex;
+#endif
+ };
+
+} // namespace JSC
+
+namespace WTF {
+
+ template<> struct VectorTraits<JSC::RegisterID> : VectorTraitsBase<true, JSC::RegisterID> {
+ static const bool needsInitialization = true;
+ static const bool canInitializeWithMemset = true; // Default initialization just sets everything to 0 or false, so this is safe.
+ };
+
+} // namespace WTF
+
+#endif // RegisterID_h
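+
+// Editorial note (hedged): this refcount drives the "available temporary"
+// protocol in BytecodeGenerator. A register whose refCount() is 0 may be
+// handed out again by newTemporary(), so emitters pin values that must
+// survive further code generation:
+//
+//     RefPtr<RegisterID> pinned = generator.newTemporary(); // refCount 0 -> 1
+//     generator.emitNode(pinned.get(), subexpression);      // value stays live
+//     // ... emit more code; newTemporary() hands out other slots ...
+//     // when the RefPtr dies, refCount drops to 0 and the slot is reusable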
diff --git a/JavaScriptCore/bytecompiler/SegmentedVector.h b/JavaScriptCore/bytecompiler/SegmentedVector.h
new file mode 100644
index 0000000..bbab04f
--- /dev/null
+++ b/JavaScriptCore/bytecompiler/SegmentedVector.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SegmentedVector_h
+#define SegmentedVector_h
+
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+ // SegmentedVector is just like Vector, but it doesn't move the values
+ // stored in its buffer when it grows. Therefore, it is safe to keep
+ // pointers into a SegmentedVector.
+ template <typename T, size_t SegmentSize> class SegmentedVector {
+ public:
+ SegmentedVector()
+ : m_size(0)
+ {
+ m_segments.append(&m_inlineSegment);
+ }
+
+ ~SegmentedVector()
+ {
+ deleteAllSegments();
+ }
+
+ size_t size() const { return m_size; }
+
+ T& at(size_t index)
+ {
+ if (index < SegmentSize)
+ return m_inlineSegment[index];
+ return segmentFor(index)->at(subscriptFor(index));
+ }
+
+ T& operator[](size_t index)
+ {
+ return at(index);
+ }
+
+ T& last()
+ {
+ return at(size() - 1);
+ }
+
+ template <typename U> void append(const U& value)
+ {
+ ++m_size;
+
+ if (m_size <= SegmentSize) {
+ m_inlineSegment.uncheckedAppend(value);
+ return;
+ }
+
+ if (!segmentExistsFor(m_size - 1))
+ m_segments.append(new Segment);
+ segmentFor(m_size - 1)->uncheckedAppend(value);
+ }
+
+ void removeLast()
+ {
+ if (m_size <= SegmentSize)
+ m_inlineSegment.removeLast();
+ else
+ segmentFor(m_size - 1)->removeLast();
+ --m_size;
+ }
+
+ void grow(size_t size)
+ {
+ ASSERT(size > m_size);
+ ensureSegmentsFor(size);
+ m_size = size;
+ }
+
+ void clear()
+ {
+ deleteAllSegments();
+ m_segments.resize(1);
+ m_inlineSegment.clear();
+ m_size = 0;
+ }
+
+ private:
+ typedef Vector<T, SegmentSize> Segment;
+
+ void deleteAllSegments()
+ {
+ // Skip the first segment, because it's our inline segment, which was
+ // not created by new.
+ for (size_t i = 1; i < m_segments.size(); i++)
+ delete m_segments[i];
+ }
+
+ bool segmentExistsFor(size_t index)
+ {
+ return index / SegmentSize < m_segments.size();
+ }
+
+ Segment* segmentFor(size_t index)
+ {
+ return m_segments[index / SegmentSize];
+ }
+
+ size_t subscriptFor(size_t index)
+ {
+ return index % SegmentSize;
+ }
+
+ void ensureSegmentsFor(size_t size)
+ {
+ size_t segmentCount = m_size / SegmentSize;
+ if (m_size % SegmentSize)
+ ++segmentCount;
+ segmentCount = std::max<size_t>(segmentCount, 1); // We always have at least our inline segment.
+
+ size_t neededSegmentCount = size / SegmentSize;
+ if (size % SegmentSize)
+ ++neededSegmentCount;
+
+ // Fill up to N - 1 segments.
+ size_t end = neededSegmentCount - 1;
+ for (size_t i = segmentCount - 1; i < end; ++i)
+ ensureSegment(i, SegmentSize);
+
+            // Grow segment N to accommodate the remainder.
+ ensureSegment(end, subscriptFor(size - 1) + 1);
+ }
+
+ void ensureSegment(size_t segmentIndex, size_t size)
+ {
+ ASSERT(segmentIndex <= m_segments.size());
+ if (segmentIndex == m_segments.size())
+ m_segments.append(new Segment);
+ m_segments[segmentIndex]->grow(size);
+ }
+
+ size_t m_size;
+ Segment m_inlineSegment;
+ Vector<Segment*, 32> m_segments;
+ };
+
+} // namespace JSC
+
+#endif // SegmentedVector_h
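+
+// Editorial demo (hedged; illustration only): growing a SegmentedVector never
+// moves elements that are already stored, so pointers into it stay valid.
+// This is what lets BytecodeGenerator keep raw RegisterID* pointers into
+// m_calleeRegisters while it continues to append registers.
+#if 0
+static void sketchPointerStability()
+{
+    JSC::SegmentedVector<int, 8> v;
+    v.append(42);
+    int* stable = &v.at(0);
+    for (int i = 0; i < 1000; ++i)
+        v.append(i);           // overflows the inline segment into heap segments
+    ASSERT(*stable == 42);     // still valid: nothing was moved
+}
+#endif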