Diffstat (limited to 'Source/JavaScriptCore/dfg')
19 files changed, 7316 insertions, 0 deletions
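The first file below, DFGAliasTracker.h, tracks the most recent GetByVal so that an immediately following access to the same base and subscript can reuse it on the speculative path. A minimal standalone sketch of that 1-deep buffer (hypothetical names; not code from this commit):

// MiniAliasTracker: the 1-deep candidate buffer implemented by
// DFGAliasTracker.h below, reduced to plain C++ with integer ids
// standing in for NodeIndex.
#include <cstdint>

class MiniAliasTracker {
public:
    static const uint32_t noNode = 0xffffffffu;

    // A GetByVal was emitted: remember it as the candidate for reuse.
    void recordGet(uint32_t node) { m_candidate = node; }

    // Any potentially aliasing access (a put, or a get/put by identifier)
    // invalidates the candidate.
    void clobber() { m_candidate = noNode; }

    // The candidate survives only until the next clobber; callers still
    // check that base and subscript match before reusing it.
    uint32_t candidate() const { return m_candidate; }

private:
    uint32_t m_candidate = noNode;
};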
diff --git a/Source/JavaScriptCore/dfg/DFGAliasTracker.h b/Source/JavaScriptCore/dfg/DFGAliasTracker.h new file mode 100644 index 0000000..8710169 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGAliasTracker.h @@ -0,0 +1,121 @@ +/* + * Copyright (C) 2011 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGAliasTracker_h +#define DFGAliasTracker_h + +#if ENABLE(DFG_JIT) + +#include <dfg/DFGGraph.h> +#include <wtf/Vector.h> + +namespace JSC { namespace DFG { + +// === AliasTracker === +// +// This class is used to detect aliasing property accesses, which we may +// be able to speculatively optimize (for example removing redundant loads +// where we know a getter will not be called, or optimizing puts to arrays +// where we know the slot being written to is within length and is not a +// hole value). In time, this should be more than a 1-deep buffer! +class AliasTracker { +public: + AliasTracker(Graph& graph) + : m_graph(graph) + , m_candidateAliasGetByVal(NoNode) + { + } + + NodeIndex lookupGetByVal(NodeIndex base, NodeIndex property) + { + // Try to detect situations where a GetByVal follows another GetByVal to the same + // property; in these cases, we may be able to omit the subsequent get on the + // speculative path, where we know conditions hold to make this safe (for example, + // on the speculative path we will not have allowed getter access). + if (m_candidateAliasGetByVal != NoNode) { + Node& possibleAlias = m_graph[m_candidateAliasGetByVal]; + ASSERT(possibleAlias.op == GetByVal); + // This check ensures the accesses alias, provided that the subscript is an + // integer index (this is good enough; the speculative path will only generate + // optimized accesses to handle integer subscripts). 
+ if (possibleAlias.child1 == base && equalIgnoringLaterNumericConversion(possibleAlias.child2, property)) + return m_candidateAliasGetByVal; + } + return NoNode; + } + + void recordGetByVal(NodeIndex getByVal) + { + m_candidateAliasGetByVal = getByVal; + } + + void recordPutByVal(NodeIndex putByVal) + { + ASSERT_UNUSED(putByVal, m_graph[putByVal].op == PutByVal || m_graph[putByVal].op == PutByValAlias); + m_candidateAliasGetByVal = NoNode; + } + + void recordGetById(NodeIndex getById) + { + ASSERT_UNUSED(getById, m_graph[getById].op == GetById); + m_candidateAliasGetByVal = NoNode; + } + + void recordPutById(NodeIndex putById) + { + ASSERT_UNUSED(putById, m_graph[putById].op == PutById); + m_candidateAliasGetByVal = NoNode; + } + + void recordPutByIdDirect(NodeIndex putByVal) + { + ASSERT_UNUSED(putByVal, m_graph[putByVal].op == PutByIdDirect); + m_candidateAliasGetByVal = NoNode; + } + +private: + // This method returns true for arguments: + // - (X, X) + // - (X, ValueToNumber(X)) + // - (X, ValueToInt32(X)) + // - (X, NumberToInt32(X)) + bool equalIgnoringLaterNumericConversion(NodeIndex op1, NodeIndex op2) + { + if (op1 == op2) + return true; + Node& node2 = m_graph[op2]; + return (node2.op == ValueToNumber || node2.op == ValueToInt32 || node2.op == NumberToInt32) && op1 == node2.child1; + } + + // The graph, to look up potentially aliasing nodes. + Graph& m_graph; + // Currently a 1-deep buffer! + NodeIndex m_candidateAliasGetByVal; +}; + +} } // namespace JSC::DFG + +#endif +#endif diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp new file mode 100644 index 0000000..1d4c36a --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp @@ -0,0 +1,1082 @@ +/* + * Copyright (C) 2011 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGByteCodeParser.h" + +#if ENABLE(DFG_JIT) + +#include "DFGAliasTracker.h" +#include "DFGScoreBoard.h" +#include "CodeBlock.h" + +namespace JSC { namespace DFG { + +#if ENABLE(DFG_JIT_RESTRICTIONS) +// FIXME: Temporarily disable arithmetic, until we fix associated performance regressions. 
+#define ARITHMETIC_OP() m_parseFailed = true +#else +#define ARITHMETIC_OP() ((void)0) +#endif + +// === ByteCodeParser === +// +// This class is used to compile the dataflow graph from a CodeBlock. +class ByteCodeParser { +public: + ByteCodeParser(JSGlobalData* globalData, CodeBlock* codeBlock, Graph& graph) + : m_globalData(globalData) + , m_codeBlock(codeBlock) + , m_graph(graph) + , m_currentIndex(0) + , m_parseFailed(false) + , m_constantUndefined(UINT_MAX) + , m_constantNull(UINT_MAX) + , m_constant1(UINT_MAX) + , m_constants(codeBlock->numberOfConstantRegisters()) + , m_arguments(codeBlock->m_numParameters) + , m_variables(codeBlock->m_numVars) + , m_temporaries(codeBlock->m_numCalleeRegisters - codeBlock->m_numVars) + { + for (unsigned i = 0; i < m_temporaries.size(); ++i) + m_temporaries[i] = NoNode; + } + + // Parse a full CodeBlock of bytecode. + bool parse(); + +private: + // Parse a single basic block of bytecode instructions. + bool parseBlock(unsigned limit); + + // Get/Set the operands/result of a bytecode instruction. + NodeIndex get(int operand) + { + // Is this a constant? + if (operand >= FirstConstantRegisterIndex) { + unsigned constant = operand - FirstConstantRegisterIndex; + ASSERT(constant < m_constants.size()); + return getJSConstant(constant); + } + + // Is this an argument? + if (operand < 0) + return getArgument(operand); + + // Is this a variable? + unsigned numVariables = m_variables.size(); + if ((unsigned)operand < numVariables) + return getVariable((unsigned)operand); + + // Must be a temporary. + unsigned temporary = (unsigned)operand - numVariables; + ASSERT(temporary < m_temporaries.size()); + return getTemporary(temporary); + } + void set(int operand, NodeIndex value) + { + // Is this an argument? + if (operand < 0) { + setArgument(operand, value); + return; + } + + // Is this a variable? + unsigned numVariables = m_variables.size(); + if ((unsigned)operand < numVariables) { + setVariable((unsigned)operand, value); + return; + } + + // Must be a temporary. + unsigned temporary = (unsigned)operand - numVariables; + ASSERT(temporary < m_temporaries.size()); + setTemporary(temporary, value); + } + + // Used in implementing get/set, above, where the operand is a local variable. + NodeIndex getVariable(unsigned operand) + { + NodeIndex setNode = m_variables[operand].set; + if (setNode != NoNode) + return m_graph[setNode].child1; + + NodeIndex getNode = m_variables[operand].get; + if (getNode != NoNode) + return getNode; + + getNode = addToGraph(GetLocal, OpInfo(operand)); + m_variables[operand].get = getNode; + return getNode; + } + void setVariable(unsigned operand, NodeIndex value) + { + NodeIndex priorSet = m_variables[operand].set; + m_variables[operand].set = addToGraph(SetLocal, OpInfo(operand), value); + if (priorSet != NoNode) + m_graph.deref(priorSet); + } + + // Used in implementing get/set, above, where the operand is a temporary. + NodeIndex getTemporary(unsigned operand) + { + NodeIndex index = m_temporaries[operand]; + if (index != NoNode) + return index; + + // Detect a read of a temporary that has not yet been defined within this block (e.g. use of ?:). + m_parseFailed = true; + return constantUndefined(); + } + void setTemporary(unsigned operand, NodeIndex value) + { + m_temporaries[operand] = value; + } + + // Used in implementing get/set, above, where the operand is an argument. 
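    // Editorial aside, not part of this commit: the decoding that get() and set()
    // above perform, restated as a classifier. A bytecode operand resolves to one
    // of four storage kinds purely by its numeric range ('OperandKind' and
    // 'classifyOperand' are hypothetical, for illustration only).
    enum OperandKind { ConstantOperand, ArgumentOperand, VariableOperand, TemporaryOperand };
    OperandKind classifyOperand(int operand)
    {
        if (operand >= FirstConstantRegisterIndex)
            return ConstantOperand; // index into the CodeBlock's constant pool
        if (operand < 0)
            return ArgumentOperand; // arguments live at negative offsets from the call frame
        if ((unsigned)operand < m_variables.size())
            return VariableOperand; // local variable
        return TemporaryOperand; // temporary, numbered from (operand - numVariables)
    }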
+ NodeIndex getArgument(unsigned operand) + { + unsigned argument = operand + m_codeBlock->m_numParameters + RegisterFile::CallFrameHeaderSize; + ASSERT(argument < m_arguments.size()); + + NodeIndex setNode = m_arguments[argument].set; + if (setNode != NoNode) + return m_graph[setNode].child1; + + NodeIndex getNode = m_arguments[argument].get; + if (getNode != NoNode) + return getNode; + + getNode = addToGraph(GetLocal, OpInfo(operand)); + m_arguments[argument].get = getNode; + return getNode; + } + void setArgument(int operand, NodeIndex value) + { + unsigned argument = operand + m_codeBlock->m_numParameters + RegisterFile::CallFrameHeaderSize; + ASSERT(argument < m_arguments.size()); + + NodeIndex priorSet = m_arguments[argument].set; + m_arguments[argument].set = addToGraph(SetLocal, OpInfo(operand), value); + if (priorSet != NoNode) + m_graph.deref(priorSet); + } + + // Get an operand, and perform a ToInt32/ToNumber conversion on it. + NodeIndex getToInt32(int operand) + { + // Avoid wastefully adding a JSConstant node to the graph, only to + // replace it with a Int32Constant (which is what would happen if + // we called 'toInt32(get(operand))' in this case). + if (operand >= FirstConstantRegisterIndex) { + JSValue v = m_codeBlock->getConstant(operand); + if (v.isInt32()) + return getInt32Constant(v.asInt32(), operand - FirstConstantRegisterIndex); + } + return toInt32(get(operand)); + } + NodeIndex getToNumber(int operand) + { + // Avoid wastefully adding a JSConstant node to the graph, only to + // replace it with a DoubleConstant (which is what would happen if + // we called 'toNumber(get(operand))' in this case). + if (operand >= FirstConstantRegisterIndex) { + JSValue v = m_codeBlock->getConstant(operand); + if (v.isNumber()) + return getDoubleConstant(v.uncheckedGetNumber(), operand - FirstConstantRegisterIndex); + } + return toNumber(get(operand)); + } + + // Perform an ES5 ToInt32 operation - returns a node of type NodeResultInt32. + NodeIndex toInt32(NodeIndex index) + { + Node& node = m_graph[index]; + + if (node.hasInt32Result()) + return index; + + if (node.hasDoubleResult()) { + if (node.op == DoubleConstant) + return getInt32Constant(JSC::toInt32(valueOfDoubleConstant(index)), node.constantNumber()); + // 'NumberToInt32(Int32ToNumber(X))' == X, and 'NumberToInt32(UInt32ToNumber(X)) == X' + if (node.op == Int32ToNumber || node.op == UInt32ToNumber) + return node.child1; + + // We unique NumberToInt32 nodes in a map to prevent duplicate conversions. + pair<UnaryOpMap::iterator, bool> result = m_numberToInt32Nodes.add(index, NoNode); + // Either we added a new value, or the existing value in the map is non-zero. + ASSERT(result.second == (result.first->second == NoNode)); + if (result.second) + result.first->second = addToGraph(NumberToInt32, index); + return result.first->second; + } + + // Check for numeric constants boxed as JSValues. + if (node.op == JSConstant) { + JSValue v = valueOfJSConstant(index); + if (v.isInt32()) + return getInt32Constant(v.asInt32(), node.constantNumber()); + if (v.isNumber()) + return getInt32Constant(JSC::toInt32(v.uncheckedGetNumber()), node.constantNumber()); + } + + return addToGraph(ValueToInt32, index); + } + + // Perform an ES5 ToNumber operation - returns a node of type NodeResultDouble. 
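    // Editorial aside, not part of this commit: the HashMap::add uniquing used by
    // toInt32 above (and toNumber below), factored into a hypothetical helper.
    // WTF::HashMap::add returns the existing entry when the key is already present,
    // so each value gets at most one conversion node of a given kind.
    NodeIndex uniqueConversion(HashMap<NodeIndex, NodeIndex>& map, NodeType conversionOp, NodeIndex index)
    {
        pair<HashMap<NodeIndex, NodeIndex>::iterator, bool> result = map.add(index, NoNode);
        if (result.second) // newly inserted; create the conversion node exactly once
            result.first->second = addToGraph(conversionOp, index);
        return result.first->second;
    }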
+ NodeIndex toNumber(NodeIndex index) + { + Node& node = m_graph[index]; + + if (node.hasDoubleResult()) + return index; + + if (node.hasInt32Result()) { + if (node.op == Int32Constant) + return getDoubleConstant(valueOfInt32Constant(index), node.constantNumber()); + + // We unique Int32ToNumber nodes in a map to prevent duplicate conversions. + pair<UnaryOpMap::iterator, bool> result = m_int32ToNumberNodes.add(index, NoNode); + // Either we added a new value, or the existing value in the map is non-zero. + ASSERT(result.second == (result.first->second == NoNode)); + if (result.second) + result.first->second = addToGraph(Int32ToNumber, index); + return result.first->second; + } + + if (node.op == JSConstant) { + JSValue v = valueOfJSConstant(index); + if (v.isNumber()) + return getDoubleConstant(v.uncheckedGetNumber(), node.constantNumber()); + } + + return addToGraph(ValueToNumber, index); + } + + + // Used in implementing get, above, where the operand is a constant. + NodeIndex getInt32Constant(int32_t value, unsigned constant) + { + NodeIndex index = m_constants[constant].asInt32; + if (index != NoNode) + return index; + NodeIndex resultIndex = addToGraph(Int32Constant, OpInfo(constant)); + m_graph[resultIndex].setInt32Constant(value); + m_constants[constant].asInt32 = resultIndex; + return resultIndex; + } + NodeIndex getDoubleConstant(double value, unsigned constant) + { + NodeIndex index = m_constants[constant].asNumeric; + if (index != NoNode) + return index; + NodeIndex resultIndex = addToGraph(DoubleConstant, OpInfo(constant)); + m_graph[resultIndex].setDoubleConstant(value); + m_constants[constant].asNumeric = resultIndex; + return resultIndex; + } + NodeIndex getJSConstant(unsigned constant) + { + NodeIndex index = m_constants[constant].asJSValue; + if (index != NoNode) + return index; + + NodeIndex resultIndex = addToGraph(JSConstant, OpInfo(constant)); + m_constants[constant].asJSValue = resultIndex; + return resultIndex; + } + + // Helper functions to get/set the this value. + NodeIndex getThis() + { + return getArgument(m_codeBlock->thisRegister()); + } + void setThis(NodeIndex value) + { + setArgument(m_codeBlock->thisRegister(), value); + } + + // Convenience methods for checking nodes for constants. + bool isInt32Constant(NodeIndex index) + { + return m_graph[index].op == Int32Constant; + } + bool isDoubleConstant(NodeIndex index) + { + return m_graph[index].op == DoubleConstant; + } + bool isJSConstant(NodeIndex index) + { + return m_graph[index].op == JSConstant; + } + + // Convenience methods for getting constant values. + int32_t valueOfInt32Constant(NodeIndex index) + { + ASSERT(isInt32Constant(index)); + return m_graph[index].int32Constant(); + } + double valueOfDoubleConstant(NodeIndex index) + { + ASSERT(isDoubleConstant(index)); + return m_graph[index].numericConstant(); + } + JSValue valueOfJSConstant(NodeIndex index) + { + ASSERT(isJSConstant(index)); + return m_codeBlock->getConstant(FirstConstantRegisterIndex + m_graph[index].constantNumber()); + } + + // This method returns a JSConstant with the value 'undefined'. + NodeIndex constantUndefined() + { + // Has m_constantUndefined been set up yet? + if (m_constantUndefined == UINT_MAX) { + // Search the constant pool for undefined, if we find it, we can just reuse this! 
+ unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters(); + for (m_constantUndefined = 0; m_constantUndefined < numberOfConstants; ++m_constantUndefined) { + JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined); + if (testMe.isUndefined()) + return getJSConstant(m_constantUndefined); + } + + // Add undefined to the CodeBlock's constants, and add a corresponding slot in m_constants. + ASSERT(m_constants.size() == numberOfConstants); + m_codeBlock->addConstant(jsUndefined()); + m_constants.append(ConstantRecord()); + ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters()); + } + + // m_constantUndefined must refer to an entry in the CodeBlock's constant pool that has the value 'undefined'. + ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined).isUndefined()); + return getJSConstant(m_constantUndefined); + } + + // This method returns a JSConstant with the value 'null'. + NodeIndex constantNull() + { + // Has m_constantNull been set up yet? + if (m_constantNull == UINT_MAX) { + // Search the constant pool for null, if we find it, we can just reuse this! + unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters(); + for (m_constantNull = 0; m_constantNull < numberOfConstants; ++m_constantNull) { + JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull); + if (testMe.isNull()) + return getJSConstant(m_constantNull); + } + + // Add null to the CodeBlock's constants, and add a corresponding slot in m_constants. + ASSERT(m_constants.size() == numberOfConstants); + m_codeBlock->addConstant(jsNull()); + m_constants.append(ConstantRecord()); + ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters()); + } + + // m_constantNull must refer to an entry in the CodeBlock's constant pool that has the value 'null'. + ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull).isNull()); + return getJSConstant(m_constantNull); + } + + // This method returns a DoubleConstant with the value 1. + NodeIndex one() + { + // Has m_constant1 been set up yet? + if (m_constant1 == UINT_MAX) { + // Search the constant pool for the value 1, if we find it, we can just reuse this! + unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters(); + for (m_constant1 = 0; m_constant1 < numberOfConstants; ++m_constant1) { + JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1); + if (testMe.isInt32() && testMe.asInt32() == 1) + return getDoubleConstant(1, m_constant1); + } + + // Add the value 1 to the CodeBlock's constants, and add a corresponding slot in m_constants. + ASSERT(m_constants.size() == numberOfConstants); + m_codeBlock->addConstant(jsNumber(1)); + m_constants.append(ConstantRecord()); + ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters()); + } + + // m_constant1 must refer to an entry in the CodeBlock's constant pool that has the integer value 1. + ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).isInt32()); + ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).asInt32() == 1); + return getDoubleConstant(1, m_constant1); + } + + + // These methods create a node and add it to the graph. If nodes of this type are + // 'mustGenerate' then the node will implicitly be ref'ed to ensure generation. 
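    // Editorial aside, not part of this commit: 'mustGenerate' is a flag bit
    // carried in the NodeType value itself, so the check below ('op &
    // NodeMustGenerate') is a simple mask. Nodes with side effects (for example
    // PutByVal) must be generated even if nothing reads their result, so they
    // start life with one artificial ref; pure nodes start at refCount zero and
    // are only generated if some other node refs them.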
+ NodeIndex addToGraph(NodeType op, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode) + { + NodeIndex resultIndex = (NodeIndex)m_graph.size(); + m_graph.append(Node(op, m_currentIndex, child1, child2, child3)); + + if (op & NodeMustGenerate) + m_graph.ref(resultIndex); + return resultIndex; + } + NodeIndex addToGraph(NodeType op, OpInfo info, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode) + { + NodeIndex resultIndex = (NodeIndex)m_graph.size(); + m_graph.append(Node(op, m_currentIndex, info, child1, child2, child3)); + + if (op & NodeMustGenerate) + m_graph.ref(resultIndex); + return resultIndex; + } + NodeIndex addToGraph(NodeType op, OpInfo info1, OpInfo info2, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode) + { + NodeIndex resultIndex = (NodeIndex)m_graph.size(); + m_graph.append(Node(op, m_currentIndex, info1, info2, child1, child2, child3)); + + if (op & NodeMustGenerate) + m_graph.ref(resultIndex); + return resultIndex; + } + + JSGlobalData* m_globalData; + CodeBlock* m_codeBlock; + Graph& m_graph; + + // The bytecode index of the current instruction being generated. + unsigned m_currentIndex; + + // Record failures due to unimplemented functionality or regressions. + bool m_parseFailed; + + // We use these values during code generation, and to avoid the need for + // special handling we make sure they are available as constants in the + // CodeBlock's constant pool. These variables are initialized to + // UINT_MAX, and lazily updated to hold an index into the CodeBlock's + // constant pool, as necessary. + unsigned m_constantUndefined; + unsigned m_constantNull; + unsigned m_constant1; + + // A constant in the constant pool may be represented by more than one + // node in the graph, depending on the context in which it is being used. + struct ConstantRecord { + ConstantRecord() + : asInt32(NoNode) + , asNumeric(NoNode) + , asJSValue(NoNode) + { + } + + NodeIndex asInt32; + NodeIndex asNumeric; + NodeIndex asJSValue; + }; + + // For every local variable we track any existing get or set of the value. + // We track the get so that these may be shared, and we track the set to + // retrieve the current value, and to reference the final definition. + struct VariableRecord { + VariableRecord() + : get(NoNode) + , set(NoNode) + { + } + + NodeIndex get; + NodeIndex set; + }; + + // Track the index of the node whose result is the current value for every + // register value in the bytecode - argument, local, and temporary. + Vector <ConstantRecord, 32> m_constants; + Vector <VariableRecord, 32> m_arguments; + Vector <VariableRecord, 32> m_variables; + Vector <NodeIndex, 32> m_temporaries; + + // These maps are used to unique ToNumber and ToInt32 operations. + typedef HashMap<NodeIndex, NodeIndex> UnaryOpMap; + UnaryOpMap m_int32ToNumberNodes; + UnaryOpMap m_numberToInt32Nodes; +}; + +#define NEXT_OPCODE(name) \ + m_currentIndex += OPCODE_LENGTH(name); \ + continue + +#define LAST_OPCODE(name) \ + m_currentIndex += OPCODE_LENGTH(name); \ + return !m_parseFailed + +bool ByteCodeParser::parseBlock(unsigned limit) +{ + // No need to reset state initially, since it has been set by the constructor. 
+ if (m_currentIndex) { + for (unsigned i = 0; i < m_constants.size(); ++i) + m_constants[i] = ConstantRecord(); + for (unsigned i = 0; i < m_variables.size(); ++i) + m_variables[i] = VariableRecord(); + for (unsigned i = 0; i < m_arguments.size(); ++i) + m_arguments[i] = VariableRecord(); + for (unsigned i = 0; i < m_temporaries.size(); ++i) + m_temporaries[i] = NoNode; + } + + AliasTracker aliases(m_graph); + + Interpreter* interpreter = m_globalData->interpreter; + Instruction* instructionsBegin = m_codeBlock->instructions().begin(); + while (true) { + // Don't extend over jump destinations. + if (m_currentIndex == limit) { + addToGraph(Jump, OpInfo(m_currentIndex)); + return !m_parseFailed; + } + + // Switch on the current bytecode opcode. + Instruction* currentInstruction = instructionsBegin + m_currentIndex; + switch (interpreter->getOpcodeID(currentInstruction->u.opcode)) { + + // === Function entry opcodes === + + case op_enter: + // Initialize all locals to undefined. + for (int i = 0; i < m_codeBlock->m_numVars; ++i) + set(i, constantUndefined()); + NEXT_OPCODE(op_enter); + + case op_convert_this: { + NodeIndex op1 = getThis(); + setThis(addToGraph(ConvertThis, op1)); + NEXT_OPCODE(op_convert_this); + } + + // === Bitwise operations === + + case op_bitand: { + NodeIndex op1 = getToInt32(currentInstruction[2].u.operand); + NodeIndex op2 = getToInt32(currentInstruction[3].u.operand); + set(currentInstruction[1].u.operand, addToGraph(BitAnd, op1, op2)); + NEXT_OPCODE(op_bitand); + } + + case op_bitor: { + NodeIndex op1 = getToInt32(currentInstruction[2].u.operand); + NodeIndex op2 = getToInt32(currentInstruction[3].u.operand); + set(currentInstruction[1].u.operand, addToGraph(BitOr, op1, op2)); + NEXT_OPCODE(op_bitor); + } + + case op_bitxor: { + NodeIndex op1 = getToInt32(currentInstruction[2].u.operand); + NodeIndex op2 = getToInt32(currentInstruction[3].u.operand); + set(currentInstruction[1].u.operand, addToGraph(BitXor, op1, op2)); + NEXT_OPCODE(op_bitxor); + } + + case op_rshift: { + NodeIndex op1 = getToInt32(currentInstruction[2].u.operand); + NodeIndex op2 = getToInt32(currentInstruction[3].u.operand); + NodeIndex result; + // Optimize out shifts by zero. + if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f)) + result = op1; + else + result = addToGraph(BitRShift, op1, op2); + set(currentInstruction[1].u.operand, result); + NEXT_OPCODE(op_rshift); + } + + case op_lshift: { + NodeIndex op1 = getToInt32(currentInstruction[2].u.operand); + NodeIndex op2 = getToInt32(currentInstruction[3].u.operand); + NodeIndex result; + // Optimize out shifts by zero. + if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f)) + result = op1; + else + result = addToGraph(BitLShift, op1, op2); + set(currentInstruction[1].u.operand, result); + NEXT_OPCODE(op_lshift); + } + + case op_urshift: { + NodeIndex op1 = getToInt32(currentInstruction[2].u.operand); + NodeIndex op2 = getToInt32(currentInstruction[3].u.operand); + NodeIndex result; + // The result of a zero-extending right shift is treated as an unsigned value. + // This means that if the top bit is set, the result is not in the int32 range, + // and as such must be stored as a double. If the shift amount is a constant, + // we may be able to optimize. + if (isInt32Constant(op2)) { + // If we know we are shifting by a non-zero amount, then since the operation + // zero fills we know the top bit of the result must be zero, and as such the + // result must be within the int32 range. 
Conversely, if this is a shift by + // zero, then the result may be changed by the conversion to unsigned, but it + // is not necessary to perform the shift! + if (valueOfInt32Constant(op2) & 0x1f) + result = addToGraph(BitURShift, op1, op2); + else + result = addToGraph(UInt32ToNumber, op1); + } else { + // Cannot optimize at this stage; shift & potentially rebox as a double. + result = addToGraph(BitURShift, op1, op2); + result = addToGraph(UInt32ToNumber, result); + } + set(currentInstruction[1].u.operand, result); + NEXT_OPCODE(op_urshift); + } + + // === Increment/Decrement opcodes === + + case op_pre_inc: { + unsigned srcDst = currentInstruction[1].u.operand; + NodeIndex op = getToNumber(srcDst); + set(srcDst, addToGraph(ArithAdd, op, one())); + NEXT_OPCODE(op_pre_inc); + } + + case op_post_inc: { + unsigned result = currentInstruction[1].u.operand; + unsigned srcDst = currentInstruction[2].u.operand; + NodeIndex op = getToNumber(srcDst); + set(result, op); + set(srcDst, addToGraph(ArithAdd, op, one())); + NEXT_OPCODE(op_post_inc); + } + + case op_pre_dec: { + unsigned srcDst = currentInstruction[1].u.operand; + NodeIndex op = getToNumber(srcDst); + set(srcDst, addToGraph(ArithSub, op, one())); + NEXT_OPCODE(op_pre_dec); + } + + case op_post_dec: { + unsigned result = currentInstruction[1].u.operand; + unsigned srcDst = currentInstruction[2].u.operand; + NodeIndex op = getToNumber(srcDst); + set(result, op); + set(srcDst, addToGraph(ArithSub, op, one())); + NEXT_OPCODE(op_post_dec); + } + + // === Arithmetic operations === + + case op_add: { + ARITHMETIC_OP(); + NodeIndex op1 = get(currentInstruction[2].u.operand); + NodeIndex op2 = get(currentInstruction[3].u.operand); + // If both operands can statically be determined to be numbers, then this is an arithmetic add. + // Otherwise, we must assume this may be performing a concatenation to a string. 
+ if (m_graph[op1].hasNumericResult() && m_graph[op2].hasNumericResult()) + set(currentInstruction[1].u.operand, addToGraph(ArithAdd, toNumber(op1), toNumber(op2))); + else + set(currentInstruction[1].u.operand, addToGraph(ValueAdd, op1, op2)); + NEXT_OPCODE(op_add); + } + + case op_sub: { + ARITHMETIC_OP(); + NodeIndex op1 = getToNumber(currentInstruction[2].u.operand); + NodeIndex op2 = getToNumber(currentInstruction[3].u.operand); + set(currentInstruction[1].u.operand, addToGraph(ArithSub, op1, op2)); + NEXT_OPCODE(op_sub); + } + + case op_mul: { + ARITHMETIC_OP(); + NodeIndex op1 = getToNumber(currentInstruction[2].u.operand); + NodeIndex op2 = getToNumber(currentInstruction[3].u.operand); + set(currentInstruction[1].u.operand, addToGraph(ArithMul, op1, op2)); + NEXT_OPCODE(op_mul); + } + + case op_mod: { + ARITHMETIC_OP(); + NodeIndex op1 = getToNumber(currentInstruction[2].u.operand); + NodeIndex op2 = getToNumber(currentInstruction[3].u.operand); + set(currentInstruction[1].u.operand, addToGraph(ArithMod, op1, op2)); + NEXT_OPCODE(op_mod); + } + + case op_div: { + ARITHMETIC_OP(); + NodeIndex op1 = getToNumber(currentInstruction[2].u.operand); + NodeIndex op2 = getToNumber(currentInstruction[3].u.operand); + set(currentInstruction[1].u.operand, addToGraph(ArithDiv, op1, op2)); + NEXT_OPCODE(op_div); + } + + // === Misc operations === + + case op_mov: { + NodeIndex op = get(currentInstruction[2].u.operand); + set(currentInstruction[1].u.operand, op); + NEXT_OPCODE(op_mov); + } + + case op_not: { + ARITHMETIC_OP(); + NodeIndex value = get(currentInstruction[2].u.operand); + set(currentInstruction[1].u.operand, addToGraph(LogicalNot, value)); + NEXT_OPCODE(op_not); + } + + case op_less: { + ARITHMETIC_OP(); + NodeIndex op1 = get(currentInstruction[2].u.operand); + NodeIndex op2 = get(currentInstruction[3].u.operand); + set(currentInstruction[1].u.operand, addToGraph(CompareLess, op1, op2)); + NEXT_OPCODE(op_less); + } + + case op_lesseq: { + ARITHMETIC_OP(); + NodeIndex op1 = get(currentInstruction[2].u.operand); + NodeIndex op2 = get(currentInstruction[3].u.operand); + set(currentInstruction[1].u.operand, addToGraph(CompareLessEq, op1, op2)); + NEXT_OPCODE(op_lesseq); + } + + case op_eq: { + ARITHMETIC_OP(); + NodeIndex op1 = get(currentInstruction[2].u.operand); + NodeIndex op2 = get(currentInstruction[3].u.operand); + set(currentInstruction[1].u.operand, addToGraph(CompareEq, op1, op2)); + NEXT_OPCODE(op_eq); + } + + case op_eq_null: { + ARITHMETIC_OP(); + NodeIndex value = get(currentInstruction[2].u.operand); + set(currentInstruction[1].u.operand, addToGraph(CompareEq, value, constantNull())); + NEXT_OPCODE(op_eq_null); + } + + case op_stricteq: { + ARITHMETIC_OP(); + NodeIndex op1 = get(currentInstruction[2].u.operand); + NodeIndex op2 = get(currentInstruction[3].u.operand); + set(currentInstruction[1].u.operand, addToGraph(CompareStrictEq, op1, op2)); + NEXT_OPCODE(op_stricteq); + } + + case op_neq: { + ARITHMETIC_OP(); + NodeIndex op1 = get(currentInstruction[2].u.operand); + NodeIndex op2 = get(currentInstruction[3].u.operand); + set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2))); + NEXT_OPCODE(op_neq); + } + + case op_neq_null: { + ARITHMETIC_OP(); + NodeIndex value = get(currentInstruction[2].u.operand); + set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, value, constantNull()))); + NEXT_OPCODE(op_neq_null); + } + + case op_nstricteq: { + ARITHMETIC_OP(); + NodeIndex op1 = 
get(currentInstruction[2].u.operand); + NodeIndex op2 = get(currentInstruction[3].u.operand); + set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareStrictEq, op1, op2))); + NEXT_OPCODE(op_nstricteq); + } + + // === Property access operations === + + case op_get_by_val: { + NodeIndex base = get(currentInstruction[2].u.operand); + NodeIndex property = get(currentInstruction[3].u.operand); + + NodeIndex getByVal = addToGraph(GetByVal, base, property, aliases.lookupGetByVal(base, property)); + set(currentInstruction[1].u.operand, getByVal); + aliases.recordGetByVal(getByVal); + + NEXT_OPCODE(op_get_by_val); + } + + case op_put_by_val: { + NodeIndex base = get(currentInstruction[1].u.operand); + NodeIndex property = get(currentInstruction[2].u.operand); + NodeIndex value = get(currentInstruction[3].u.operand); + + NodeIndex aliasedGet = aliases.lookupGetByVal(base, property); + NodeIndex putByVal = addToGraph(aliasedGet != NoNode ? PutByValAlias : PutByVal, base, property, value); + aliases.recordPutByVal(putByVal); + + NEXT_OPCODE(op_put_by_val); + } + + case op_get_by_id: { + NodeIndex base = get(currentInstruction[2].u.operand); + unsigned identifier = currentInstruction[3].u.operand; + + NodeIndex getById = addToGraph(GetById, OpInfo(identifier), base); + set(currentInstruction[1].u.operand, getById); + aliases.recordGetById(getById); + + NEXT_OPCODE(op_get_by_id); + } + + case op_put_by_id: { + NodeIndex value = get(currentInstruction[3].u.operand); + NodeIndex base = get(currentInstruction[1].u.operand); + unsigned identifier = currentInstruction[2].u.operand; + bool direct = currentInstruction[8].u.operand; + + if (direct) { + NodeIndex putByIdDirect = addToGraph(PutByIdDirect, OpInfo(identifier), base, value); + aliases.recordPutByIdDirect(putByIdDirect); + } else { + NodeIndex putById = addToGraph(PutById, OpInfo(identifier), base, value); + aliases.recordPutById(putById); + } + + NEXT_OPCODE(op_put_by_id); + } + + case op_get_global_var: { + NodeIndex getGlobalVar = addToGraph(GetGlobalVar, OpInfo(currentInstruction[2].u.operand)); + set(currentInstruction[1].u.operand, getGlobalVar); + NEXT_OPCODE(op_get_global_var); + } + + case op_put_global_var: { + NodeIndex value = get(currentInstruction[2].u.operand); + addToGraph(PutGlobalVar, OpInfo(currentInstruction[1].u.operand), value); + NEXT_OPCODE(op_put_global_var); + } + + // === Block terminators. 
=== + + case op_jmp: { + unsigned relativeOffset = currentInstruction[1].u.operand; + addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset)); + LAST_OPCODE(op_jmp); + } + + case op_loop: { + unsigned relativeOffset = currentInstruction[1].u.operand; + addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset)); + LAST_OPCODE(op_loop); + } + + case op_jtrue: { + unsigned relativeOffset = currentInstruction[2].u.operand; + NodeIndex condition = get(currentInstruction[1].u.operand); + addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jtrue)), condition); + LAST_OPCODE(op_jtrue); + } + + case op_jfalse: { + unsigned relativeOffset = currentInstruction[2].u.operand; + NodeIndex condition = get(currentInstruction[1].u.operand); + addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jfalse)), OpInfo(m_currentIndex + relativeOffset), condition); + LAST_OPCODE(op_jfalse); + } + + case op_loop_if_true: { + unsigned relativeOffset = currentInstruction[2].u.operand; + NodeIndex condition = get(currentInstruction[1].u.operand); + addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_true)), condition); + LAST_OPCODE(op_loop_if_true); + } + + case op_loop_if_false: { + unsigned relativeOffset = currentInstruction[2].u.operand; + NodeIndex condition = get(currentInstruction[1].u.operand); + addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_false)), OpInfo(m_currentIndex + relativeOffset), condition); + LAST_OPCODE(op_loop_if_false); + } + + case op_jeq_null: { + unsigned relativeOffset = currentInstruction[2].u.operand; + NodeIndex value = get(currentInstruction[1].u.operand); + NodeIndex condition = addToGraph(CompareEq, value, constantNull()); + addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jeq_null)), condition); + LAST_OPCODE(op_jeq_null); + } + + case op_jneq_null: { + unsigned relativeOffset = currentInstruction[2].u.operand; + NodeIndex value = get(currentInstruction[1].u.operand); + NodeIndex condition = addToGraph(CompareEq, value, constantNull()); + addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_null)), OpInfo(m_currentIndex + relativeOffset), condition); + LAST_OPCODE(op_jneq_null); + } + + case op_jnless: { + unsigned relativeOffset = currentInstruction[3].u.operand; + NodeIndex op1 = get(currentInstruction[1].u.operand); + NodeIndex op2 = get(currentInstruction[2].u.operand); + NodeIndex condition = addToGraph(CompareLess, op1, op2); + addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnless)), OpInfo(m_currentIndex + relativeOffset), condition); + LAST_OPCODE(op_jnless); + } + + case op_jnlesseq: { + unsigned relativeOffset = currentInstruction[3].u.operand; + NodeIndex op1 = get(currentInstruction[1].u.operand); + NodeIndex op2 = get(currentInstruction[2].u.operand); + NodeIndex condition = addToGraph(CompareLessEq, op1, op2); + addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnlesseq)), OpInfo(m_currentIndex + relativeOffset), condition); + LAST_OPCODE(op_jnlesseq); + } + + case op_jless: { + unsigned relativeOffset = currentInstruction[3].u.operand; + NodeIndex op1 = get(currentInstruction[1].u.operand); + NodeIndex op2 = get(currentInstruction[2].u.operand); + NodeIndex condition = addToGraph(CompareLess, op1, op2); + addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jless)), condition); + LAST_OPCODE(op_jless); + 
} + + case op_jlesseq: { + unsigned relativeOffset = currentInstruction[3].u.operand; + NodeIndex op1 = get(currentInstruction[1].u.operand); + NodeIndex op2 = get(currentInstruction[2].u.operand); + NodeIndex condition = addToGraph(CompareLessEq, op1, op2); + addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jlesseq)), condition); + LAST_OPCODE(op_jlesseq); + } + + case op_loop_if_less: { + unsigned relativeOffset = currentInstruction[3].u.operand; + NodeIndex op1 = get(currentInstruction[1].u.operand); + NodeIndex op2 = get(currentInstruction[2].u.operand); + NodeIndex condition = addToGraph(CompareLess, op1, op2); + addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_less)), condition); + LAST_OPCODE(op_loop_if_less); + } + + case op_loop_if_lesseq: { + unsigned relativeOffset = currentInstruction[3].u.operand; + NodeIndex op1 = get(currentInstruction[1].u.operand); + NodeIndex op2 = get(currentInstruction[2].u.operand); + NodeIndex condition = addToGraph(CompareLessEq, op1, op2); + addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_lesseq)), condition); + LAST_OPCODE(op_loop_if_lesseq); + } + + case op_ret: { + addToGraph(Return, get(currentInstruction[1].u.operand)); + + // FIXME: throw away terminal definitions of variables; + // should not be necessary once we have proper DCE! + for (unsigned i = 0; i < m_variables.size(); ++i) { + NodeIndex priorSet = m_variables[i].set; + if (priorSet != NoNode) + m_graph.deref(priorSet); + } + + LAST_OPCODE(op_ret); + } + + default: + // Parse failed! + return false; + } + } +} + +bool ByteCodeParser::parse() +{ + // Set during construction. + ASSERT(!m_currentIndex); + + for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= m_codeBlock->numberOfJumpTargets(); ++jumpTargetIndex) { + // The maximum bytecode offset to go into the current basic block is either the next jump target, or the end of the instructions. + unsigned limit = jumpTargetIndex < m_codeBlock->numberOfJumpTargets() ? m_codeBlock->jumpTarget(jumpTargetIndex) : m_codeBlock->instructions().size(); + ASSERT(m_currentIndex < limit); + + // Loop until we reach the current limit (i.e. next jump target). + do { + unsigned bytecodeBegin = m_currentIndex; + NodeIndex begin = m_graph.size(); + + if (!parseBlock(limit)) + return false; + // We should not have gone beyond the limit. + ASSERT(m_currentIndex <= limit); + + NodeIndex end = m_graph.size(); + m_graph.m_blocks.append(BasicBlock(bytecodeBegin, begin, end)); + } while (m_currentIndex < limit); + } + + // Should have reached the end of the instructions. + ASSERT(m_currentIndex == m_codeBlock->instructions().size()); + + // Assign VirtualRegisters. + ScoreBoard scoreBoard(m_graph, m_variables.size()); + Node* nodes = m_graph.begin(); + size_t size = m_graph.size(); + for (size_t i = 0; i < size; ++i) { + Node& node = nodes[i]; + if (node.refCount) { + // First, call use on all of the current node's children, then + // allocate a VirtualRegister for this node. We do so in this + // order so that if a child is on its last use, and a + // VirtualRegister is freed, then it may be reused for this node. + scoreBoard.use(node.child1); + scoreBoard.use(node.child2); + scoreBoard.use(node.child3); + node.virtualRegister = scoreBoard.allocate(); + // 'mustGenerate' nodes have their useCount artificially elevated, + // call use now to account for this. 
+ if (node.mustGenerate()) + scoreBoard.use(i); + } + } + + // 'm_numCalleeRegisters' is the number of locals and temporaries allocated + // for the function (and checked for on entry). Since we perform a new and + // different allocation of temporaries, more registers may now be required. + unsigned calleeRegisters = scoreBoard.allocatedCount() + m_variables.size(); + if ((unsigned)m_codeBlock->m_numCalleeRegisters < calleeRegisters) + m_codeBlock->m_numCalleeRegisters = calleeRegisters; + +#if DFG_DEBUG_VERBOSE + m_graph.dump(m_codeBlock); +#endif + + return true; +} + +bool parse(Graph& graph, JSGlobalData* globalData, CodeBlock* codeBlock) +{ +#if DFG_DEBUG_LOCAL_DISBALE + UNUSED_PARAM(graph); + UNUSED_PARAM(globalData); + UNUSED_PARAM(codeBlock); + return false; +#else + return ByteCodeParser(globalData, codeBlock, graph).parse(); +#endif +} + +} } // namespace JSC::DFG + +#endif diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.h b/Source/JavaScriptCore/dfg/DFGByteCodeParser.h new file mode 100644 index 0000000..d4efe61 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2011 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGByteCodeParser_h +#define DFGByteCodeParser_h + +#if ENABLE(DFG_JIT) + +#include <dfg/DFGGraph.h> + +namespace JSC { + +class CodeBlock; +class JSGlobalData; + +namespace DFG { + +// Populate the Graph with a dataflow graph compiled from the full +// bytecode of the CodeBlock. +bool parse(Graph&, JSGlobalData*, CodeBlock*); + +} } // namespace JSC::DFG + +#endif +#endif diff --git a/Source/JavaScriptCore/dfg/DFGGenerationInfo.h b/Source/JavaScriptCore/dfg/DFGGenerationInfo.h new file mode 100644 index 0000000..1c72e09 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGGenerationInfo.h @@ -0,0 +1,238 @@ +/* + * Copyright (C) 2011 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGGenerationInfo_h +#define DFGGenerationInfo_h + +#if ENABLE(DFG_JIT) + +#include <dfg/DFGJITCompiler.h> + +namespace JSC { namespace DFG { + +// === DataFormat === +// +// This enum tracks the current representation in which a value is being held. +// Values may be unboxed primitives (int32, double, or cell), or boxed as a JSValue. +// For boxed values, we may know the type of boxing that has taken place. +// (May also need bool, array, object, string types!) +enum DataFormat { + DataFormatNone = 0, + DataFormatInteger = 1, + DataFormatDouble = 2, + DataFormatCell = 3, + DataFormatJS = 8, + DataFormatJSInteger = DataFormatJS | DataFormatInteger, + DataFormatJSDouble = DataFormatJS | DataFormatDouble, + DataFormatJSCell = DataFormatJS | DataFormatCell, +}; + +// === GenerationInfo === +// +// This class is used to track the current status of live values during code generation. +// It can provide information as to whether a value is in machine registers, and if so which, +// whether a value has been spilled to the RegisterFile, and if so may be able to provide +// details of the format in memory (all values are spilled in a boxed form, but we may be +// able to track the type of box), and tracks how many outstanding uses of a value remain, +// so that we know when the value is dead and the machine registers associated with it +// may be released. 
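// Editorial sketch, not part of this commit: what the DataFormat bit layout
// above buys us. DataFormatJS is a single bit, and the DataFormatJS* variants
// are defined as DataFormatJS | <primitive format>, so boxing queries are
// simple mask operations ('isJSFormat' and 'primitiveFormat' are hypothetical).
static inline bool isJSFormat(DataFormat format)
{
    return format & DataFormatJS; // true for DataFormatJS and the DataFormatJS* variants
}
static inline DataFormat primitiveFormat(DataFormat format)
{
    // e.g. DataFormatJSInteger -> DataFormatInteger; unboxed formats unchanged.
    return static_cast<DataFormat>(format & ~DataFormatJS);
}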
+class GenerationInfo { +public: + GenerationInfo() + : m_nodeIndex(NoNode) + , m_useCount(0) + , m_registerFormat(DataFormatNone) + , m_spillFormat(DataFormatNone) + , m_canFill(false) + { + } + + void initConstant(NodeIndex nodeIndex, uint32_t useCount) + { + m_nodeIndex = nodeIndex; + m_useCount = useCount; + m_registerFormat = DataFormatNone; + m_spillFormat = DataFormatNone; + m_canFill = true; + } + void initInteger(NodeIndex nodeIndex, uint32_t useCount, GPRReg gpr) + { + m_nodeIndex = nodeIndex; + m_useCount = useCount; + m_registerFormat = DataFormatInteger; + m_spillFormat = DataFormatNone; + m_canFill = false; + u.gpr = gpr; + } + void initJSValue(NodeIndex nodeIndex, uint32_t useCount, GPRReg gpr, DataFormat format = DataFormatJS) + { + ASSERT(format & DataFormatJS); + + m_nodeIndex = nodeIndex; + m_useCount = useCount; + m_registerFormat = format; + m_spillFormat = DataFormatNone; + m_canFill = false; + u.gpr = gpr; + } + void initCell(NodeIndex nodeIndex, uint32_t useCount, GPRReg gpr) + { + m_nodeIndex = nodeIndex; + m_useCount = useCount; + m_registerFormat = DataFormatCell; + m_spillFormat = DataFormatNone; + m_canFill = false; + u.gpr = gpr; + } + void initDouble(NodeIndex nodeIndex, uint32_t useCount, FPRReg fpr) + { + m_nodeIndex = nodeIndex; + m_useCount = useCount; + m_registerFormat = DataFormatDouble; + m_spillFormat = DataFormatNone; + m_canFill = false; + u.fpr = fpr; + } + void initNone(NodeIndex nodeIndex, uint32_t useCount) + { + m_nodeIndex = nodeIndex; + m_useCount = useCount; + m_registerFormat = DataFormatNone; + m_spillFormat = DataFormatNone; + m_canFill = false; + } + + // Get the index of the node that produced this value. + NodeIndex nodeIndex() { return m_nodeIndex; } + + // Mark the value as having been used (decrement the useCount). + // Returns true if this was the last use of the value, and any + // associated machine registers may be freed. + bool use() + { + return !--m_useCount; + } + + // Used to check the operands of operations to see if they are on + // their last use; in some cases it may be safe to reuse the same + // machine register for the result of the operation. + bool canReuse() + { + ASSERT(m_useCount); + return m_useCount == 1; + } + + // Get the format of the value in machine registers (or 'none'). + DataFormat registerFormat() { return m_registerFormat; } + // Get the format of the value as it is spilled in the RegisterFile (or 'none'). + DataFormat spillFormat() { return m_spillFormat; } + + // Get the machine register currently holding the value. + GPRReg gpr() { ASSERT(m_registerFormat && m_registerFormat != DataFormatDouble); return u.gpr; } + FPRReg fpr() { ASSERT(m_registerFormat == DataFormatDouble); return u.fpr; } + + // Check whether a value needs spilling in order to free up any associated machine registers. + bool needsSpill() + { + // This should only be called on values that are currently in a register. + ASSERT(m_registerFormat != DataFormatNone); + // Constants do not need spilling, nor do values that have already been + // spilled to the RegisterFile. + return !m_canFill; + } + + // Called when a VirtualRegister is being spilled to the RegisterFile for the first time. + void spill(DataFormat spillFormat) + { + // We shouldn't be spilling values that don't need spilling. + ASSERT(!m_canFill); + ASSERT(m_spillFormat == DataFormatNone); + // We should only be spilling values that are currently in machine registers. 
+ ASSERT(m_registerFormat != DataFormatNone); + // We only spill values that have been boxed as a JSValue; otherwise the GC + // would need a way to distinguish cell pointers from numeric primitives. + ASSERT(spillFormat & DataFormatJS); + + m_registerFormat = DataFormatNone; + m_spillFormat = spillFormat; + m_canFill = true; + } + + // Called on values that don't need spilling (constants and values that have + // already been spilled), to mark them as no longer being in machine registers. + void setSpilled() + { + // Should only be called on values that don't need spilling, and are currently in registers. + ASSERT(m_canFill && m_registerFormat != DataFormatNone); + m_registerFormat = DataFormatNone; + } + + // Record that this value is filled into machine registers, + // tracking which registers, and what format the value has. + void fillJSValue(GPRReg gpr, DataFormat format = DataFormatJS) + { + ASSERT(format & DataFormatJS); + m_registerFormat = format; + u.gpr = gpr; + } + void fillInteger(GPRReg gpr) + { + m_registerFormat = DataFormatInteger; + u.gpr = gpr; + } + void fillDouble(FPRReg fpr) + { + m_registerFormat = DataFormatDouble; + u.fpr = fpr; + } + +#ifndef NDEBUG + bool alive() + { + return m_useCount; + } +#endif + +private: + // The index of the node whose result is stored in this virtual register. + // FIXME: Can we remove this? - this is currently only used when collecting + // snapshots of the RegisterBank for SpeculationCheck/EntryLocation. Could + // investigate storing NodeIndex as the name in RegisterBank, instead of + // VirtualRegister. + NodeIndex m_nodeIndex; + uint32_t m_useCount; + DataFormat m_registerFormat; + DataFormat m_spillFormat; + bool m_canFill; + union { + GPRReg gpr; + FPRReg fpr; + } u; +}; + +} } // namespace JSC::DFG + +#endif +#endif diff --git a/Source/JavaScriptCore/dfg/DFGGraph.cpp b/Source/JavaScriptCore/dfg/DFGGraph.cpp new file mode 100644 index 0000000..84e2d4d --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGGraph.cpp @@ -0,0 +1,181 @@ +/* + * Copyright (C) 2011 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "DFGGraph.h" + +#include "CodeBlock.h" + +#if ENABLE(DFG_JIT) + +namespace JSC { namespace DFG { + +#ifndef NDEBUG + +// Creates an array of stringized names. +static const char* dfgOpNames[] = { +#define STRINGIZE_DFG_OP_ENUM(opcode, flags) #opcode , + FOR_EACH_DFG_OP(STRINGIZE_DFG_OP_ENUM) +#undef STRINGIZE_DFG_OP_ENUM +}; + +void Graph::dump(NodeIndex nodeIndex, CodeBlock* codeBlock) +{ + Node& node = at(nodeIndex); + NodeType op = node.op; + + unsigned refCount = node.refCount; + if (!refCount) + return; + bool mustGenerate = node.mustGenerate(); + if (mustGenerate) + --refCount; + + // Example/explanation of dataflow dump output + // + // 14: <!2:7> GetByVal(@3, @13) + // ^1 ^2 ^3 ^4 ^5 + // + // (1) The nodeIndex of this operation. + // (2) The reference count. The number printed is the 'real' count, + // not including the 'mustGenerate' ref. If the node is + // 'mustGenerate' then the count is prefixed with '!'. + // (3) The virtual register slot assigned to this node. + // (4) The name of the operation. + // (5) The arguments to the operation. They may be of the form: + // @# - a NodeIndex referencing a prior node in the graph. + // arg# - an argument number. + // $# - the index in the CodeBlock of a constant { for numeric constants the value is displayed | for integers, in both decimal and hex }. + // id# - the index in the CodeBlock of an identifier { if codeBlock is passed to dump(), the string representation is displayed }. + // var# - the index of a var on the global object, used by GetGlobalVar/PutGlobalVar operations. + printf("% 4d:\t<%c%u:%u>\t%s(", (int)nodeIndex, mustGenerate ? '!' : ' ', refCount, node.virtualRegister, dfgOpNames[op & NodeIdMask]); + if (node.child1 != NoNode) + printf("@%u", node.child1); + if (node.child2 != NoNode) + printf(", @%u", node.child2); + if (node.child3 != NoNode) + printf(", @%u", node.child3); + bool hasPrinted = node.child1 != NoNode; + + if (node.hasVarNumber()) { + printf("%svar%u", hasPrinted ? ", " : "", node.varNumber()); + hasPrinted = true; + } + if (node.hasIdentifier()) { + if (codeBlock) + printf("%sid%u{%s}", hasPrinted ? ", " : "", node.identifierNumber(), codeBlock->identifier(node.identifierNumber()).ustring().utf8().data()); + else + printf("%sid%u", hasPrinted ? ", " : "", node.identifierNumber()); + hasPrinted = true; + } + if (node.hasLocal()) { + int local = node.local(); + if (local < 0) + printf("%sarg%u", hasPrinted ? ", " : "", local - codeBlock->thisRegister()); + else + printf("%sr%u", hasPrinted ? ", " : "", local); + hasPrinted = true; + } + if (op == Int32Constant) { + printf("%s$%u{%d|0x%08x}", hasPrinted ? ", " : "", node.constantNumber(), node.int32Constant(), node.int32Constant()); + hasPrinted = true; + } + if (op == DoubleConstant) { + printf("%s$%u{%f}", hasPrinted ? ", " : "", node.constantNumber(), node.numericConstant()); + hasPrinted = true; + } + if (op == JSConstant) { + printf("%s$%u", hasPrinted ? ", " : "", node.constantNumber()); + hasPrinted = true; + } + if (node.isBranch() || node.isJump()) { + printf("%sT:#%u", hasPrinted ? ", " : "", blockIndexForBytecodeOffset(node.takenBytecodeOffset())); + hasPrinted = true; + } + if (node.isBranch()) { + printf("%sF:#%u", hasPrinted ? 
", " : "", blockIndexForBytecodeOffset(node.notTakenBytecodeOffset())); + hasPrinted = true; + } + + printf(")\n"); +} + +void Graph::dump(CodeBlock* codeBlock) +{ + for (size_t b = 0; b < m_blocks.size(); ++b) { + printf("Block #%u:\n", (int)b); + BasicBlock& block = m_blocks[b]; + for (size_t i = block.begin; i < block.end; ++i) + dump(i, codeBlock); + } +} + +#endif + +// FIXME: Convert these methods to be iterative, not recursive. +void Graph::refChildren(NodeIndex op) +{ + Node& node = at(op); + + if (node.child1 == NoNode) { + ASSERT(node.child2 == NoNode && node.child3 == NoNode); + return; + } + ref(node.child1); + + if (node.child2 == NoNode) { + ASSERT(node.child3 == NoNode); + return; + } + ref(node.child2); + + if (node.child3 == NoNode) + return; + ref(node.child3); +} +void Graph::derefChildren(NodeIndex op) +{ + Node& node = at(op); + + if (node.child1 == NoNode) { + ASSERT(node.child2 == NoNode && node.child3 == NoNode); + return; + } + deref(node.child1); + + if (node.child2 == NoNode) { + ASSERT(node.child3 == NoNode); + return; + } + deref(node.child2); + + if (node.child3 == NoNode) + return; + deref(node.child3); +} + +} } // namespace JSC::DFG + +#endif diff --git a/Source/JavaScriptCore/dfg/DFGGraph.h b/Source/JavaScriptCore/dfg/DFGGraph.h new file mode 100644 index 0000000..c6bc7df --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGGraph.h @@ -0,0 +1,111 @@ +/* + * Copyright (C) 2011 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGGraph_h +#define DFGGraph_h + +#if ENABLE(DFG_JIT) + +#include <dfg/DFGNode.h> +#include <wtf/Vector.h> +#include <wtf/StdLibExtras.h> + +namespace JSC { + +class CodeBlock; + +namespace DFG { + +typedef uint32_t BlockIndex; + +struct BasicBlock { + BasicBlock(unsigned bytecodeBegin, NodeIndex begin, NodeIndex end) + : bytecodeBegin(bytecodeBegin) + , begin(begin) + , end(end) + { + } + + static inline BlockIndex getBytecodeBegin(BasicBlock* block) + { + return block->bytecodeBegin; + } + + unsigned bytecodeBegin; + NodeIndex begin; + NodeIndex end; +}; + +// +// === Graph === +// +// The dataflow graph is an ordered vector of nodes. +// The order may be significant for nodes with side-effects (property accesses, value conversions). 
+// Nodes that are 'dead' remain in the vector with refCount 0. +class Graph : public Vector<Node, 64> { +public: + // Mark a node as being referenced. + void ref(NodeIndex nodeIndex) + { + Node& node = at(nodeIndex); + // If the value (before incrementing) was at refCount zero then we need to ref its children. + if (!node.refCount++) + refChildren(nodeIndex); + } + void deref(NodeIndex nodeIndex) + { + Node& node = at(nodeIndex); + ASSERT(node.refCount); + // If the value (after decrementing) becomes refCount zero then we need to deref its children. + if (!--node.refCount) + derefChildren(nodeIndex); + } + +#ifndef NDEBUG + // CodeBlock is optional, but may allow additional information to be dumped (e.g. Identifier names). + void dump(CodeBlock* = 0); + void dump(NodeIndex, CodeBlock* = 0); +#endif + + Vector<BasicBlock> m_blocks; + + BlockIndex blockIndexForBytecodeOffset(unsigned bytecodeBegin) + { + BasicBlock* begin = m_blocks.begin(); + BasicBlock* block = binarySearch<BasicBlock, unsigned, BasicBlock::getBytecodeBegin>(begin, m_blocks.size(), bytecodeBegin); + ASSERT(block >= m_blocks.begin() && block < m_blocks.end()); + return static_cast<BlockIndex>(block - begin); + } + +private: + // When a node's refCount goes from 0 to 1, it must (logically) recursively ref all of its children, and vice versa. + void refChildren(NodeIndex); + void derefChildren(NodeIndex); +}; + +} } // namespace JSC::DFG + +#endif +#endif diff --git a/Source/JavaScriptCore/dfg/DFGJITCodeGenerator.cpp b/Source/JavaScriptCore/dfg/DFGJITCodeGenerator.cpp new file mode 100644 index 0000000..52e0abe --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGJITCodeGenerator.cpp @@ -0,0 +1,558 @@ +/* + * Copyright (C) 2011 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "DFGJITCodeGenerator.h" + +#if ENABLE(DFG_JIT) + +#include "DFGNonSpeculativeJIT.h" +#include "DFGSpeculativeJIT.h" +#include "LinkBuffer.h" + +namespace JSC { namespace DFG { + +GPRReg JITCodeGenerator::fillInteger(NodeIndex nodeIndex, DataFormat& returnFormat) +{ + Node& node = m_jit.graph()[nodeIndex]; + VirtualRegister virtualRegister = node.virtualRegister; + GenerationInfo& info = m_generationInfo[virtualRegister]; + + if (info.registerFormat() == DataFormatNone) { + GPRReg gpr = allocate(); + JITCompiler::RegisterID reg = JITCompiler::gprToRegisterID(gpr); + + if (node.isConstant()) { + m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); + if (isInt32Constant(nodeIndex)) { + m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), reg); + info.fillInteger(gpr); + returnFormat = DataFormatInteger; + return gpr; + } + if (isDoubleConstant(nodeIndex)) { + JSValue jsValue = jsNumber(valueOfDoubleConstant(nodeIndex)); + m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), reg); + } else { + ASSERT(isJSConstant(nodeIndex)); + JSValue jsValue = valueOfJSConstant(nodeIndex); + m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), reg); + } + } else { + ASSERT(info.spillFormat() == DataFormatJS || info.spillFormat() == DataFormatJSInteger); + m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); + m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), reg); + } + + // Since we statically know that we're filling an integer, and values + // in the RegisterFile are boxed, this must be DataFormatJSInteger. + // We will check this with a jitAssert below. + info.fillJSValue(gpr, DataFormatJSInteger); + unlock(gpr); + } + + switch (info.registerFormat()) { + case DataFormatNone: + // Should have filled, above. + case DataFormatJSDouble: + case DataFormatDouble: + case DataFormatJS: + case DataFormatCell: + case DataFormatJSCell: + // Should only be calling this function if we know this operand to be integer. + ASSERT_NOT_REACHED(); + + case DataFormatJSInteger: { + GPRReg gpr = info.gpr(); + m_gprs.lock(gpr); + m_jit.jitAssertIsJSInt32(gpr); + returnFormat = DataFormatJSInteger; + return gpr; + } + + case DataFormatInteger: { + GPRReg gpr = info.gpr(); + m_gprs.lock(gpr); + m_jit.jitAssertIsInt32(gpr); + returnFormat = DataFormatInteger; + return gpr; + } + } + + ASSERT_NOT_REACHED(); + return InvalidGPRReg; +} + +FPRReg JITCodeGenerator::fillDouble(NodeIndex nodeIndex) +{ + Node& node = m_jit.graph()[nodeIndex]; + VirtualRegister virtualRegister = node.virtualRegister; + GenerationInfo& info = m_generationInfo[virtualRegister]; + + if (info.registerFormat() == DataFormatNone) { + GPRReg gpr = allocate(); + JITCompiler::RegisterID reg = JITCompiler::gprToRegisterID(gpr); + + if (node.isConstant()) { + if (isInt32Constant(nodeIndex)) { + // FIXME: should not be reachable? + m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), reg); + m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); + info.fillInteger(gpr); + unlock(gpr); + } else if (isDoubleConstant(nodeIndex)) { + FPRReg fpr = fprAllocate(); + m_jit.move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(valueOfDoubleConstant(nodeIndex)))), reg); + m_jit.movePtrToDouble(reg, JITCompiler::fprToRegisterID(fpr)); + unlock(gpr); + + m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); + info.fillDouble(fpr); + return fpr; + } else { + // FIXME: should not be reachable? 
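+ // (A descriptive note, not part of the original patch: fillDouble() should
+ // only be called for operands known to be numeric, so a generic JSConstant
+ // is unexpected here; the code below conservatively boxes the constant
+ // as a JSValue anyway.)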
+ ASSERT(isJSConstant(nodeIndex)); + JSValue jsValue = valueOfJSConstant(nodeIndex); + m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), reg); + m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); + info.fillJSValue(gpr, DataFormatJS); + unlock(gpr); + } + } else { + DataFormat spillFormat = info.spillFormat(); + ASSERT(spillFormat & DataFormatJS); + m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); + m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), reg); + info.fillJSValue(gpr, m_isSpeculative ? spillFormat : DataFormatJS); + unlock(gpr); + } + } + + switch (info.registerFormat()) { + case DataFormatNone: + // Should have filled, above. + case DataFormatCell: + case DataFormatJSCell: + // Should only be calling this function if we know this operand to be numeric. + ASSERT_NOT_REACHED(); + + case DataFormatJS: { + GPRReg jsValueGpr = info.gpr(); + m_gprs.lock(jsValueGpr); + FPRReg fpr = fprAllocate(); + GPRReg tempGpr = allocate(); // FIXME: can we skip this allocation on the last use of the virtual register? + + JITCompiler::RegisterID jsValueReg = JITCompiler::gprToRegisterID(jsValueGpr); + JITCompiler::FPRegisterID fpReg = JITCompiler::fprToRegisterID(fpr); + JITCompiler::RegisterID tempReg = JITCompiler::gprToRegisterID(tempGpr); + + JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, jsValueReg, JITCompiler::tagTypeNumberRegister); + + m_jit.jitAssertIsJSDouble(jsValueGpr); + + // First, if we get here we have a double encoded as a JSValue + m_jit.move(jsValueReg, tempReg); + m_jit.addPtr(JITCompiler::tagTypeNumberRegister, tempReg); + m_jit.movePtrToDouble(tempReg, fpReg); + JITCompiler::Jump hasUnboxedDouble = m_jit.jump(); + + // Finally, handle integers. + isInteger.link(&m_jit); + m_jit.convertInt32ToDouble(jsValueReg, fpReg); + hasUnboxedDouble.link(&m_jit); + + m_gprs.release(jsValueGpr); + m_gprs.unlock(jsValueGpr); + m_gprs.unlock(tempGpr); + m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); + info.fillDouble(fpr); + return fpr; + } + + case DataFormatJSInteger: + case DataFormatInteger: { + FPRReg fpr = fprAllocate(); + GPRReg gpr = info.gpr(); + m_gprs.lock(gpr); + JITCompiler::RegisterID reg = JITCompiler::gprToRegisterID(gpr); + JITCompiler::FPRegisterID fpReg = JITCompiler::fprToRegisterID(fpr); + + m_jit.convertInt32ToDouble(reg, fpReg); + + m_gprs.release(gpr); + m_gprs.unlock(gpr); + m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); + info.fillDouble(fpr); + return fpr; + } + + // Unbox the double + case DataFormatJSDouble: { + GPRReg gpr = info.gpr(); + FPRReg fpr = unboxDouble(gpr); + + m_gprs.release(gpr); + m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); + + info.fillDouble(fpr); + return fpr; + } + + case DataFormatDouble: { + FPRReg fpr = info.fpr(); + m_fprs.lock(fpr); + return fpr; + } + } + + ASSERT_NOT_REACHED(); + return InvalidFPRReg; +} + +GPRReg JITCodeGenerator::fillJSValue(NodeIndex nodeIndex) +{ + Node& node = m_jit.graph()[nodeIndex]; + VirtualRegister virtualRegister = node.virtualRegister; + GenerationInfo& info = m_generationInfo[virtualRegister]; + + switch (info.registerFormat()) { + case DataFormatNone: { + GPRReg gpr = allocate(); + JITCompiler::RegisterID reg = JITCompiler::gprToRegisterID(gpr); + + if (node.isConstant()) { + if (isInt32Constant(nodeIndex)) { + info.fillJSValue(gpr, DataFormatJSInteger); + JSValue jsValue = jsNumber(valueOfInt32Constant(nodeIndex)); + m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), reg); + } else if 
(isDoubleConstant(nodeIndex)) {
+ info.fillJSValue(gpr, DataFormatJSDouble);
+ JSValue jsValue(JSValue::EncodeAsDouble, valueOfDoubleConstant(nodeIndex));
+ m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), reg);
+ } else {
+ ASSERT(isJSConstant(nodeIndex));
+ JSValue jsValue = valueOfJSConstant(nodeIndex);
+ m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), reg);
+ info.fillJSValue(gpr, DataFormatJS);
+ }
+
+ m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
+ } else {
+ DataFormat spillFormat = info.spillFormat();
+ ASSERT(spillFormat & DataFormatJS);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), reg);
+ info.fillJSValue(gpr, m_isSpeculative ? spillFormat : DataFormatJS);
+ }
+ return gpr;
+ }
+
+ case DataFormatInteger: {
+ GPRReg gpr = info.gpr();
+ // If the register has already been locked we need to take a copy.
+ // If not, we'll box the integer in place, so mark on the info that this is now type DataFormatJSInteger, not DataFormatInteger.
+ if (m_gprs.isLocked(gpr)) {
+ GPRReg result = allocate();
+ m_jit.orPtr(JITCompiler::tagTypeNumberRegister, JITCompiler::gprToRegisterID(gpr), JITCompiler::gprToRegisterID(result));
+ return result;
+ }
+ m_gprs.lock(gpr);
+ m_jit.orPtr(JITCompiler::tagTypeNumberRegister, JITCompiler::gprToRegisterID(gpr));
+ info.fillJSValue(gpr, DataFormatJSInteger);
+ return gpr;
+ }
+
+ case DataFormatDouble: {
+ FPRReg fpr = info.fpr();
+ GPRReg gpr = boxDouble(fpr);
+
+ // Update all info
+ info.fillJSValue(gpr, DataFormatJSDouble);
+ m_fprs.release(fpr);
+ m_gprs.retain(gpr, virtualRegister, SpillOrderJS);
+
+ return gpr;
+ }
+
+ case DataFormatCell:
+ // No retag required on JSVALUE64!
+ case DataFormatJS:
+ case DataFormatJSInteger:
+ case DataFormatJSDouble:
+ case DataFormatJSCell: {
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ return gpr;
+ }
+ }
+
+ ASSERT_NOT_REACHED();
+ return InvalidGPRReg;
+}
+
+void JITCodeGenerator::useChildren(Node& node)
+{
+ NodeIndex child1 = node.child1;
+ if (child1 == NoNode) {
+ ASSERT(node.child2 == NoNode && node.child3 == NoNode);
+ return;
+ }
+ use(child1);
+
+ NodeIndex child2 = node.child2;
+ if (child2 == NoNode) {
+ ASSERT(node.child3 == NoNode);
+ return;
+ }
+ use(child2);
+
+ NodeIndex child3 = node.child3;
+ if (child3 == NoNode)
+ return;
+ use(child3);
+}
+
+#ifndef NDEBUG
+static const char* dataFormatString(DataFormat format)
+{
+ // These values correspond to the DataFormat enum.
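+ // (Assumed encoding, inferred from the uses of DataFormat in this file: the
+ // low bits distinguish Integer/Double/Cell, DataFormatJS is the 0x8 flag bit
+ // (so DataFormatJSInteger == DataFormatJS | DataFormatInteger), and the
+ // 'Err!' slots below are unused encodings.)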
+ const char* strings[] = { + "[ ]", + "[ i]", + "[ d]", + "[ c]", + "Err!", + "Err!", + "Err!", + "Err!", + "[J ]", + "[Ji]", + "[Jd]", + "[Jc]", + "Err!", + "Err!", + "Err!", + "Err!", + }; + return strings[format]; +} + +void JITCodeGenerator::dump(const char* label) +{ + if (label) + fprintf(stderr, "<%s>\n", label); + + fprintf(stderr, " gprs:\n"); + m_gprs.dump(); + fprintf(stderr, " fprs:\n"); + m_fprs.dump(); + fprintf(stderr, " VirtualRegisters:\n"); + for (unsigned i = 0; i < m_generationInfo.size(); ++i) { + GenerationInfo& info = m_generationInfo[i]; + if (info.alive()) + fprintf(stderr, " % 3d:%s%s\n", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat())); + else + fprintf(stderr, " % 3d:[__][__]\n", i); + } + if (label) + fprintf(stderr, "</%s>\n", label); +} +#endif + + +#if DFG_CONSISTENCY_CHECK +void JITCodeGenerator::checkConsistency() +{ + VirtualRegister grpContents[numberOfGPRs]; + VirtualRegister frpContents[numberOfFPRs]; + + for (unsigned i = 0; i < numberOfGPRs; ++i) + grpContents[i] = InvalidVirtualRegister; + for (unsigned i = 0; i < numberOfFPRs; ++i) + frpContents[i] = InvalidVirtualRegister; + for (unsigned i = 0; i < m_generationInfo.size(); ++i) { + GenerationInfo& info = m_generationInfo[i]; + if (!info.alive()) + continue; + switch (info.registerFormat()) { + case DataFormatNone: + break; + case DataFormatInteger: + case DataFormatCell: + case DataFormatJS: + case DataFormatJSInteger: + case DataFormatJSDouble: + case DataFormatJSCell: { + GPRReg gpr = info.gpr(); + ASSERT(gpr != InvalidGPRReg); + grpContents[gpr] = (VirtualRegister)i; + break; + } + case DataFormatDouble: { + FPRReg fpr = info.fpr(); + ASSERT(fpr != InvalidFPRReg); + frpContents[fpr] = (VirtualRegister)i; + break; + } + } + } + + for (GPRReg i = gpr0; i < numberOfGPRs; next(i)) { + if (m_gprs.isLocked(i) || m_gprs.name(i) != grpContents[i]) { + dump(); + CRASH(); + } + } + for (FPRReg i = fpr0; i < numberOfFPRs; next(i)) { + if (m_fprs.isLocked(i) || m_fprs.name(i) != frpContents[i]) { + dump(); + CRASH(); + } + } +} +#endif + +GPRTemporary::GPRTemporary(JITCodeGenerator* jit) + : m_jit(jit) + , m_gpr(InvalidGPRReg) +{ + m_gpr = m_jit->allocate(); +} + +GPRTemporary::GPRTemporary(JITCodeGenerator* jit, SpeculateIntegerOperand& op1) + : m_jit(jit) + , m_gpr(InvalidGPRReg) +{ + // locking into a register may free for reuse! + op1.gpr(); + if (m_jit->canReuse(op1.index())) + m_gpr = m_jit->reuse(op1.gpr()); + else + m_gpr = m_jit->allocate(); +} + +GPRTemporary::GPRTemporary(JITCodeGenerator* jit, SpeculateIntegerOperand& op1, SpeculateIntegerOperand& op2) + : m_jit(jit) + , m_gpr(InvalidGPRReg) +{ + // locking into a register may free for reuse! + op1.gpr(); + op2.gpr(); + if (m_jit->canReuse(op1.index())) + m_gpr = m_jit->reuse(op1.gpr()); + else if (m_jit->canReuse(op2.index())) + m_gpr = m_jit->reuse(op2.gpr()); + else + m_gpr = m_jit->allocate(); +} + +GPRTemporary::GPRTemporary(JITCodeGenerator* jit, IntegerOperand& op1) + : m_jit(jit) + , m_gpr(InvalidGPRReg) +{ + // locking into a register may free for reuse! + op1.gpr(); + if (m_jit->canReuse(op1.index())) + m_gpr = m_jit->reuse(op1.gpr()); + else + m_gpr = m_jit->allocate(); +} + +GPRTemporary::GPRTemporary(JITCodeGenerator* jit, IntegerOperand& op1, IntegerOperand& op2) + : m_jit(jit) + , m_gpr(InvalidGPRReg) +{ + // locking into a register may free for reuse! 
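+ // (Calling gpr() on each operand first locks its value into a register; only
+ // after both are locked can reuse() safely hand one of those registers back,
+ // when canReuse() reports the operand is at its last use.)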
+ op1.gpr(); + op2.gpr(); + if (m_jit->canReuse(op1.index())) + m_gpr = m_jit->reuse(op1.gpr()); + else if (m_jit->canReuse(op2.index())) + m_gpr = m_jit->reuse(op2.gpr()); + else + m_gpr = m_jit->allocate(); +} + +GPRTemporary::GPRTemporary(JITCodeGenerator* jit, SpeculateCellOperand& op1) + : m_jit(jit) + , m_gpr(InvalidGPRReg) +{ + // locking into a register may free for reuse! + op1.gpr(); + if (m_jit->canReuse(op1.index())) + m_gpr = m_jit->reuse(op1.gpr()); + else + m_gpr = m_jit->allocate(); +} + +GPRTemporary::GPRTemporary(JITCodeGenerator* jit, JSValueOperand& op1) + : m_jit(jit) + , m_gpr(InvalidGPRReg) +{ + // locking into a register may free for reuse! + op1.gpr(); + if (m_jit->canReuse(op1.index())) + m_gpr = m_jit->reuse(op1.gpr()); + else + m_gpr = m_jit->allocate(); +} + +FPRTemporary::FPRTemporary(JITCodeGenerator* jit) + : m_jit(jit) + , m_fpr(InvalidFPRReg) +{ + m_fpr = m_jit->fprAllocate(); +} + +FPRTemporary::FPRTemporary(JITCodeGenerator* jit, DoubleOperand& op1) + : m_jit(jit) + , m_fpr(InvalidFPRReg) +{ + // locking into a register may free for reuse! + op1.fpr(); + if (m_jit->canReuse(op1.index())) + m_fpr = m_jit->reuse(op1.fpr()); + else + m_fpr = m_jit->fprAllocate(); +} + +FPRTemporary::FPRTemporary(JITCodeGenerator* jit, DoubleOperand& op1, DoubleOperand& op2) + : m_jit(jit) + , m_fpr(InvalidFPRReg) +{ + // locking into a register may free for reuse! + op1.fpr(); + op2.fpr(); + if (m_jit->canReuse(op1.index())) + m_fpr = m_jit->reuse(op1.fpr()); + else if (m_jit->canReuse(op2.index())) + m_fpr = m_jit->reuse(op2.fpr()); + else + m_fpr = m_jit->fprAllocate(); +} + +} } // namespace JSC::DFG + +#endif diff --git a/Source/JavaScriptCore/dfg/DFGJITCodeGenerator.h b/Source/JavaScriptCore/dfg/DFGJITCodeGenerator.h new file mode 100644 index 0000000..0abd3c7 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGJITCodeGenerator.h @@ -0,0 +1,998 @@ +/* + * Copyright (C) 2011 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef DFGJITCodeGenerator_h
+#define DFGJITCodeGenerator_h
+
+#if ENABLE(DFG_JIT)
+
+#include "CodeBlock.h"
+#include <dfg/DFGGenerationInfo.h>
+#include <dfg/DFGGraph.h>
+#include <dfg/DFGJITCompiler.h>
+#include <dfg/DFGOperations.h>
+#include <dfg/DFGRegisterBank.h>
+
+namespace JSC { namespace DFG {
+
+class SpeculateIntegerOperand;
+class SpeculateStrictInt32Operand;
+class SpeculateCellOperand;
+
+
+// === JITCodeGenerator ===
+//
+// This class provides common infrastructure used by the speculative &
+// non-speculative JITs. It provides common mechanisms for virtual and
+// physical register management, calls out from JIT code to helper
+// functions, etc.
+class JITCodeGenerator {
+protected:
+ typedef MacroAssembler::TrustedImm32 TrustedImm32;
+ typedef MacroAssembler::Imm32 Imm32;
+
+ // These constants are used to set priorities for spill order for
+ // the register allocator.
+ enum SpillOrder {
+ SpillOrderNone,
+ SpillOrderConstant = 1, // no spill, and cheap fill
+ SpillOrderSpilled = 2, // no spill
+ SpillOrderJS = 4, // needs spill
+ SpillOrderCell = 4, // needs spill
+ SpillOrderInteger = 5, // needs spill and box
+ SpillOrderDouble = 6, // needs spill and convert
+ SpillOrderMax
+ };
+
+
+public:
+ GPRReg fillInteger(NodeIndex, DataFormat& returnFormat);
+ FPRReg fillDouble(NodeIndex);
+ GPRReg fillJSValue(NodeIndex);
+
+ // lock and unlock GPR & FPR registers.
+ void lock(GPRReg reg)
+ {
+ m_gprs.lock(reg);
+ }
+ void lock(FPRReg reg)
+ {
+ m_fprs.lock(reg);
+ }
+ void unlock(GPRReg reg)
+ {
+ m_gprs.unlock(reg);
+ }
+ void unlock(FPRReg reg)
+ {
+ m_fprs.unlock(reg);
+ }
+
+ // Used to check whether a child node is at its last use,
+ // so that its machine registers may be reused.
+ bool canReuse(NodeIndex nodeIndex)
+ {
+ VirtualRegister virtualRegister = m_jit.graph()[nodeIndex].virtualRegister;
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ return info.canReuse();
+ }
+ GPRReg reuse(GPRReg reg)
+ {
+ m_gprs.lock(reg);
+ return reg;
+ }
+ FPRReg reuse(FPRReg reg)
+ {
+ m_fprs.lock(reg);
+ return reg;
+ }
+
+ // Allocate a gpr/fpr.
+ GPRReg allocate()
+ {
+ VirtualRegister spillMe;
+ GPRReg gpr = m_gprs.allocate(spillMe);
+ if (spillMe != InvalidVirtualRegister)
+ spill(spillMe);
+ return gpr;
+ }
+ FPRReg fprAllocate()
+ {
+ VirtualRegister spillMe;
+ FPRReg fpr = m_fprs.allocate(spillMe);
+ if (spillMe != InvalidVirtualRegister)
+ spill(spillMe);
+ return fpr;
+ }
+
+ // Check whether a VirtualRegister is currently in a machine register.
+ // We use this when filling operands, to fill those that are already in
+ // machine registers first (by locking VirtualRegisters that are already
+ // in a machine register before filling those that are not, we attempt to
+ // avoid spilling values we will need immediately).
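+ // For example, the Operand classes below apply exactly this policy in
+ // their constructors (a restatement of the pattern, not new code):
+ //     if (jit->isFilled(index))
+ //         gpr(); // lock now if already in a register; otherwise fill lazily on first use.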
+ bool isFilled(NodeIndex nodeIndex)
+ {
+ VirtualRegister virtualRegister = m_jit.graph()[nodeIndex].virtualRegister;
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ return info.registerFormat() != DataFormatNone;
+ }
+ bool isFilledDouble(NodeIndex nodeIndex)
+ {
+ VirtualRegister virtualRegister = m_jit.graph()[nodeIndex].virtualRegister;
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ return info.registerFormat() == DataFormatDouble;
+ }
+
+protected:
+ JITCodeGenerator(JITCompiler& jit, bool isSpeculative)
+ : m_jit(jit)
+ , m_isSpeculative(isSpeculative)
+ , m_compileIndex(0)
+ , m_generationInfo(m_jit.codeBlock()->m_numCalleeRegisters)
+ , m_blockHeads(jit.graph().m_blocks.size())
+ {
+ }
+
+ // These methods convert between raw doubles, and doubles boxed as JSValues.
+ GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
+ {
+ JITCompiler::FPRegisterID fpReg = JITCompiler::fprToRegisterID(fpr);
+ JITCompiler::RegisterID reg = JITCompiler::gprToRegisterID(gpr);
+ m_jit.moveDoubleToPtr(fpReg, reg);
+ m_jit.subPtr(JITCompiler::tagTypeNumberRegister, reg);
+ return gpr;
+ }
+ FPRReg unboxDouble(GPRReg gpr, FPRReg fpr)
+ {
+ JITCompiler::RegisterID reg = JITCompiler::gprToRegisterID(gpr);
+ JITCompiler::FPRegisterID fpReg = JITCompiler::fprToRegisterID(fpr);
+ m_jit.addPtr(JITCompiler::tagTypeNumberRegister, reg);
+ m_jit.movePtrToDouble(reg, fpReg);
+ return fpr;
+ }
+ GPRReg boxDouble(FPRReg fpr)
+ {
+ return boxDouble(fpr, allocate());
+ }
+ FPRReg unboxDouble(GPRReg gpr)
+ {
+ return unboxDouble(gpr, fprAllocate());
+ }
+
+ // Called on an operand once it has been consumed by a parent node.
+ void use(NodeIndex nodeIndex)
+ {
+ VirtualRegister virtualRegister = m_jit.graph()[nodeIndex].virtualRegister;
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ // use() returns true when the value becomes dead, and any
+ // associated resources may be freed.
+ if (!info.use())
+ return;
+
+ // Release the associated machine registers.
+ DataFormat registerFormat = info.registerFormat();
+ if (registerFormat == DataFormatDouble)
+ m_fprs.release(info.fpr());
+ else if (registerFormat != DataFormatNone)
+ m_gprs.release(info.gpr());
+ }
+
+ // Spill a VirtualRegister to the RegisterFile.
+ void spill(VirtualRegister spillMe)
+ {
+ GenerationInfo& info = m_generationInfo[spillMe];
+
+ // Check the GenerationInfo to see if this value needs writing
+ // to the RegisterFile - if not, mark it as spilled & return.
+ if (!info.needsSpill()) {
+ info.setSpilled();
+ return;
+ }
+
+ DataFormat spillFormat = info.registerFormat();
+ if (spillFormat == DataFormatDouble) {
+ // All values are spilled as JSValues, so box the double via a temporary gpr.
+ GPRReg gpr = boxDouble(info.fpr());
+ m_jit.storePtr(JITCompiler::gprToRegisterID(gpr), JITCompiler::addressFor(spillMe));
+ unlock(gpr);
+ info.spill(DataFormatJSDouble);
+ return;
+ }
+
+ // The following code handles JSValues, int32s, and cells.
+ ASSERT(spillFormat == DataFormatInteger || spillFormat == DataFormatCell || spillFormat & DataFormatJS);
+
+ JITCompiler::RegisterID reg = JITCompiler::gprToRegisterID(info.gpr());
+ // We need to box int32 and cell values ...
+ // but on JSVALUE64 boxing a cell is a no-op!
+ if (spillFormat == DataFormatInteger)
+ m_jit.orPtr(JITCompiler::tagTypeNumberRegister, reg);
+
+ // Spill the value, and record it as spilled in its boxed form.
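+ // (On JSVALUE64 the orPtr of tagTypeNumberRegister above is all that is
+ // needed to box an int32; cells and JSValues are already in boxed form,
+ // so they are stored unchanged.)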
+ m_jit.storePtr(reg, JITCompiler::addressFor(spillMe)); + info.spill((DataFormat)(spillFormat | DataFormatJS)); + } + + // Checks/accessors for constant values. + bool isConstant(NodeIndex nodeIndex) { return m_jit.isConstant(nodeIndex); } + bool isInt32Constant(NodeIndex nodeIndex) { return m_jit.isInt32Constant(nodeIndex); } + bool isDoubleConstant(NodeIndex nodeIndex) { return m_jit.isDoubleConstant(nodeIndex); } + bool isJSConstant(NodeIndex nodeIndex) { return m_jit.isJSConstant(nodeIndex); } + int32_t valueOfInt32Constant(NodeIndex nodeIndex) { return m_jit.valueOfInt32Constant(nodeIndex); } + double valueOfDoubleConstant(NodeIndex nodeIndex) { return m_jit.valueOfDoubleConstant(nodeIndex); } + JSValue valueOfJSConstant(NodeIndex nodeIndex) { return m_jit.valueOfJSConstant(nodeIndex); } + + Identifier* identifier(unsigned index) + { + return &m_jit.codeBlock()->identifier(index); + } + + // Spill all VirtualRegisters back to the RegisterFile. + void flushRegisters() + { + for (GPRReg gpr = gpr0; gpr < numberOfGPRs; next(gpr)) { + VirtualRegister name = m_gprs.name(gpr); + if (name != InvalidVirtualRegister) { + spill(name); + m_gprs.release(gpr); + } + } + for (FPRReg fpr = fpr0; fpr < numberOfFPRs; next(fpr)) { + VirtualRegister name = m_fprs.name(fpr); + if (name != InvalidVirtualRegister) { + spill(name); + m_fprs.release(fpr); + } + } + } + +#ifndef NDEBUG + // Used to ASSERT flushRegisters() has been called prior to + // calling out from JIT code to a C helper function. + bool isFlushed() + { + for (GPRReg gpr = gpr0; gpr < numberOfGPRs; next(gpr)) { + VirtualRegister name = m_gprs.name(gpr); + if (name != InvalidVirtualRegister) + return false; + } + for (FPRReg fpr = fpr0; fpr < numberOfFPRs; next(fpr)) { + VirtualRegister name = m_fprs.name(fpr); + if (name != InvalidVirtualRegister) + return false; + } + return true; + } +#endif + + // Get the JSValue representation of a constant. + JSValue constantAsJSValue(NodeIndex nodeIndex) + { + Node& node = m_jit.graph()[nodeIndex]; + if (isInt32Constant(nodeIndex)) + return jsNumber(node.int32Constant()); + if (isDoubleConstant(nodeIndex)) + return JSValue(JSValue::EncodeAsDouble, node.numericConstant()); + ASSERT(isJSConstant(nodeIndex)); + return valueOfJSConstant(nodeIndex); + } + MacroAssembler::ImmPtr constantAsJSValueAsImmPtr(NodeIndex nodeIndex) + { + return MacroAssembler::ImmPtr(JSValue::encode(constantAsJSValue(nodeIndex))); + } + + // Helper functions to enable code sharing in implementations of bit/shift ops. 
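+ // For example, a typical caller might emit (a sketch of assumed usage, not
+ // code from this patch):
+ //     bitOp(op, valueOfInt32Constant(node.child2), op1.registerID(), result.registerID());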
+ void bitOp(NodeType op, int32_t imm, MacroAssembler::RegisterID op1, MacroAssembler::RegisterID result)
+ {
+ switch (op) {
+ case BitAnd:
+ m_jit.and32(Imm32(imm), op1, result);
+ break;
+ case BitOr:
+ m_jit.or32(Imm32(imm), op1, result);
+ break;
+ case BitXor:
+ m_jit.xor32(Imm32(imm), op1, result);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ }
+ void bitOp(NodeType op, MacroAssembler::RegisterID op1, MacroAssembler::RegisterID op2, MacroAssembler::RegisterID result)
+ {
+ switch (op) {
+ case BitAnd:
+ m_jit.and32(op1, op2, result);
+ break;
+ case BitOr:
+ m_jit.or32(op1, op2, result);
+ break;
+ case BitXor:
+ m_jit.xor32(op1, op2, result);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ }
+ void shiftOp(NodeType op, MacroAssembler::RegisterID op1, int32_t shiftAmount, MacroAssembler::RegisterID result)
+ {
+ switch (op) {
+ case BitRShift:
+ m_jit.rshift32(op1, Imm32(shiftAmount), result);
+ break;
+ case BitLShift:
+ m_jit.lshift32(op1, Imm32(shiftAmount), result);
+ break;
+ case BitURShift:
+ m_jit.urshift32(op1, Imm32(shiftAmount), result);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ }
+ void shiftOp(NodeType op, MacroAssembler::RegisterID op1, MacroAssembler::RegisterID shiftAmount, MacroAssembler::RegisterID result)
+ {
+ switch (op) {
+ case BitRShift:
+ m_jit.rshift32(op1, shiftAmount, result);
+ break;
+ case BitLShift:
+ m_jit.lshift32(op1, shiftAmount, result);
+ break;
+ case BitURShift:
+ m_jit.urshift32(op1, shiftAmount, result);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ }
+
+ // Called once a node has completed code generation but prior to setting
+ // its result, to free up its children. (This must happen prior to setting
+ // the node's result, since the node may have the same VirtualRegister as
+ // a child, and as such will use the same GenerationInfo.)
+ void useChildren(Node&);
+
+ // These methods are called to initialize the GenerationInfo
+ // to describe the result of an operation.
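+ // For example, a node that computed an int32 into reg would typically finish
+ // with (a sketch of the expected call, not code from this patch):
+ //     integerResult(reg, m_compileIndex);
+ // which frees the children and records reg as holding this node's result.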
+ void integerResult(GPRReg reg, NodeIndex nodeIndex, DataFormat format = DataFormatInteger)
+ {
+ Node& node = m_jit.graph()[nodeIndex];
+ useChildren(node);
+
+ VirtualRegister virtualRegister = node.virtualRegister;
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ if (format == DataFormatInteger) {
+ m_jit.jitAssertIsInt32(reg);
+ m_gprs.retain(reg, virtualRegister, SpillOrderInteger);
+ info.initInteger(nodeIndex, node.refCount, reg);
+ } else {
+ ASSERT(format == DataFormatJSInteger);
+ m_jit.jitAssertIsJSInt32(reg);
+ m_gprs.retain(reg, virtualRegister, SpillOrderJS);
+ info.initJSValue(nodeIndex, node.refCount, reg, format);
+ }
+ }
+ void noResult(NodeIndex nodeIndex)
+ {
+ Node& node = m_jit.graph()[nodeIndex];
+ useChildren(node);
+
+ VirtualRegister virtualRegister = node.virtualRegister;
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ info.initNone(nodeIndex, node.refCount);
+ }
+ void cellResult(GPRReg reg, NodeIndex nodeIndex)
+ {
+ Node& node = m_jit.graph()[nodeIndex];
+ useChildren(node);
+
+ VirtualRegister virtualRegister = node.virtualRegister;
+ m_gprs.retain(reg, virtualRegister, SpillOrderCell);
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ info.initCell(nodeIndex, node.refCount, reg);
+ }
+ void jsValueResult(GPRReg reg, NodeIndex nodeIndex, DataFormat format = DataFormatJS)
+ {
+ if (format == DataFormatJSInteger)
+ m_jit.jitAssertIsJSInt32(reg);
+
+ Node& node = m_jit.graph()[nodeIndex];
+ useChildren(node);
+
+ VirtualRegister virtualRegister = node.virtualRegister;
+ m_gprs.retain(reg, virtualRegister, SpillOrderJS);
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ info.initJSValue(nodeIndex, node.refCount, reg, format);
+ }
+ void doubleResult(FPRReg reg, NodeIndex nodeIndex)
+ {
+ Node& node = m_jit.graph()[nodeIndex];
+ useChildren(node);
+
+ VirtualRegister virtualRegister = node.virtualRegister;
+ m_fprs.retain(reg, virtualRegister, SpillOrderDouble);
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ info.initDouble(nodeIndex, node.refCount, reg);
+ }
+ void initConstantInfo(NodeIndex nodeIndex)
+ {
+ ASSERT(isInt32Constant(nodeIndex) || isDoubleConstant(nodeIndex) || isJSConstant(nodeIndex));
+ Node& node = m_jit.graph()[nodeIndex];
+ m_generationInfo[node.virtualRegister].initConstant(nodeIndex, node.refCount);
+ }
+
+ // These methods are used to sort arguments into the correct registers.
+ template<GPRReg destA, GPRReg destB>
+ void setupTwoStubArgs(GPRReg srcA, GPRReg srcB)
+ {
+ // Assuming that srcA != srcB, there are 7 interesting states the registers may be in:
+ // (1) both are already in arg regs, the right way around.
+ // (2) both are already in arg regs, the wrong way around.
+ // (3) neither are currently in arg registers.
+ // (4) srcA is in its correct reg.
+ // (5) srcA is in the incorrect reg.
+ // (6) srcB is in its correct reg.
+ // (7) srcB is in the incorrect reg.
+ //
+ // The trivial approach is to simply emit two moves, to put srcA in place then srcB in
+ // place (the MacroAssembler will omit redundant moves). This approach will be safe in
+ // cases 1, 3, 4, 5, 6, and in cases where srcA==srcB. The two problem cases are 2
+ // (requires a swap) and 7 (must move srcB first, to avoid trampling.)
+
+ if (srcB != destA) {
+ // Handle the easy cases - two simple moves.
+ m_jit.move(JITCompiler::gprToRegisterID(srcA), JITCompiler::gprToRegisterID(destA));
+ m_jit.move(JITCompiler::gprToRegisterID(srcB), JITCompiler::gprToRegisterID(destB));
+ } else if (srcA != destB) {
+ // Handle the non-swap case - just put srcB in place first.
+ m_jit.move(JITCompiler::gprToRegisterID(srcB), JITCompiler::gprToRegisterID(destB));
+ m_jit.move(JITCompiler::gprToRegisterID(srcA), JITCompiler::gprToRegisterID(destA));
+ } else
+ m_jit.swap(JITCompiler::gprToRegisterID(destA), JITCompiler::gprToRegisterID(destB));
+ }
+ template<FPRReg destA, FPRReg destB>
+ void setupTwoStubArgs(FPRReg srcA, FPRReg srcB)
+ {
+ // Assuming that srcA != srcB, there are 7 interesting states the registers may be in:
+ // (1) both are already in arg regs, the right way around.
+ // (2) both are already in arg regs, the wrong way around.
+ // (3) neither are currently in arg registers.
+ // (4) srcA is in its correct reg.
+ // (5) srcA is in the incorrect reg.
+ // (6) srcB is in its correct reg.
+ // (7) srcB is in the incorrect reg.
+ //
+ // The trivial approach is to simply emit two moves, to put srcA in place then srcB in
+ // place (the MacroAssembler will omit redundant moves). This approach will be safe in
+ // cases 1, 3, 4, 5, 6, and in cases where srcA==srcB. The two problem cases are 2
+ // (requires a swap) and 7 (must move srcB first, to avoid trampling.)
+
+ if (srcB != destA) {
+ // Handle the easy cases - two simple moves.
+ m_jit.moveDouble(JITCompiler::fprToRegisterID(srcA), JITCompiler::fprToRegisterID(destA));
+ m_jit.moveDouble(JITCompiler::fprToRegisterID(srcB), JITCompiler::fprToRegisterID(destB));
+ return;
+ }
+
+ if (srcA != destB) {
+ // Handle the non-swap case - just put srcB in place first.
+ m_jit.moveDouble(JITCompiler::fprToRegisterID(srcB), JITCompiler::fprToRegisterID(destB));
+ m_jit.moveDouble(JITCompiler::fprToRegisterID(srcA), JITCompiler::fprToRegisterID(destA));
+ return;
+ }
+
+ ASSERT(srcB == destA && srcA == destB);
+ // Need to swap; pick a temporary register that is neither destA nor destB.
+ FPRReg temp;
+ if (destA != JITCompiler::argumentFPR3 && destB != JITCompiler::argumentFPR3)
+ temp = JITCompiler::argumentFPR3;
+ else if (destA != JITCompiler::argumentFPR2 && destB != JITCompiler::argumentFPR2)
+ temp = JITCompiler::argumentFPR2;
+ else {
+ ASSERT(destA != JITCompiler::argumentFPR1 && destB != JITCompiler::argumentFPR1);
+ temp = JITCompiler::argumentFPR1;
+ }
+ m_jit.moveDouble(JITCompiler::fprToRegisterID(destA), JITCompiler::fprToRegisterID(temp));
+ m_jit.moveDouble(JITCompiler::fprToRegisterID(destB), JITCompiler::fprToRegisterID(destA));
+ m_jit.moveDouble(JITCompiler::fprToRegisterID(temp), JITCompiler::fprToRegisterID(destB));
+ }
+ void setupStubArguments(GPRReg arg1, GPRReg arg2)
+ {
+ setupTwoStubArgs<JITCompiler::argumentGPR1, JITCompiler::argumentGPR2>(arg1, arg2);
+ }
+ void setupStubArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3)
+ {
+ // If neither of arg2/arg3 is in our way, then we can move arg1 into place.
+ // Then we can use setupTwoStubArgs to fix arg2/arg3.
+ if (arg2 != JITCompiler::argumentGPR1 && arg3 != JITCompiler::argumentGPR1) {
+ m_jit.move(JITCompiler::gprToRegisterID(arg1), JITCompiler::argumentRegister1);
+ setupTwoStubArgs<JITCompiler::argumentGPR2, JITCompiler::argumentGPR3>(arg2, arg3);
+ return;
+ }
+
+ // If neither of arg1/arg3 is in our way, then we can move arg2 into place.
+ // Then we can use setupTwoStubArgs to fix arg1/arg3.
+ if (arg1 != JITCompiler::argumentGPR2 && arg3 != JITCompiler::argumentGPR2) {
+ m_jit.move(JITCompiler::gprToRegisterID(arg2), JITCompiler::argumentRegister2);
+ setupTwoStubArgs<JITCompiler::argumentGPR1, JITCompiler::argumentGPR3>(arg1, arg3);
+ return;
+ }
+
+ // If neither of arg1/arg2 is in our way, then we can move arg3 into place.
+ // Then we can use setupTwoStubArgs to fix arg1/arg2.
+ if (arg1 != JITCompiler::argumentGPR3 && arg2 != JITCompiler::argumentGPR3) {
+ m_jit.move(JITCompiler::gprToRegisterID(arg3), JITCompiler::argumentRegister3);
+ setupTwoStubArgs<JITCompiler::argumentGPR1, JITCompiler::argumentGPR2>(arg1, arg2);
+ return;
+ }
+
+ // If we get here, we haven't been able to move any of arg1/arg2/arg3.
+ // Since all three are blocked, then all three must already be in argument registers.
+ // But are they in the right ones?
+
+ // First, ensure arg1 is in place.
+ if (arg1 != JITCompiler::argumentGPR1) {
+ m_jit.swap(JITCompiler::gprToRegisterID(arg1), JITCompiler::argumentRegister1);
+
+ // If arg1 wasn't in argumentGPR1, one of arg2/arg3 must be.
+ ASSERT(arg2 == JITCompiler::argumentGPR1 || arg3 == JITCompiler::argumentGPR1);
+ // If arg2 was in argumentGPR1 it no longer is (due to the swap).
+ // Otherwise arg3 must have been; mark it as moved.
+ if (arg2 == JITCompiler::argumentGPR1)
+ arg2 = arg1;
+ else
+ arg3 = arg1;
+ }
+
+ // Either arg2 & arg3 need swapping, or we're all done.
+ ASSERT((arg2 == JITCompiler::argumentGPR2 && arg3 == JITCompiler::argumentGPR3)
+ || (arg2 == JITCompiler::argumentGPR3 && arg3 == JITCompiler::argumentGPR2));
+
+ if (arg2 != JITCompiler::argumentGPR2)
+ m_jit.swap(JITCompiler::argumentRegister2, JITCompiler::argumentRegister3);
+ }
+
+ // These methods add calls to C++ helper functions.
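+ // (The calling convention assumed by all of the overloads below: the exec
+ // CallFrame* is passed in argumentRegister0, JSValue/pointer arguments in
+ // argumentRegister1..3, and results come back in returnValueRegister or
+ // fpReturnValueRegister.)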
+ void callOperation(J_DFGOperation_EJP operation, GPRReg result, GPRReg arg1, void* pointer) + { + ASSERT(isFlushed()); + + m_jit.move(JITCompiler::gprToRegisterID(arg1), JITCompiler::argumentRegister1); + m_jit.move(JITCompiler::TrustedImmPtr(pointer), JITCompiler::argumentRegister2); + m_jit.move(JITCompiler::callFrameRegister, JITCompiler::argumentRegister0); + + appendCallWithExceptionCheck(operation); + m_jit.move(JITCompiler::returnValueRegister, JITCompiler::gprToRegisterID(result)); + } + void callOperation(J_DFGOperation_EJI operation, GPRReg result, GPRReg arg1, Identifier* identifier) + { + callOperation((J_DFGOperation_EJP)operation, result, arg1, identifier); + } + void callOperation(J_DFGOperation_EJ operation, GPRReg result, GPRReg arg1) + { + ASSERT(isFlushed()); + + m_jit.move(JITCompiler::gprToRegisterID(arg1), JITCompiler::argumentRegister1); + m_jit.move(JITCompiler::callFrameRegister, JITCompiler::argumentRegister0); + + appendCallWithExceptionCheck(operation); + m_jit.move(JITCompiler::returnValueRegister, JITCompiler::gprToRegisterID(result)); + } + void callOperation(Z_DFGOperation_EJ operation, GPRReg result, GPRReg arg1) + { + ASSERT(isFlushed()); + + m_jit.move(JITCompiler::gprToRegisterID(arg1), JITCompiler::argumentRegister1); + m_jit.move(JITCompiler::callFrameRegister, JITCompiler::argumentRegister0); + + appendCallWithExceptionCheck(operation); + m_jit.move(JITCompiler::returnValueRegister, JITCompiler::gprToRegisterID(result)); + } + void callOperation(Z_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2) + { + ASSERT(isFlushed()); + + setupStubArguments(arg1, arg2); + m_jit.move(JITCompiler::callFrameRegister, JITCompiler::argumentRegister0); + + appendCallWithExceptionCheck(operation); + m_jit.move(JITCompiler::returnValueRegister, JITCompiler::gprToRegisterID(result)); + } + void callOperation(J_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2) + { + ASSERT(isFlushed()); + + setupStubArguments(arg1, arg2); + m_jit.move(JITCompiler::callFrameRegister, JITCompiler::argumentRegister0); + + appendCallWithExceptionCheck(operation); + m_jit.move(JITCompiler::returnValueRegister, JITCompiler::gprToRegisterID(result)); + } + void callOperation(V_DFGOperation_EJJP operation, GPRReg arg1, GPRReg arg2, void* pointer) + { + ASSERT(isFlushed()); + + setupStubArguments(arg1, arg2); + m_jit.move(JITCompiler::TrustedImmPtr(pointer), JITCompiler::argumentRegister3); + m_jit.move(JITCompiler::callFrameRegister, JITCompiler::argumentRegister0); + + appendCallWithExceptionCheck(operation); + } + void callOperation(V_DFGOperation_EJJI operation, GPRReg arg1, GPRReg arg2, Identifier* identifier) + { + callOperation((V_DFGOperation_EJJP)operation, arg1, arg2, identifier); + } + void callOperation(V_DFGOperation_EJJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3) + { + ASSERT(isFlushed()); + + setupStubArguments(arg1, arg2, arg3); + m_jit.move(JITCompiler::callFrameRegister, JITCompiler::argumentRegister0); + + appendCallWithExceptionCheck(operation); + } + void callOperation(D_DFGOperation_DD operation, FPRReg result, FPRReg arg1, FPRReg arg2) + { + ASSERT(isFlushed()); + + setupTwoStubArgs<JITCompiler::argumentFPR0, JITCompiler::argumentFPR1>(arg1, arg2); + + m_jit.appendCall(operation); + m_jit.moveDouble(JITCompiler::fpReturnValueRegister, JITCompiler::fprToRegisterID(result)); + } + + void appendCallWithExceptionCheck(const FunctionPtr& function) + { + m_jit.appendCallWithExceptionCheck(function, 
m_jit.graph()[m_compileIndex].exceptionInfo);
+ }
+
+ void addBranch(const MacroAssembler::Jump& jump, BlockIndex destination)
+ {
+ m_branches.append(BranchRecord(jump, destination));
+ }
+
+ void linkBranches()
+ {
+ for (size_t i = 0; i < m_branches.size(); ++i) {
+ BranchRecord& branch = m_branches[i];
+ branch.jump.linkTo(m_blockHeads[branch.destination], &m_jit);
+ }
+ }
+
+#ifndef NDEBUG
+ void dump(const char* label = 0);
+#endif
+
+#if DFG_CONSISTENCY_CHECK
+ void checkConsistency();
+#else
+ void checkConsistency() {}
+#endif
+
+ // The JIT, which also provides MacroAssembler functionality.
+ JITCompiler& m_jit;
+ // This flag is used to distinguish speculative and non-speculative
+ // code generation. This is significant when filling spilled values
+ // from the RegisterFile. When spilling we attempt to store information
+ // as to the type of boxed value being stored (int32, double, cell), and
+ // when filling on the speculative path we will retrieve this type info
+ // where available. On the non-speculative path, however, we cannot rely
+ // on the spill format info, since a value being loaded might have
+ // been spilled by either the speculative or non-speculative paths (where
+ // we entered the non-speculative path on an intervening bail-out), and
+ // the value may have been boxed differently on the two paths.
+ bool m_isSpeculative;
+ // The current block and node being generated.
+ BlockIndex m_block;
+ NodeIndex m_compileIndex;
+ // Virtual and physical register maps.
+ Vector<GenerationInfo, 32> m_generationInfo;
+ RegisterBank<GPRReg, numberOfGPRs, SpillOrder, SpillOrderNone, SpillOrderMax> m_gprs;
+ RegisterBank<FPRReg, numberOfFPRs, SpillOrder, SpillOrderNone, SpillOrderMax> m_fprs;
+
+ Vector<MacroAssembler::Label> m_blockHeads;
+ struct BranchRecord {
+ BranchRecord(MacroAssembler::Jump jump, BlockIndex destination)
+ : jump(jump)
+ , destination(destination)
+ {
+ }
+
+ MacroAssembler::Jump jump;
+ BlockIndex destination;
+ };
+ Vector<BranchRecord, 8> m_branches;
+};
+
+// === Operand types ===
+//
+// IntegerOperand, DoubleOperand and JSValueOperand.
+//
+// These classes are used to lock the operands to a node into machine
+// registers. These classes implement the pattern of locking a value
+// into a register at the point of construction only if it is already in
+// registers, and otherwise loading it lazily at the point it is first
+// used. We do so in order to attempt to avoid spilling one operand
+// in order to make space available for another.
+
+class IntegerOperand {
+public:
+ explicit IntegerOperand(JITCodeGenerator* jit, NodeIndex index)
+ : m_jit(jit)
+ , m_index(index)
+ , m_gprOrInvalid(InvalidGPRReg)
+#ifndef NDEBUG
+ , m_format(DataFormatNone)
+#endif
+ {
+ ASSERT(m_jit);
+ if (jit->isFilled(index))
+ gpr();
+ }
+
+ ~IntegerOperand()
+ {
+ ASSERT(m_gprOrInvalid != InvalidGPRReg);
+ m_jit->unlock(m_gprOrInvalid);
+ }
+
+ NodeIndex index() const
+ {
+ return m_index;
+ }
+
+ GPRReg gpr()
+ {
+ if (m_gprOrInvalid == InvalidGPRReg)
+ m_gprOrInvalid = m_jit->fillInteger(index(), m_format);
+ return m_gprOrInvalid;
+ }
+
+ DataFormat format()
+ {
+ gpr(); // m_format is set when m_gpr is locked.
+ ASSERT(m_format == DataFormatInteger || m_format == DataFormatJSInteger); + return m_format; + } + + MacroAssembler::RegisterID registerID() + { + return JITCompiler::gprToRegisterID(gpr()); + } + +private: + JITCodeGenerator* m_jit; + NodeIndex m_index; + GPRReg m_gprOrInvalid; + DataFormat m_format; +}; + +class DoubleOperand { +public: + explicit DoubleOperand(JITCodeGenerator* jit, NodeIndex index) + : m_jit(jit) + , m_index(index) + , m_fprOrInvalid(InvalidFPRReg) + { + ASSERT(m_jit); + if (jit->isFilledDouble(index)) + fpr(); + } + + ~DoubleOperand() + { + ASSERT(m_fprOrInvalid != InvalidFPRReg); + m_jit->unlock(m_fprOrInvalid); + } + + NodeIndex index() const + { + return m_index; + } + + FPRReg fpr() + { + if (m_fprOrInvalid == InvalidFPRReg) + m_fprOrInvalid = m_jit->fillDouble(index()); + return m_fprOrInvalid; + } + + MacroAssembler::FPRegisterID registerID() + { + return JITCompiler::fprToRegisterID(fpr()); + } + +private: + JITCodeGenerator* m_jit; + NodeIndex m_index; + FPRReg m_fprOrInvalid; +}; + +class JSValueOperand { +public: + explicit JSValueOperand(JITCodeGenerator* jit, NodeIndex index) + : m_jit(jit) + , m_index(index) + , m_gprOrInvalid(InvalidGPRReg) + { + ASSERT(m_jit); + if (jit->isFilled(index)) + gpr(); + } + + ~JSValueOperand() + { + ASSERT(m_gprOrInvalid != InvalidGPRReg); + m_jit->unlock(m_gprOrInvalid); + } + + NodeIndex index() const + { + return m_index; + } + + GPRReg gpr() + { + if (m_gprOrInvalid == InvalidGPRReg) + m_gprOrInvalid = m_jit->fillJSValue(index()); + return m_gprOrInvalid; + } + + MacroAssembler::RegisterID registerID() + { + return JITCompiler::gprToRegisterID(gpr()); + } + +private: + JITCodeGenerator* m_jit; + NodeIndex m_index; + GPRReg m_gprOrInvalid; +}; + + +// === Temporaries === +// +// These classes are used to allocate temporary registers. +// A mechanism is provided to attempt to reuse the registers +// currently allocated to child nodes whose value is consumed +// by, and not live after, this operation. 
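+// For example (a sketch of the intended usage pattern; names assumed, not
+// code from this patch):
+//     IntegerOperand op1(this, node.child1);
+//     GPRTemporary result(this, op1); // may reuse op1's register on its last use
+//     m_jit.add32(Imm32(1), op1.registerID(), result.registerID());
+//     integerResult(result.gpr(), m_compileIndex);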
+ +class GPRTemporary { +public: + GPRTemporary(JITCodeGenerator*); + GPRTemporary(JITCodeGenerator*, SpeculateIntegerOperand&); + GPRTemporary(JITCodeGenerator*, SpeculateIntegerOperand&, SpeculateIntegerOperand&); + GPRTemporary(JITCodeGenerator*, IntegerOperand&); + GPRTemporary(JITCodeGenerator*, IntegerOperand&, IntegerOperand&); + GPRTemporary(JITCodeGenerator*, SpeculateCellOperand&); + GPRTemporary(JITCodeGenerator*, JSValueOperand&); + + ~GPRTemporary() + { + m_jit->unlock(gpr()); + } + + GPRReg gpr() const + { + ASSERT(m_gpr != InvalidGPRReg); + return m_gpr; + } + + MacroAssembler::RegisterID registerID() + { + ASSERT(m_gpr != InvalidGPRReg); + return JITCompiler::gprToRegisterID(m_gpr); + } + +protected: + GPRTemporary(JITCodeGenerator* jit, GPRReg lockedGPR) + : m_jit(jit) + , m_gpr(lockedGPR) + { + } + +private: + JITCodeGenerator* m_jit; + GPRReg m_gpr; +}; + +class FPRTemporary { +public: + FPRTemporary(JITCodeGenerator*); + FPRTemporary(JITCodeGenerator*, DoubleOperand&); + FPRTemporary(JITCodeGenerator*, DoubleOperand&, DoubleOperand&); + + ~FPRTemporary() + { + m_jit->unlock(fpr()); + } + + FPRReg fpr() const + { + ASSERT(m_fpr != InvalidFPRReg); + return m_fpr; + } + + MacroAssembler::FPRegisterID registerID() + { + ASSERT(m_fpr != InvalidFPRReg); + return JITCompiler::fprToRegisterID(m_fpr); + } + +protected: + FPRTemporary(JITCodeGenerator* jit, FPRReg lockedFPR) + : m_jit(jit) + , m_fpr(lockedFPR) + { + } + +private: + JITCodeGenerator* m_jit; + FPRReg m_fpr; +}; + + +// === Results === +// +// These classes lock the result of a call to a C++ helper function. + +class GPRResult : public GPRTemporary { +public: + GPRResult(JITCodeGenerator* jit) + : GPRTemporary(jit, lockedResult(jit)) + { + } + +private: + static GPRReg lockedResult(JITCodeGenerator* jit) + { + jit->lock(JITCompiler::returnValueGPR); + return JITCompiler::returnValueGPR; + } +}; + +class FPRResult : public FPRTemporary { +public: + FPRResult(JITCodeGenerator* jit) + : FPRTemporary(jit, lockedResult(jit)) + { + } + +private: + static FPRReg lockedResult(JITCodeGenerator* jit) + { + jit->lock(JITCompiler::returnValueFPR); + return JITCompiler::returnValueFPR; + } +}; + +} } // namespace JSC::DFG + +#endif +#endif + diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp new file mode 100644 index 0000000..5cd044a --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp @@ -0,0 +1,433 @@ +/* + * Copyright (C) 2011 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGJITCompiler.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "CodeBlock.h"
+#include "DFGJITCodeGenerator.h"
+#include "DFGNonSpeculativeJIT.h"
+#include "DFGOperations.h"
+#include "DFGRegisterBank.h"
+#include "DFGSpeculativeJIT.h"
+#include "JSGlobalData.h"
+#include "LinkBuffer.h"
+
+namespace JSC { namespace DFG {
+
+// This method is used to fill a numeric value into an FPR when linking speculative -> non-speculative.
+void JITCompiler::fillNumericToDouble(NodeIndex nodeIndex, FPRReg fpr, GPRReg temporary)
+{
+ Node& node = graph()[nodeIndex];
+ MacroAssembler::RegisterID tempReg = gprToRegisterID(temporary);
+
+ if (node.isConstant()) {
+ ASSERT(node.op == DoubleConstant);
+ move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(valueOfDoubleConstant(nodeIndex)))), tempReg);
+ movePtrToDouble(tempReg, fprToRegisterID(fpr));
+ } else {
+ loadPtr(addressFor(node.virtualRegister), tempReg);
+ Jump isInteger = branchPtr(MacroAssembler::AboveOrEqual, tempReg, tagTypeNumberRegister);
+ jitAssertIsJSDouble(temporary);
+ addPtr(tagTypeNumberRegister, tempReg);
+ movePtrToDouble(tempReg, fprToRegisterID(fpr));
+ Jump hasUnboxedDouble = jump();
+ isInteger.link(this);
+ convertInt32ToDouble(tempReg, fprToRegisterID(fpr));
+ hasUnboxedDouble.link(this);
+ }
+}
+
+// This method is used to fill an integer value into a GPR when linking speculative -> non-speculative.
+void JITCompiler::fillInt32ToInteger(NodeIndex nodeIndex, GPRReg gpr)
+{
+ Node& node = graph()[nodeIndex];
+
+ if (node.isConstant()) {
+ ASSERT(node.op == Int32Constant);
+ move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gprToRegisterID(gpr));
+ } else {
+#if DFG_JIT_ASSERT
+ // Redundant load, just so we can check the tag!
+ loadPtr(addressFor(node.virtualRegister), gprToRegisterID(gpr));
+ jitAssertIsJSInt32(gpr);
+#endif
+ load32(addressFor(node.virtualRegister), gprToRegisterID(gpr));
+ }
+}
+
+// This method is used to fill a JSValue into a GPR when linking speculative -> non-speculative.
+void JITCompiler::fillToJS(NodeIndex nodeIndex, GPRReg gpr)
+{
+ Node& node = graph()[nodeIndex];
+
+ if (node.isConstant()) {
+ if (isInt32Constant(nodeIndex)) {
+ JSValue jsValue = jsNumber(valueOfInt32Constant(nodeIndex));
+ move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gprToRegisterID(gpr));
+ } else if (isDoubleConstant(nodeIndex)) {
+ JSValue jsValue(JSValue::EncodeAsDouble, valueOfDoubleConstant(nodeIndex));
+ move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gprToRegisterID(gpr));
+ } else {
+ ASSERT(isJSConstant(nodeIndex));
+ JSValue jsValue = valueOfJSConstant(nodeIndex);
+ move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gprToRegisterID(gpr));
+ }
+ return;
+ }
+
+ loadPtr(addressFor(node.virtualRegister), gprToRegisterID(gpr));
+}
+
+void JITCompiler::jumpFromSpeculativeToNonSpeculative(const SpeculationCheck& check, const EntryLocation& entry, SpeculationRecovery* recovery)
+{
+ ASSERT(check.m_nodeIndex == entry.m_nodeIndex);
+
+ // Link the jump from the Speculative path to here.
+ check.m_check.link(this);
+
+ // Check whether this speculation check requires any additional recovery to be
+ // performed, to restore any state that has been overwritten before we enter
+ // back into the non-speculative path.
+ if (recovery) {
+ // The only additional recovery we currently support is for the integer add operation.
+ ASSERT(recovery->type() == SpeculativeAdd);
+ // Revert the add.
+ sub32(gprToRegisterID(recovery->src()), gprToRegisterID(recovery->dest()));
+ }
+
+ // FIXME: - This is hideously inefficient!
+ // Where a value is live in a register in the speculative path, and is required in a register
+ // on the non-speculative path, we should not need to be spilling it and reloading (we may
+ // need to spill anyway, if the value is marked as spilled on the non-speculative path).
+ // This may also be spilling values that don't need spilling, e.g. are already spilled,
+ // are constants, or are arguments.
+
+ // Spill all GPRs in use by the speculative path.
+ for (GPRReg gpr = gpr0; gpr < numberOfGPRs; next(gpr)) {
+ NodeIndex nodeIndex = check.m_gprInfo[gpr].nodeIndex;
+ if (nodeIndex == NoNode)
+ continue;
+
+ DataFormat dataFormat = check.m_gprInfo[gpr].format;
+ VirtualRegister virtualRegister = graph()[nodeIndex].virtualRegister;
+
+ ASSERT(dataFormat == DataFormatInteger || dataFormat == DataFormatCell || dataFormat & DataFormatJS);
+ if (dataFormat == DataFormatInteger)
+ orPtr(tagTypeNumberRegister, gprToRegisterID(gpr));
+ storePtr(gprToRegisterID(gpr), addressFor(virtualRegister));
+ }
+
+ // Spill all FPRs in use by the speculative path.
+ for (FPRReg fpr = fpr0; fpr < numberOfFPRs; next(fpr)) {
+ NodeIndex nodeIndex = check.m_fprInfo[fpr];
+ if (nodeIndex == NoNode)
+ continue;
+
+ VirtualRegister virtualRegister = graph()[nodeIndex].virtualRegister;
+
+ moveDoubleToPtr(fprToRegisterID(fpr), regT0);
+ subPtr(tagTypeNumberRegister, regT0);
+ storePtr(regT0, addressFor(virtualRegister));
+ }
+
+ // Fill all FPRs in use by the non-speculative path.
+ for (FPRReg fpr = fpr0; fpr < numberOfFPRs; next(fpr)) {
+ NodeIndex nodeIndex = entry.m_fprInfo[fpr];
+ if (nodeIndex == NoNode)
+ continue;
+
+ fillNumericToDouble(nodeIndex, fpr, gpr0);
+ }
+
+ // Fill all GPRs in use by the non-speculative path.
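+ // (Ordering note: the fills below must follow all of the spills above, since
+ // fillInt32ToInteger/fillToJS reload from the RegisterFile slots that the
+ // spill loops just wrote, and the same virtual registers may be involved.)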
+    for (GPRReg gpr = gpr0; gpr < numberOfGPRs; next(gpr)) {
+        NodeIndex nodeIndex = entry.m_gprInfo[gpr].nodeIndex;
+        if (nodeIndex == NoNode)
+            continue;
+
+        DataFormat dataFormat = entry.m_gprInfo[gpr].format;
+        if (dataFormat == DataFormatInteger)
+            fillInt32ToInteger(nodeIndex, gpr);
+        else {
+            ASSERT(dataFormat & DataFormatJS || dataFormat == DataFormatCell); // Treat cell as JSValue for now!
+            fillToJS(nodeIndex, gpr);
+            // FIXME: For subtypes of DataFormatJS, should jitAssert the subtype?
+        }
+    }
+
+    // Jump into the non-speculative path.
+    jump(entry.m_entry);
+}
+
+void JITCompiler::linkSpeculationChecks(SpeculativeJIT& speculative, NonSpeculativeJIT& nonSpeculative)
+{
+    // Iterators to walk over the set of bail-outs & corresponding entry points.
+    SpeculationCheckVector::Iterator checksIter = speculative.speculationChecks().begin();
+    SpeculationCheckVector::Iterator checksEnd = speculative.speculationChecks().end();
+    NonSpeculativeJIT::EntryLocationVector::Iterator entriesIter = nonSpeculative.entryLocations().begin();
+    NonSpeculativeJIT::EntryLocationVector::Iterator entriesEnd = nonSpeculative.entryLocations().end();
+
+    // Iterate over the speculation checks.
+    while (checksIter != checksEnd) {
+        // For every bail-out from the speculative path, we must have provided an entry point
+        // into the non-speculative one.
+        ASSERT(checksIter->m_nodeIndex == entriesIter->m_nodeIndex);
+
+        // There may be multiple bail-outs that map to the same entry point!
+        do {
+            ASSERT(checksIter != checksEnd);
+            ASSERT(entriesIter != entriesEnd);
+
+            // Plant code to link this speculation failure.
+            const SpeculationCheck& check = *checksIter;
+            const EntryLocation& entry = *entriesIter;
+            jumpFromSpeculativeToNonSpeculative(check, entry, speculative.speculationRecovery(check.m_recoveryIndex));
+            ++checksIter;
+        } while (checksIter != checksEnd && checksIter->m_nodeIndex == entriesIter->m_nodeIndex);
+        ++entriesIter;
+    }
+
+    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56289
+    ASSERT(!(checksIter != checksEnd));
+    ASSERT(!(entriesIter != entriesEnd));
+}
+
+void JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck)
+{
+    // === Stage 1 - Function header code generation ===
+    //
+    // This code currently matches the old JIT. In the function header we need to
+    // pop the return address (since we do not allow any recursion on the machine
+    // stack), and perform a fast register file check.
+
+    // This is the main entry point, without performing an arity check.
+    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
+    // We'll need to convert the remaining cti_ style calls (specifically the register file
+    // check) which will be dependent on stack layout. (We'd need to account for this in
+    // both normal return code and when jumping to an exception handler).
+    preserveReturnAddressAfterCall(regT2);
+    emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
+    // If we needed to perform an arity check we will already have moved the return address,
+    // so enter after this.
+    Label fromArityCheck(this);
+
+    // Set up a pointer to the codeblock in the CallFrameHeader.
+    emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
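For reference, the pairing contract that linkSpeculationChecks above relies on fits in a few lines: both sequences are ordered by node index, every check has a matching entry, and several checks may share one entry, so a simple merge suffices. A hedged stand-alone model of that walk (types illustrative):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Check { uint32_t nodeIndex; };
    struct Entry { uint32_t nodeIndex; };

    // For each check, compute the index of the entry point it should jump to.
    std::vector<size_t> pairChecksWithEntries(const std::vector<Check>& checks, const std::vector<Entry>& entries)
    {
        std::vector<size_t> entryForCheck;
        size_t e = 0;
        for (size_t c = 0; c < checks.size(); ) {
            assert(e < entries.size());
            assert(checks[c].nodeIndex == entries[e].nodeIndex);
            do
                entryForCheck.push_back(e); // many checks -> one entry
            while (++c < checks.size() && checks[c].nodeIndex == entries[e].nodeIndex);
            ++e;
        }
        return entryForCheck;
    }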
+    // Plant a check that sufficient space is available in the RegisterFile.
+    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
+    addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
+    Jump registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT1);
+    // Return here after register file check.
+    Label fromRegisterFileCheck = label();
+
+
+    // === Stage 2 - Function body code generation ===
+    //
+    // We generate the speculative code path, followed by the non-speculative
+    // code for the function. Next we need to link the two together, making
+    // bail-outs from the speculative path jump to the corresponding point on
+    // the non-speculative one (and generating any code necessary to juggle
+    // register values around, rebox values, and ensure values are spilled,
+    // to match the non-speculative path's requirements).
+
+#if DFG_JIT_BREAK_ON_EVERY_FUNCTION
+    // Handy debug tool!
+    breakpoint();
+#endif
+
+    // First generate the speculative path.
+    Label speculativePathBegin = label();
+    SpeculativeJIT speculative(*this);
+    bool compiledSpeculative = speculative.compile();
+
+    // Next, generate the non-speculative path. We pass this a SpeculationCheckIndexIterator
+    // to allow it to check which nodes in the graph may bail out, and may need to reenter the
+    // non-speculative path.
+    if (compiledSpeculative) {
+        SpeculationCheckIndexIterator checkIterator(speculative.speculationChecks());
+        NonSpeculativeJIT nonSpeculative(*this);
+        nonSpeculative.compile(checkIterator);
+
+        // Link the bail-outs from the speculative path to the corresponding entry points into the non-speculative one.
+        linkSpeculationChecks(speculative, nonSpeculative);
+    } else {
+        // If compilation through the SpeculativeJIT failed, throw away the code we generated.
+        m_calls.clear();
+        rewindToLabel(speculativePathBegin);
+
+        SpeculationCheckVector noChecks;
+        SpeculationCheckIndexIterator checkIterator(noChecks);
+        NonSpeculativeJIT nonSpeculative(*this);
+        nonSpeculative.compile(checkIterator);
+    }
+
+    // === Stage 3 - Function footer code generation ===
+    //
+    // Generate code to look up and jump to exception handlers, to perform the slow
+    // register file check (if the fast one in the function header fails), and to
+    // generate the entry point with arity check.
+
+    // Iterate over the m_calls vector, checking for exception checks,
+    // and linking them to here.
+    unsigned exceptionCheckCount = 0;
+    for (unsigned i = 0; i < m_calls.size(); ++i) {
+        Jump& exceptionCheck = m_calls[i].m_exceptionCheck;
+        if (exceptionCheck.isSet()) {
+            exceptionCheck.link(this);
+            ++exceptionCheckCount;
+        }
+    }
+    // If any exception checks were linked, generate code to look up a handler.
+    if (exceptionCheckCount) {
+        // lookupExceptionHandler is passed two arguments, exec (the CallFrame*), and
+        // an identifier for the operation that threw the exception, which we can use
+        // to look up handler information. The identifier we use is the return address
+        // of the call out from JIT code that threw the exception; this is still
+        // available on the stack, just below the stack pointer!
+        move(callFrameRegister, argumentRegister0);
+        peek(argumentRegister1, -1);
+        m_calls.append(CallRecord(call(), lookupExceptionHandler));
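The "identifier" peeked from the stack here is a return address; the callReturnIndexVector populated during linking (below) is what turns such an address into a bytecode offset for handler lookup. A rough model of that reverse mapping, assuming - as the append order below suggests - that the table ends up sorted by return-address offset (the consuming side lives in lookupExceptionHandler, outside this diff):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct CallReturnOffsetToBytecodeOffset {
        uint32_t callReturnOffset; // offset of a call's return address within the JIT code
        uint32_t bytecodeOffset;   // the ExceptionInfo used to find handler/source info
    };

    // Assumes entries were appended in ascending callReturnOffset order.
    uint32_t bytecodeOffsetForReturnAddress(const std::vector<CallReturnOffsetToBytecodeOffset>& table, uint32_t returnOffset)
    {
        auto it = std::lower_bound(table.begin(), table.end(), returnOffset,
            [](const CallReturnOffsetToBytecodeOffset& entry, uint32_t offset) {
                return entry.callReturnOffset < offset;
            });
        return (it != table.end() && it->callReturnOffset == returnOffset) ? it->bytecodeOffset : UINT32_MAX;
    }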
+        // lookupExceptionHandler leaves the handler CallFrame* in the returnValueRegister,
+        // and the address of the handler in returnValueRegister2.
+        jump(returnValueRegister2);
+    }
+
+    // Generate the register file check; if the fast check in the function header fails,
+    // we need to call out to a helper function to check whether more space is available.
+    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
+    registerFileCheck.link(this);
+    move(stackPointerRegister, argumentRegister0);
+    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+    Call callRegisterFileCheck = call();
+    jump(fromRegisterFileCheck);
+
+    // The fast entry point into a function does not check that the correct number of arguments
+    // have been passed to the call (we only use the fast entry point where we can statically
+    // determine that the correct number of arguments have been passed, or have already checked).
+    // In cases where an arity check is necessary, we enter here.
+    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
+    Label arityCheck = label();
+    preserveReturnAddressAfterCall(regT2);
+    emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
+    branch32(Equal, regT1, Imm32(m_codeBlock->m_numParameters)).linkTo(fromArityCheck, this);
+    move(stackPointerRegister, argumentRegister0);
+    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+    Call callArityCheck = call();
+    move(regT0, callFrameRegister);
+    jump(fromArityCheck);
+
+
+    // === Stage 4 - Link ===
+    //
+    // Link the code, populate data in CodeBlock data structures.
+
+    LinkBuffer linkBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()), 0);
+
+#if DFG_DEBUG_VERBOSE
+    fprintf(stderr, "JIT code start at %p\n", linkBuffer.debugAddress());
+#endif
+
+    // Link all calls out from the JIT code to their respective functions.
+    for (unsigned i = 0; i < m_calls.size(); ++i)
+        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);
+
+    if (m_codeBlock->needsCallReturnIndices()) {
+        m_codeBlock->callReturnIndexVector().reserveCapacity(exceptionCheckCount);
+        for (unsigned i = 0; i < m_calls.size(); ++i) {
+            if (m_calls[i].m_exceptionCheck.isSet()) {
+                unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_calls[i].m_call);
+                unsigned exceptionInfo = m_calls[i].m_exceptionInfo;
+                m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(returnAddressOffset, exceptionInfo));
+            }
+        }
+    }
+
+    // FIXME: switch the register file check & arity check over to DFGOperation style calls, not JIT stubs.
+    linkBuffer.link(callRegisterFileCheck, cti_register_file_check);
+    linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ?
cti_op_construct_arityCheck : cti_op_call_arityCheck); + + entryWithArityCheck = linkBuffer.locationOf(arityCheck); + entry = linkBuffer.finalizeCode(); +} + +#if DFG_JIT_ASSERT +void JITCompiler::jitAssertIsInt32(GPRReg gpr) +{ +#if CPU(X86_64) + Jump checkInt32 = branchPtr(BelowOrEqual, gprToRegisterID(gpr), TrustedImmPtr(reinterpret_cast<void*>(static_cast<uintptr_t>(0xFFFFFFFFu)))); + breakpoint(); + checkInt32.link(this); +#else + UNUSED_PARAM(gpr); +#endif +} + +void JITCompiler::jitAssertIsJSInt32(GPRReg gpr) +{ + Jump checkJSInt32 = branchPtr(AboveOrEqual, gprToRegisterID(gpr), tagTypeNumberRegister); + breakpoint(); + checkJSInt32.link(this); +} + +void JITCompiler::jitAssertIsJSNumber(GPRReg gpr) +{ + Jump checkJSNumber = branchTestPtr(MacroAssembler::NonZero, gprToRegisterID(gpr), tagTypeNumberRegister); + breakpoint(); + checkJSNumber.link(this); +} + +void JITCompiler::jitAssertIsJSDouble(GPRReg gpr) +{ + Jump checkJSInt32 = branchPtr(AboveOrEqual, gprToRegisterID(gpr), tagTypeNumberRegister); + Jump checkJSNumber = branchTestPtr(MacroAssembler::NonZero, gprToRegisterID(gpr), tagTypeNumberRegister); + checkJSInt32.link(this); + breakpoint(); + checkJSNumber.link(this); +} +#endif + +#if ENABLE(SAMPLING_COUNTERS) && CPU(X86_64) // Or any other 64-bit platform! +void JITCompiler::emitCount(AbstractSamplingCounter& counter, uint32_t increment) +{ + addPtr(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter())); +} +#endif + +#if ENABLE(SAMPLING_COUNTERS) && CPU(X86) // Or any other little-endian 32-bit platform! +void JITCompiler::emitCount(AbstractSamplingCounter& counter, uint32_t increment) +{ + intptr_t hiWord = reinterpret_cast<intptr_t>(counter.addressOfCounter()) + sizeof(int32_t); + add32(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter())); + addWithCarry32(TrustedImm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord))); +} +#endif + +} } // namespace JSC::DFG + +#endif diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.h b/Source/JavaScriptCore/dfg/DFGJITCompiler.h new file mode 100644 index 0000000..03ae2b8 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.h @@ -0,0 +1,329 @@ +/* + * Copyright (C) 2011 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef DFGJITCompiler_h
+#define DFGJITCompiler_h
+
+#if ENABLE(DFG_JIT)
+
+#include <assembler/MacroAssembler.h>
+#include <bytecode/CodeBlock.h>
+#include <dfg/DFGGraph.h>
+#include <jit/JITCode.h>
+
+namespace JSC {
+
+class AbstractSamplingCounter;
+class CodeBlock;
+class JSGlobalData;
+
+namespace DFG {
+
+class JITCodeGenerator;
+class NonSpeculativeJIT;
+class SpeculativeJIT;
+class SpeculationRecovery;
+
+struct EntryLocation;
+struct SpeculationCheck;
+
+// Abstracted sequential numbering of available machine registers (as opposed to MacroAssembler::RegisterID,
+// which are non-sequential, and not abstracted from the register numbering used by the underlying processor).
+enum GPRReg { gpr0, gpr1, gpr2, gpr3, gpr4, gpr5, numberOfGPRs, InvalidGPRReg = 0xFFFFFFFF };
+enum FPRReg { fpr0, fpr1, fpr2, fpr3, fpr4, fpr5, numberOfFPRs, InvalidFPRReg = 0xFFFFFFFF };
+
+// GPRReg/FPRReg are enum types to provide type checking at compile time; use these methods to iterate.
+inline GPRReg next(GPRReg& reg)
+{
+    ASSERT(reg < numberOfGPRs);
+    return reg = static_cast<GPRReg>(reg + 1);
+}
+inline FPRReg next(FPRReg& reg)
+{
+    ASSERT(reg < numberOfFPRs);
+    return reg = static_cast<FPRReg>(reg + 1);
+}
+
+// === CallRecord ===
+//
+// A record of a call out from JIT code to a helper function.
+// Every CallRecord contains a reference to the call instruction & the function
+// that it needs to be linked to. Calls that might throw an exception also record
+// the Jump taken on exception (unset if not present), and ExceptionInfo (presently
+// an unsigned, bytecode index) used to recover handler/source info.
+struct CallRecord {
+    // Constructor for a call with no exception handler.
+    CallRecord(MacroAssembler::Call call, FunctionPtr function)
+        : m_call(call)
+        , m_function(function)
+    {
+    }
+
+    // Constructor for a call with an exception handler.
+    CallRecord(MacroAssembler::Call call, FunctionPtr function, MacroAssembler::Jump exceptionCheck, ExceptionInfo exceptionInfo)
+        : m_call(call)
+        , m_function(function)
+        , m_exceptionCheck(exceptionCheck)
+        , m_exceptionInfo(exceptionInfo)
+    {
+    }
+
+    MacroAssembler::Call m_call;
+    FunctionPtr m_function;
+    MacroAssembler::Jump m_exceptionCheck;
+    ExceptionInfo m_exceptionInfo;
+};
+
+// === JITCompiler ===
+//
+// DFG::JITCompiler is responsible for generating JIT code from the dataflow graph.
+// It does so by delegating to the speculative & non-speculative JITs, which
+// generate to a MacroAssembler (which the JITCompiler owns through an inheritance
+// relationship). The JITCompiler holds references to information required during
+// compilation, and also records information used in linking (e.g. a list of all
+// calls to be linked).
+class JITCompiler : public MacroAssembler {
+public:
+    JITCompiler(JSGlobalData* globalData, Graph& dfg, CodeBlock* codeBlock)
+        : m_globalData(globalData)
+        , m_graph(dfg)
+        , m_codeBlock(codeBlock)
+    {
+    }
+
+    void compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck);
+
+    // Accessors for properties.
+    Graph& graph() { return m_graph; }
+    CodeBlock* codeBlock() { return m_codeBlock; }
+    JSGlobalData* globalData() { return m_globalData; }
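The sequential numbering pays off in two ways the class below depends on: per-register state can live in flat arrays indexed by GPRReg/FPRReg, and translation to a machine register is a single table lookup. A small stand-alone illustration (the machine-register names are placeholders for MacroAssembler::RegisterID values):

    #include <cassert>

    enum GPRReg { gpr0, gpr1, gpr2, gpr3, gpr4, gpr5, numberOfGPRs };
    enum MachineReg { eax, edx, ecx, ebx, edi, esi }; // illustrative RegisterID stand-ins

    inline GPRReg next(GPRReg& reg) { return reg = static_cast<GPRReg>(reg + 1); }

    MachineReg gprToMachine(GPRReg reg)
    {
        assert(reg < numberOfGPRs);
        static const MachineReg table[numberOfGPRs] = { eax, edx, ecx, ebx, edi, esi };
        return table[reg]; // O(1) because the abstract numbering is dense
    }

    int main()
    {
        // Dense numbering also makes "for each register" loops trivial:
        int visited = 0;
        for (GPRReg gpr = gpr0; gpr < numberOfGPRs; next(gpr))
            ++visited;
        assert(visited == numberOfGPRs);
        assert(gprToMachine(gpr2) == ecx); // mirrors regT2 == ecx in the table below
    }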
+#if CPU(X86_64)
+    // These registers match the old JIT.
+    static const RegisterID timeoutCheckRegister = X86Registers::r12;
+    static const RegisterID callFrameRegister = X86Registers::r13;
+    static const RegisterID tagTypeNumberRegister = X86Registers::r14;
+    static const RegisterID tagMaskRegister = X86Registers::r15;
+
+    // Temporary registers (these correspond to the temporary GPRReg/FPRReg
+    // registers, i.e. regT0 and gpr0 refer to the same thing, gpr0 being
+    // the abstracted, sequential name, and regT0 being the machine register
+    // number in the instruction set, as provided by the MacroAssembler).
+    static const RegisterID regT0 = X86Registers::eax;
+    static const RegisterID regT1 = X86Registers::edx;
+    static const RegisterID regT2 = X86Registers::ecx;
+    static const RegisterID regT3 = X86Registers::ebx;
+    static const RegisterID regT4 = X86Registers::edi;
+    static const RegisterID regT5 = X86Registers::esi;
+    static const FPRegisterID fpRegT0 = X86Registers::xmm0;
+    static const FPRegisterID fpRegT1 = X86Registers::xmm1;
+    static const FPRegisterID fpRegT2 = X86Registers::xmm2;
+    static const FPRegisterID fpRegT3 = X86Registers::xmm3;
+    static const FPRegisterID fpRegT4 = X86Registers::xmm4;
+    static const FPRegisterID fpRegT5 = X86Registers::xmm5;
+
+    // These constants provide both RegisterID & GPRReg style names for the
+    // general purpose argument & return value registers.
+    static const GPRReg argumentGPR0 = gpr4;
+    static const GPRReg argumentGPR1 = gpr5;
+    static const GPRReg argumentGPR2 = gpr1;
+    static const GPRReg argumentGPR3 = gpr2;
+    static const RegisterID argumentRegister0 = regT4;
+    static const RegisterID argumentRegister1 = regT5;
+    static const RegisterID argumentRegister2 = regT1;
+    static const RegisterID argumentRegister3 = regT2;
+    static const GPRReg returnValueGPR = gpr0;
+    static const RegisterID returnValueRegister = regT0;
+    static const RegisterID returnValueRegister2 = regT1;
+
+    // These constants provide both FPRegisterID & FPRReg style names for the
+    // floating point argument & return value registers.
+ static const FPRReg argumentFPR0 = fpr0; + static const FPRReg argumentFPR1 = fpr1; + static const FPRReg argumentFPR2 = fpr2; + static const FPRReg argumentFPR3 = fpr3; + static const FPRegisterID fpArgumentRegister0 = fpRegT0; + static const FPRegisterID fpArgumentRegister1 = fpRegT1; + static const FPRegisterID fpArgumentRegister2 = fpRegT2; + static const FPRegisterID fpArgumentRegister3 = fpRegT3; + static const FPRReg returnValueFPR = fpr0; + static const FPRegisterID fpReturnValueRegister = fpRegT0; + + + void preserveReturnAddressAfterCall(RegisterID reg) + { + pop(reg); + } + + void restoreReturnAddressBeforeReturn(RegisterID reg) + { + push(reg); + } + + void restoreReturnAddressBeforeReturn(Address address) + { + push(address); + } + + void emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to) + { + loadPtr(Address(callFrameRegister, entry * sizeof(Register)), to); + } + void emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry) + { + storePtr(from, Address(callFrameRegister, entry * sizeof(Register))); + } + + void emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry) + { + storePtr(TrustedImmPtr(value), Address(callFrameRegister, entry * sizeof(Register))); + } +#endif + + Address addressForArgument(int32_t argument) + { + return Address(callFrameRegister, (argument - (m_codeBlock->m_numParameters + RegisterFile::CallFrameHeaderSize)) * sizeof(Register)); + } + + static Address addressForGlobalVar(RegisterID global, int32_t varNumber) + { + return Address(global, varNumber * sizeof(Register)); + } + + static Address addressFor(VirtualRegister virtualRegister) + { + return Address(callFrameRegister, virtualRegister * sizeof(Register)); + } + + // These methods provide mapping from sequential register numbering (GPRReg/FPRReg) + // to machine register numbering (RegisterID/FPRegisterID). + static RegisterID gprToRegisterID(GPRReg reg) + { + ASSERT(reg < numberOfGPRs); + static const RegisterID idForRegister[numberOfGPRs] = { regT0, regT1, regT2, regT3, regT4, regT5 }; + return idForRegister[reg]; + } + static FPRegisterID fprToRegisterID(FPRReg reg) + { + ASSERT(reg < numberOfFPRs); + static const FPRegisterID idForRegister[numberOfFPRs] = { fpRegT0, fpRegT1, fpRegT2, fpRegT3, fpRegT4, fpRegT5 }; + return idForRegister[reg]; + } + + // Add a call out from JIT code, without an exception check. + void appendCall(const FunctionPtr& function) + { + m_calls.append(CallRecord(call(), function)); + // FIXME: should be able to JIT_ASSERT here that globalData->exception is null on return back to JIT code. + } + + // Add a call out from JIT code, with an exception check. + void appendCallWithExceptionCheck(const FunctionPtr& function, unsigned exceptionInfo) + { + Call functionCall = call(); + Jump exceptionCheck = branchTestPtr(NonZero, AbsoluteAddress(&globalData()->exception)); + m_calls.append(CallRecord(functionCall, function, exceptionCheck, exceptionInfo)); + } + + // Helper methods to check nodes for constants. + bool isConstant(NodeIndex nodeIndex) + { + return graph()[nodeIndex].isConstant(); + } + bool isInt32Constant(NodeIndex nodeIndex) + { + return graph()[nodeIndex].op == Int32Constant; + } + bool isDoubleConstant(NodeIndex nodeIndex) + { + return graph()[nodeIndex].op == DoubleConstant; + } + bool isJSConstant(NodeIndex nodeIndex) + { + return graph()[nodeIndex].op == JSConstant; + } + + // Helper methods get constant values from nodes. 
+ int32_t valueOfInt32Constant(NodeIndex nodeIndex) + { + ASSERT(isInt32Constant(nodeIndex)); + return graph()[nodeIndex].int32Constant(); + } + double valueOfDoubleConstant(NodeIndex nodeIndex) + { + ASSERT(isDoubleConstant(nodeIndex)); + return graph()[nodeIndex].numericConstant(); + } + JSValue valueOfJSConstant(NodeIndex nodeIndex) + { + ASSERT(isJSConstant(nodeIndex)); + unsigned constantIndex = graph()[nodeIndex].constantNumber(); + return codeBlock()->constantRegister(FirstConstantRegisterIndex + constantIndex).get(); + } + + // These methods JIT generate dynamic, debug-only checks - akin to ASSERTs. +#if DFG_JIT_ASSERT + void jitAssertIsInt32(GPRReg); + void jitAssertIsJSInt32(GPRReg); + void jitAssertIsJSNumber(GPRReg); + void jitAssertIsJSDouble(GPRReg); +#else + void jitAssertIsInt32(GPRReg) {} + void jitAssertIsJSInt32(GPRReg) {} + void jitAssertIsJSNumber(GPRReg) {} + void jitAssertIsJSDouble(GPRReg) {} +#endif + +#if ENABLE(SAMPLING_COUNTERS) + // Debug profiling tool. + void emitCount(AbstractSamplingCounter&, uint32_t increment = 1); +#endif + +private: + // These methods used in linking the speculative & non-speculative paths together. + void fillNumericToDouble(NodeIndex, FPRReg, GPRReg temporary); + void fillInt32ToInteger(NodeIndex, GPRReg); + void fillToJS(NodeIndex, GPRReg); + void jumpFromSpeculativeToNonSpeculative(const SpeculationCheck&, const EntryLocation&, SpeculationRecovery*); + void linkSpeculationChecks(SpeculativeJIT&, NonSpeculativeJIT&); + + // The globalData, used to access constants such as the vPtrs. + JSGlobalData* m_globalData; + + // The dataflow graph currently being generated. + Graph& m_graph; + + // The codeBlock currently being generated, used to access information such as constant values, immediates. + CodeBlock* m_codeBlock; + + // Vector of calls out from JIT code, including exception handler information. + Vector<CallRecord> m_calls; +}; + +} } // namespace JSC::DFG + +#endif +#endif + diff --git a/Source/JavaScriptCore/dfg/DFGNode.h b/Source/JavaScriptCore/dfg/DFGNode.h new file mode 100644 index 0000000..2a5b6dd --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGNode.h @@ -0,0 +1,355 @@ +/* + * Copyright (C) 2011 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef DFGNode_h
+#define DFGNode_h
+
+// Emit various logging information for debugging, including dumping the dataflow graphs.
+#define DFG_DEBUG_VERBOSE 0
+// Enable generation of dynamic checks into the instruction stream.
+#define DFG_JIT_ASSERT 0
+// Consistency-check the contents of compiler data structures.
+#define DFG_CONSISTENCY_CHECK 0
+// Emit a breakpoint into the head of every generated function, to aid debugging in GDB.
+#define DFG_JIT_BREAK_ON_EVERY_FUNCTION 0
+// Emit a breakpoint into the head of every generated block, to aid debugging in GDB.
+#define DFG_JIT_BREAK_ON_EVERY_BLOCK 0
+// Emit a breakpoint into the head of every generated node, to aid debugging in GDB.
+#define DFG_JIT_BREAK_ON_EVERY_NODE 0
+// Disable the DFG JIT without having to touch Platform.h!
+#define DFG_DEBUG_LOCAL_DISBALE 0
+// Generate stats on how successful we were in making use of the DFG JIT, and remaining on the hot path.
+#define DFG_SUCCESS_STATS 0
+
+
+#if ENABLE(DFG_JIT)
+
+#include <wtf/Vector.h>
+
+namespace JSC { namespace DFG {
+
+// Type for a virtual register number (spill location).
+// Using an enum to make this type-checked at compile time, to avert programmer errors.
+enum VirtualRegister { InvalidVirtualRegister = -1 };
+COMPILE_ASSERT(sizeof(VirtualRegister) == sizeof(int), VirtualRegister_is_32bit);
+
+// Type for a reference to another node in the graph.
+typedef uint32_t NodeIndex;
+static const NodeIndex NoNode = UINT_MAX;
+
+// Information used to map back from an exception to any handler/source information.
+// (Presently implemented as a bytecode index).
+typedef uint32_t ExceptionInfo;
+
+// Entries in the NodeType enum (below) are composed of an id, a result type (possibly none)
+// and some additional informative flags (must generate, is constant, etc).
+#define NodeIdMask 0xFFF
+#define NodeResultMask 0xF000
+#define NodeMustGenerate 0x10000 // set on nodes that have side effects, and may not trivially be removed by DCE.
+#define NodeIsConstant 0x20000
+#define NodeIsJump 0x40000
+#define NodeIsBranch 0x80000
+
+// These values record the result type of the node (as checked by NodeResultMask, above), 0 for no result.
+#define NodeResultJS 0x1000
+#define NodeResultDouble 0x2000
+#define NodeResultInt32 0x3000
+
+// This macro defines a set of information about all known node types, used to populate NodeId, NodeType below.
+#define FOR_EACH_DFG_OP(macro) \
+    /* Nodes for constants. */\
+    macro(JSConstant, NodeResultJS | NodeIsConstant) \
+    macro(Int32Constant, NodeResultJS | NodeIsConstant) \
+    macro(DoubleConstant, NodeResultJS | NodeIsConstant) \
+    macro(ConvertThis, NodeResultJS) \
+    \
+    /* Nodes for local variable access. */\
+    macro(GetLocal, NodeResultJS) \
+    macro(SetLocal, NodeMustGenerate) \
+    \
+    /* Nodes for bitwise operations. */\
+    macro(BitAnd, NodeResultInt32) \
+    macro(BitOr, NodeResultInt32) \
+    macro(BitXor, NodeResultInt32) \
+    macro(BitLShift, NodeResultInt32) \
+    macro(BitRShift, NodeResultInt32) \
+    macro(BitURShift, NodeResultInt32) \
+    /* Bitwise operators call ToInt32 on their operands. */\
+    macro(NumberToInt32, NodeResultInt32) \
+    macro(ValueToInt32, NodeResultInt32 | NodeMustGenerate) \
+    /* Used to box the result of URShift nodes (result has range 0..2^32-1). */\
+    macro(UInt32ToNumber, NodeResultDouble) \
+    \
+    /* Nodes for arithmetic operations.
*/\
+    macro(ArithAdd, NodeResultDouble) \
+    macro(ArithSub, NodeResultDouble) \
+    macro(ArithMul, NodeResultDouble) \
+    macro(ArithDiv, NodeResultDouble) \
+    macro(ArithMod, NodeResultDouble) \
+    /* Arithmetic operators call ToNumber on their operands. */\
+    macro(Int32ToNumber, NodeResultDouble) \
+    macro(ValueToNumber, NodeResultDouble | NodeMustGenerate) \
+    \
+    /* Add of values may either be arithmetic, or result in string concatenation. */\
+    macro(ValueAdd, NodeResultJS | NodeMustGenerate) \
+    \
+    /* Property access. */\
+    /* PutByValAlias indicates a 'put' aliases a prior write to the same property. */\
+    /* Since a put to 'length' may invalidate optimizations here, */\
+    /* this must be the directly subsequent property put. */\
+    macro(GetByVal, NodeResultJS | NodeMustGenerate) \
+    macro(PutByVal, NodeMustGenerate) \
+    macro(PutByValAlias, NodeMustGenerate) \
+    macro(GetById, NodeResultJS | NodeMustGenerate) \
+    macro(PutById, NodeMustGenerate) \
+    macro(PutByIdDirect, NodeMustGenerate) \
+    macro(GetGlobalVar, NodeResultJS | NodeMustGenerate) \
+    macro(PutGlobalVar, NodeMustGenerate) \
+    \
+    /* Nodes for comparison operations. */\
+    macro(CompareLess, NodeResultJS | NodeMustGenerate) \
+    macro(CompareLessEq, NodeResultJS | NodeMustGenerate) \
+    macro(CompareEq, NodeResultJS | NodeMustGenerate) \
+    macro(CompareStrictEq, NodeResultJS) \
+    \
+    /* Nodes for misc operations. */\
+    macro(LogicalNot, NodeResultJS) \
+    \
+    /* Block terminals. */\
+    macro(Jump, NodeMustGenerate | NodeIsJump) \
+    macro(Branch, NodeMustGenerate | NodeIsBranch) \
+    macro(Return, NodeMustGenerate)
+
+// This enum generates a monotonically increasing id for all Node types,
+// and is used by the subsequent enum to fill out the id (as accessed via the NodeIdMask).
+enum NodeId {
+#define DFG_OP_ENUM(opcode, flags) opcode##_id,
+    FOR_EACH_DFG_OP(DFG_OP_ENUM)
+#undef DFG_OP_ENUM
+};
+
+// Entries in this enum describe all Node types.
+// The enum value contains a monotonically increasing id, a result type, and additional flags.
+enum NodeType {
+#define DFG_OP_ENUM(opcode, flags) opcode = opcode##_id | (flags),
+    FOR_EACH_DFG_OP(DFG_OP_ENUM)
+#undef DFG_OP_ENUM
+};
+
+// This type is used in passing an immediate argument to a Node constructor;
+// it distinguishes an immediate value (typically an index into a CodeBlock data structure -
+// a constant index, argument, or identifier) from a NodeIndex.
+struct OpInfo {
+    explicit OpInfo(unsigned value) : m_value(value) {}
+    unsigned m_value;
+};
+
+// === Node ===
+//
+// Node represents a single operation in the data flow graph.
+struct Node {
+    // Construct a node with up to 3 children, no immediate value.
+    Node(NodeType op, ExceptionInfo exceptionInfo, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
+        : op(op)
+        , exceptionInfo(exceptionInfo)
+        , child1(child1)
+        , child2(child2)
+        , child3(child3)
+        , virtualRegister(InvalidVirtualRegister)
+        , refCount(0)
+    {
+    }
+
+    // Construct a node with up to 3 children and an immediate value.
+    Node(NodeType op, ExceptionInfo exceptionInfo, OpInfo imm, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
+        : op(op)
+        , exceptionInfo(exceptionInfo)
+        , child1(child1)
+        , child2(child2)
+        , child3(child3)
+        , virtualRegister(InvalidVirtualRegister)
+        , refCount(0)
+        , m_opInfo(imm.m_value)
+    {
+    }
+
+    // Construct a node with up to 3 children and two immediate values.
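To make the encoding concrete: each NodeType value carries its position in FOR_EACH_DFG_OP in the low bits, OR'd with its flags. A worked example for BitAnd, the seventh entry in the list above (so id 6):

    #include <cassert>

    enum {
        NodeIdMask = 0xFFF, NodeResultMask = 0xF000,
        NodeMustGenerate = 0x10000, NodeIsConstant = 0x20000,
        NodeResultInt32 = 0x3000,
    };

    int main()
    {
        unsigned bitAndId = 6;                          // BitAnd's position in the macro list above
        unsigned bitAndOp = bitAndId | NodeResultInt32; // what DFG_OP_ENUM expands to
        assert((bitAndOp & NodeIdMask) == bitAndId);             // recover the id
        assert((bitAndOp & NodeResultMask) == NodeResultInt32);  // Node::hasInt32Result()
        assert(!(bitAndOp & NodeMustGenerate));                  // pure: removable by DCE if unused
        assert(!(bitAndOp & NodeIsConstant));
    }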
+ Node(NodeType op, ExceptionInfo exceptionInfo, OpInfo imm1, OpInfo imm2, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode) + : op(op) + , exceptionInfo(exceptionInfo) + , child1(child1) + , child2(child2) + , child3(child3) + , virtualRegister(InvalidVirtualRegister) + , refCount(0) + , m_opInfo(imm1.m_value) + { + m_constantValue.opInfo2 = imm2.m_value; + } + + bool mustGenerate() + { + return op & NodeMustGenerate; + } + + bool isConstant() + { + return op & NodeIsConstant; + } + + unsigned constantNumber() + { + ASSERT(isConstant()); + return m_opInfo; + } + + bool hasLocal() + { + return op == GetLocal || op == SetLocal; + } + + VirtualRegister local() + { + ASSERT(hasLocal()); + return (VirtualRegister)m_opInfo; + } + + bool hasIdentifier() + { + return op == GetById || op == PutById || op == PutByIdDirect; + } + + unsigned identifierNumber() + { + ASSERT(hasIdentifier()); + return m_opInfo; + } + + bool hasVarNumber() + { + return op == GetGlobalVar || op == PutGlobalVar; + } + + unsigned varNumber() + { + ASSERT(hasVarNumber()); + return m_opInfo; + } + + bool hasInt32Result() + { + return (op & NodeResultMask) == NodeResultInt32; + } + + bool hasDoubleResult() + { + return (op & NodeResultMask) == NodeResultDouble; + } + + bool hasJSResult() + { + return (op & NodeResultMask) == NodeResultJS; + } + + // Check for integers or doubles. + bool hasNumericResult() + { + // This check will need updating if more result types are added. + ASSERT((hasInt32Result() || hasDoubleResult()) == !hasJSResult()); + return !hasJSResult(); + } + + int32_t int32Constant() + { + ASSERT(op == Int32Constant); + return m_constantValue.asInt32; + } + + void setInt32Constant(int32_t value) + { + ASSERT(op == Int32Constant); + m_constantValue.asInt32 = value; + } + + double numericConstant() + { + ASSERT(op == DoubleConstant); + return m_constantValue.asDouble; + } + + void setDoubleConstant(double value) + { + ASSERT(op == DoubleConstant); + m_constantValue.asDouble = value; + } + + bool isJump() + { + return op & NodeIsJump; + } + + bool isBranch() + { + return op & NodeIsBranch; + } + + unsigned takenBytecodeOffset() + { + ASSERT(isBranch() || isJump()); + return m_opInfo; + } + + unsigned notTakenBytecodeOffset() + { + ASSERT(isBranch()); + return m_constantValue.opInfo2; + } + + // This enum value describes the type of the node. + NodeType op; + // Used to look up exception handling information (currently implemented as a bytecode index). + ExceptionInfo exceptionInfo; + // References to up to 3 children (0 for no child). + NodeIndex child1, child2, child3; + // The virtual register number (spill location) associated with this . + VirtualRegister virtualRegister; + // The number of uses of the result of this operation (+1 for 'must generate' nodes, which have side-effects). + unsigned refCount; + +private: + // An immediate value, accesses type-checked via accessors above. + unsigned m_opInfo; + // The value of an int32/double constant. + union { + int32_t asInt32; + double asDouble; + unsigned opInfo2; + } m_constantValue; +}; + +} } // namespace JSC::DFG + +#endif +#endif diff --git a/Source/JavaScriptCore/dfg/DFGNonSpeculativeJIT.cpp b/Source/JavaScriptCore/dfg/DFGNonSpeculativeJIT.cpp new file mode 100644 index 0000000..87c4234 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGNonSpeculativeJIT.cpp @@ -0,0 +1,689 @@ +/* + * Copyright (C) 2011 Apple Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGNonSpeculativeJIT.h" + +#include "DFGSpeculativeJIT.h" + +#if ENABLE(DFG_JIT) + +namespace JSC { namespace DFG { + +const double twoToThe32 = (double)0x100000000ull; + +EntryLocation::EntryLocation(MacroAssembler::Label entry, NonSpeculativeJIT* jit) + : m_entry(entry) + , m_nodeIndex(jit->m_compileIndex) +{ + for (GPRReg gpr = gpr0; gpr < numberOfGPRs; next(gpr)) { + VirtualRegister virtualRegister = jit->m_gprs.name(gpr); + if (virtualRegister != InvalidVirtualRegister) { + GenerationInfo& info = jit->m_generationInfo[virtualRegister]; + m_gprInfo[gpr].nodeIndex = info.nodeIndex(); + m_gprInfo[gpr].format = info.registerFormat(); + } else + m_gprInfo[gpr].nodeIndex = NoNode; + } + for (FPRReg fpr = fpr0; fpr < numberOfFPRs; next(fpr)) { + VirtualRegister virtualRegister = jit->m_fprs.name(fpr); + if (virtualRegister != InvalidVirtualRegister) { + GenerationInfo& info = jit->m_generationInfo[virtualRegister]; + ASSERT(info.registerFormat() == DataFormatDouble); + m_fprInfo[fpr] = info.nodeIndex(); + } else + m_fprInfo[fpr] = NoNode; + } +} + +void NonSpeculativeJIT::valueToNumber(JSValueOperand& operand, FPRReg fpr) +{ + GPRReg jsValueGpr = operand.gpr(); + GPRReg tempGpr = allocate(); // FIXME: can we skip this allocation on the last use of the virtual register? 
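The instruction sequence being assembled in valueToNumber (continued below) implements a three-way dispatch that is easier to read in plain C++. In this sketch the encoding follows the JSVALUE64 model given earlier, and the stub stands in for the dfgConvertJSValueToNumber call-out (the real helper also takes the call frame):

    #include <cstdint>
    #include <cstring>

    static const uint64_t TagTypeNumber = 0xFFFF000000000000ull;

    double dfgConvertJSValueToNumberStub(uint64_t) { return 0; } // placeholder for the runtime call

    double valueToNumberModel(uint64_t value)
    {
        if (value >= TagTypeNumber)                      // the isInteger branch
            return static_cast<int32_t>(value);
        if (!(value & TagTypeNumber))                    // the nonNumeric branch: cell or other immediate
            return dfgConvertJSValueToNumberStub(value); // spill registers, call out, refill
        uint64_t bits = value + TagTypeNumber;           // unbox the double (the addPtr of the tag)
        double result;
        std::memcpy(&result, &bits, sizeof(result));
        return result;
    }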
+ + JITCompiler::RegisterID jsValueReg = JITCompiler::gprToRegisterID(jsValueGpr); + JITCompiler::FPRegisterID fpReg = JITCompiler::fprToRegisterID(fpr); + JITCompiler::RegisterID tempReg = JITCompiler::gprToRegisterID(tempGpr); + + JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, jsValueReg, JITCompiler::tagTypeNumberRegister); + JITCompiler::Jump nonNumeric = m_jit.branchTestPtr(MacroAssembler::Zero, jsValueReg, JITCompiler::tagTypeNumberRegister); + + // First, if we get here we have a double encoded as a JSValue + m_jit.move(jsValueReg, tempReg); + m_jit.addPtr(JITCompiler::tagTypeNumberRegister, tempReg); + m_jit.movePtrToDouble(tempReg, fpReg); + JITCompiler::Jump hasUnboxedDouble = m_jit.jump(); + + // Next handle cells (& other JS immediates) + nonNumeric.link(&m_jit); + silentSpillAllRegisters(fpr, jsValueGpr); + m_jit.move(jsValueReg, JITCompiler::argumentRegister1); + m_jit.move(JITCompiler::callFrameRegister, JITCompiler::argumentRegister0); + appendCallWithExceptionCheck(dfgConvertJSValueToNumber); + m_jit.moveDouble(JITCompiler::fpReturnValueRegister, fpReg); + silentFillAllRegisters(fpr); + JITCompiler::Jump hasCalledToNumber = m_jit.jump(); + + // Finally, handle integers. + isInteger.link(&m_jit); + m_jit.convertInt32ToDouble(jsValueReg, fpReg); + hasUnboxedDouble.link(&m_jit); + hasCalledToNumber.link(&m_jit); + + m_gprs.unlock(tempGpr); +} + +void NonSpeculativeJIT::valueToInt32(JSValueOperand& operand, GPRReg result) +{ + GPRReg jsValueGpr = operand.gpr(); + + JITCompiler::RegisterID jsValueReg = JITCompiler::gprToRegisterID(jsValueGpr); + JITCompiler::RegisterID resultReg = JITCompiler::gprToRegisterID(result); + + JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, jsValueReg, JITCompiler::tagTypeNumberRegister); + + // First handle non-integers + silentSpillAllRegisters(result, jsValueGpr); + m_jit.move(jsValueReg, JITCompiler::argumentRegister1); + m_jit.move(JITCompiler::callFrameRegister, JITCompiler::argumentRegister0); + appendCallWithExceptionCheck(dfgConvertJSValueToInt32); + m_jit.zeroExtend32ToPtr(JITCompiler::returnValueRegister, resultReg); + silentFillAllRegisters(result); + JITCompiler::Jump hasCalledToInt32 = m_jit.jump(); + + // Then handle integers. 
+ isInteger.link(&m_jit); + m_jit.zeroExtend32ToPtr(jsValueReg, resultReg); + hasCalledToInt32.link(&m_jit); +} + +void NonSpeculativeJIT::numberToInt32(FPRReg fpr, GPRReg gpr) +{ + JITCompiler::FPRegisterID fpReg = JITCompiler::fprToRegisterID(fpr); + JITCompiler::RegisterID reg = JITCompiler::gprToRegisterID(gpr); + + JITCompiler::Jump truncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpReg, reg, JITCompiler::BranchIfTruncateSuccessful); + + silentSpillAllRegisters(gpr); + + m_jit.moveDouble(fpReg, JITCompiler::fpArgumentRegister0); + appendCallWithExceptionCheck(toInt32); + m_jit.zeroExtend32ToPtr(JITCompiler::returnValueRegister, reg); + + silentFillAllRegisters(gpr); + + truncatedToInteger.link(&m_jit); +} + +bool NonSpeculativeJIT::isKnownInteger(NodeIndex nodeIndex) +{ + GenerationInfo& info = m_generationInfo[m_jit.graph()[nodeIndex].virtualRegister]; + + DataFormat registerFormat = info.registerFormat(); + if (registerFormat != DataFormatNone) + return (registerFormat | DataFormatJS) == DataFormatJSInteger; + + DataFormat spillFormat = info.spillFormat(); + if (spillFormat != DataFormatNone) + return (spillFormat | DataFormatJS) == DataFormatJSInteger; + + ASSERT(isConstant(nodeIndex)); + return isInt32Constant(nodeIndex); +} + +bool NonSpeculativeJIT::isKnownNumeric(NodeIndex nodeIndex) +{ + GenerationInfo& info = m_generationInfo[m_jit.graph()[nodeIndex].virtualRegister]; + + DataFormat registerFormat = info.registerFormat(); + if (registerFormat != DataFormatNone) + return (registerFormat | DataFormatJS) == DataFormatJSInteger + || (registerFormat | DataFormatJS) == DataFormatJSDouble; + + DataFormat spillFormat = info.spillFormat(); + if (spillFormat != DataFormatNone) + return (spillFormat | DataFormatJS) == DataFormatJSInteger + || (spillFormat | DataFormatJS) == DataFormatJSDouble; + + ASSERT(isConstant(nodeIndex)); + return isInt32Constant(nodeIndex) || isDoubleConstant(nodeIndex); +} + +void NonSpeculativeJIT::compile(SpeculationCheckIndexIterator& checkIterator, Node& node) +{ + // ... 
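Before the per-node switch below, note the trick in isKnownInteger/isKnownNumeric above: the DataFormat encoding makes the boxed and unboxed variants of a format differ only in one bit, so OR-ing in DataFormatJS collapses the two cases into one comparison. A sketch with assumed enum values (the real definitions are not shown in this part of the diff):

    #include <cassert>

    // Assumed encoding: the DataFormatJS bit distinguishes boxed from unboxed.
    enum DataFormat {
        DataFormatNone = 0,
        DataFormatInteger = 1,
        DataFormatDouble = 2,
        DataFormatCell = 3,
        DataFormatJS = 8,
        DataFormatJSInteger = DataFormatJS | DataFormatInteger,
        DataFormatJSDouble = DataFormatJS | DataFormatDouble,
        DataFormatJSCell = DataFormatJS | DataFormatCell,
    };

    int main()
    {
        // An unboxed int32 and a boxed JS int32 both satisfy the isKnownInteger test:
        assert((DataFormatInteger | DataFormatJS) == DataFormatJSInteger);
        assert((DataFormatJSInteger | DataFormatJS) == DataFormatJSInteger);
        // ...while a cell does not:
        assert((DataFormatCell | DataFormatJS) != DataFormatJSInteger);
    }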
+ if (checkIterator.hasCheckAtIndex(m_compileIndex)) + trackEntry(m_jit.label()); + + checkConsistency(); + NodeType op = node.op; + + switch (op) { + case ConvertThis: { + JSValueOperand thisValue(this, node.child1); + GPRReg thisGPR = thisValue.gpr(); + flushRegisters(); + + GPRResult result(this); + callOperation(operationConvertThis, result.gpr(), thisGPR); + cellResult(result.gpr(), m_compileIndex); + break; + } + + case Int32Constant: + case DoubleConstant: + case JSConstant: + initConstantInfo(m_compileIndex); + break; + + case GetLocal: { + GPRTemporary result(this); + m_jit.loadPtr(JITCompiler::addressFor(node.local()), result.registerID()); + jsValueResult(result.gpr(), m_compileIndex); + break; + } + + case SetLocal: { + JSValueOperand value(this, node.child1); + m_jit.storePtr(value.registerID(), JITCompiler::addressFor(node.local())); + noResult(m_compileIndex); + break; + } + + case BitAnd: + case BitOr: + case BitXor: + if (isInt32Constant(node.child1)) { + IntegerOperand op2(this, node.child2); + GPRTemporary result(this, op2); + + bitOp(op, valueOfInt32Constant(node.child1), op2.registerID(), result.registerID()); + + integerResult(result.gpr(), m_compileIndex); + } else if (isInt32Constant(node.child2)) { + IntegerOperand op1(this, node.child1); + GPRTemporary result(this, op1); + + bitOp(op, valueOfInt32Constant(node.child2), op1.registerID(), result.registerID()); + + integerResult(result.gpr(), m_compileIndex); + } else { + IntegerOperand op1(this, node.child1); + IntegerOperand op2(this, node.child2); + GPRTemporary result(this, op1, op2); + + MacroAssembler::RegisterID reg1 = op1.registerID(); + MacroAssembler::RegisterID reg2 = op2.registerID(); + bitOp(op, reg1, reg2, result.registerID()); + + integerResult(result.gpr(), m_compileIndex); + } + break; + + case BitRShift: + case BitLShift: + case BitURShift: + if (isInt32Constant(node.child2)) { + IntegerOperand op1(this, node.child1); + GPRTemporary result(this, op1); + + int shiftAmount = valueOfInt32Constant(node.child2) & 0x1f; + // Shifts by zero should have been optimized out of the graph! + ASSERT(shiftAmount); + shiftOp(op, op1.registerID(), shiftAmount, result.registerID()); + + integerResult(result.gpr(), m_compileIndex); + } else { + // Do not allow shift amount to be used as the result, MacroAssembler does not permit this. 
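The & 0x1f applied to the constant shift amount above mirrors ECMA-262 semantics: shift counts are interpreted modulo 32. A two-assert demonstration:

    #include <cassert>
    #include <cstdint>

    int32_t jsLeftShift(int32_t value, int32_t shift)
    {
        return value << (shift & 0x1f); // ECMAScript masks the shift count to 5 bits
    }

    int main()
    {
        assert(jsLeftShift(1, 33) == 2); // 33 & 0x1f == 1, so x << 33 behaves as x << 1
        assert(jsLeftShift(7, 32) == 7); // a count of 32 is a no-op, not zero
    }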
+ IntegerOperand op1(this, node.child1); + IntegerOperand op2(this, node.child2); + GPRTemporary result(this, op1); + + MacroAssembler::RegisterID reg1 = op1.registerID(); + MacroAssembler::RegisterID reg2 = op2.registerID(); + shiftOp(op, reg1, reg2, result.registerID()); + + integerResult(result.gpr(), m_compileIndex); + } + break; + + case UInt32ToNumber: { + IntegerOperand op1(this, node.child1); + FPRTemporary result(this); + m_jit.convertInt32ToDouble(op1.registerID(), result.registerID()); + + MacroAssembler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, op1.registerID(), TrustedImm32(0)); + m_jit.addDouble(JITCompiler::AbsoluteAddress(&twoToThe32), result.registerID()); + positive.link(&m_jit); + + doubleResult(result.fpr(), m_compileIndex); + break; + } + + case Int32ToNumber: { + IntegerOperand op1(this, node.child1); + FPRTemporary result(this); + m_jit.convertInt32ToDouble(op1.registerID(), result.registerID()); + doubleResult(result.fpr(), m_compileIndex); + break; + } + + case NumberToInt32: + case ValueToInt32: { + ASSERT(!isInt32Constant(node.child1)); + + if (isKnownInteger(node.child1)) { + IntegerOperand op1(this, node.child1); + GPRTemporary result(this, op1); + m_jit.move(op1.registerID(), result.registerID()); + integerResult(result.gpr(), m_compileIndex); + break; + } + + if (isKnownNumeric(node.child1)) { + DoubleOperand op1(this, node.child1); + GPRTemporary result(this); + numberToInt32(op1.fpr(), result.gpr()); + integerResult(result.gpr(), m_compileIndex); + break; + } + + // We should have handled this via isKnownInteger, or isKnownNumeric! + ASSERT(op != NumberToInt32); + + JSValueOperand op1(this, node.child1); + GPRTemporary result(this, op1); + valueToInt32(op1, result.gpr()); + integerResult(result.gpr(), m_compileIndex); + break; + } + + case ValueToNumber: { + ASSERT(!isInt32Constant(node.child1)); + ASSERT(!isDoubleConstant(node.child1)); + + if (isKnownInteger(node.child1)) { + IntegerOperand op1(this, node.child1); + FPRTemporary result(this); + m_jit.convertInt32ToDouble(op1.registerID(), result.registerID()); + doubleResult(result.fpr(), m_compileIndex); + break; + } + + if (isKnownNumeric(node.child1)) { + DoubleOperand op1(this, node.child1); + FPRTemporary result(this, op1); + m_jit.moveDouble(op1.registerID(), result.registerID()); + doubleResult(result.fpr(), m_compileIndex); + break; + } + + JSValueOperand op1(this, node.child1); + FPRTemporary result(this); + valueToNumber(op1, result.fpr()); + doubleResult(result.fpr(), m_compileIndex); + break; + } + + case ValueAdd: { + JSValueOperand arg1(this, node.child1); + JSValueOperand arg2(this, node.child2); + GPRReg arg1GPR = arg1.gpr(); + GPRReg arg2GPR = arg2.gpr(); + flushRegisters(); + + GPRResult result(this); + callOperation(operationValueAdd, result.gpr(), arg1GPR, arg2GPR); + + jsValueResult(result.gpr(), m_compileIndex); + break; + } + + case ArithAdd: { + DoubleOperand op1(this, node.child1); + DoubleOperand op2(this, node.child2); + FPRTemporary result(this, op1, op2); + + MacroAssembler::FPRegisterID reg1 = op1.registerID(); + MacroAssembler::FPRegisterID reg2 = op2.registerID(); + m_jit.addDouble(reg1, reg2, result.registerID()); + + doubleResult(result.fpr(), m_compileIndex); + break; + } + + case ArithSub: { + DoubleOperand op1(this, node.child1); + DoubleOperand op2(this, node.child2); + FPRTemporary result(this, op1); + + MacroAssembler::FPRegisterID reg1 = op1.registerID(); + MacroAssembler::FPRegisterID reg2 = op2.registerID(); + m_jit.subDouble(reg1, reg2, 
result.registerID()); + + doubleResult(result.fpr(), m_compileIndex); + break; + } + + case ArithMul: { + DoubleOperand op1(this, node.child1); + DoubleOperand op2(this, node.child2); + FPRTemporary result(this, op1, op2); + + MacroAssembler::FPRegisterID reg1 = op1.registerID(); + MacroAssembler::FPRegisterID reg2 = op2.registerID(); + m_jit.mulDouble(reg1, reg2, result.registerID()); + + doubleResult(result.fpr(), m_compileIndex); + break; + } + + case ArithDiv: { + DoubleOperand op1(this, node.child1); + DoubleOperand op2(this, node.child2); + FPRTemporary result(this, op1); + + MacroAssembler::FPRegisterID reg1 = op1.registerID(); + MacroAssembler::FPRegisterID reg2 = op2.registerID(); + m_jit.divDouble(reg1, reg2, result.registerID()); + + doubleResult(result.fpr(), m_compileIndex); + break; + } + + case ArithMod: { + DoubleOperand arg1(this, node.child1); + DoubleOperand arg2(this, node.child2); + FPRReg arg1FPR = arg1.fpr(); + FPRReg arg2FPR = arg2.fpr(); + flushRegisters(); + + FPRResult result(this); + callOperation(fmod, result.fpr(), arg1FPR, arg2FPR); + + doubleResult(result.fpr(), m_compileIndex); + break; + } + + case LogicalNot: { + JSValueOperand arg1(this, node.child1); + GPRReg arg1GPR = arg1.gpr(); + flushRegisters(); + + GPRResult result(this); + callOperation(dfgConvertJSValueToBoolean, result.gpr(), arg1GPR); + + // If we add a DataFormatBool, we should use it here. + m_jit.xor32(TrustedImm32(ValueTrue), result.registerID()); + jsValueResult(result.gpr(), m_compileIndex); + break; + } + + case CompareLess: { + JSValueOperand arg1(this, node.child1); + JSValueOperand arg2(this, node.child2); + GPRReg arg1GPR = arg1.gpr(); + GPRReg arg2GPR = arg2.gpr(); + flushRegisters(); + + GPRResult result(this); + callOperation(operationCompareLess, result.gpr(), arg1GPR, arg2GPR); + m_jit.or32(TrustedImm32(ValueFalse), result.registerID()); + + jsValueResult(result.gpr(), m_compileIndex); + break; + } + + case CompareLessEq: { + JSValueOperand arg1(this, node.child1); + JSValueOperand arg2(this, node.child2); + GPRReg arg1GPR = arg1.gpr(); + GPRReg arg2GPR = arg2.gpr(); + flushRegisters(); + + GPRResult result(this); + callOperation(operationCompareLessEq, result.gpr(), arg1GPR, arg2GPR); + m_jit.or32(TrustedImm32(ValueFalse), result.registerID()); + + jsValueResult(result.gpr(), m_compileIndex); + break; + } + + case CompareEq: { + JSValueOperand arg1(this, node.child1); + JSValueOperand arg2(this, node.child2); + GPRReg arg1GPR = arg1.gpr(); + GPRReg arg2GPR = arg2.gpr(); + flushRegisters(); + + GPRResult result(this); + callOperation(operationCompareEq, result.gpr(), arg1GPR, arg2GPR); + m_jit.or32(TrustedImm32(ValueFalse), result.registerID()); + + jsValueResult(result.gpr(), m_compileIndex); + break; + } + + case CompareStrictEq: { + JSValueOperand arg1(this, node.child1); + JSValueOperand arg2(this, node.child2); + GPRReg arg1GPR = arg1.gpr(); + GPRReg arg2GPR = arg2.gpr(); + flushRegisters(); + + GPRResult result(this); + callOperation(operationCompareStrictEq, result.gpr(), arg1GPR, arg2GPR); + m_jit.or32(TrustedImm32(ValueFalse), result.registerID()); + + jsValueResult(result.gpr(), m_compileIndex); + break; + } + + case GetByVal: { + JSValueOperand arg1(this, node.child1); + JSValueOperand arg2(this, node.child2); + GPRReg arg1GPR = arg1.gpr(); + GPRReg arg2GPR = arg2.gpr(); + flushRegisters(); + + GPRResult result(this); + callOperation(operationGetByVal, result.gpr(), arg1GPR, arg2GPR); + + jsValueResult(result.gpr(), m_compileIndex); + break; + } + + case PutByVal: + 
case PutByValAlias: { + JSValueOperand arg1(this, node.child1); + JSValueOperand arg2(this, node.child2); + JSValueOperand arg3(this, node.child3); + GPRReg arg1GPR = arg1.gpr(); + GPRReg arg2GPR = arg2.gpr(); + GPRReg arg3GPR = arg3.gpr(); + flushRegisters(); + + GPRResult result(this); + callOperation(m_jit.codeBlock()->isStrictMode() ? operationPutByValStrict : operationPutByValNonStrict, arg1GPR, arg2GPR, arg3GPR); + + noResult(m_compileIndex); + break; + } + + case GetById: { + JSValueOperand base(this, node.child1); + GPRReg baseGPR = base.gpr(); + flushRegisters(); + + GPRResult result(this); + callOperation(operationGetById, result.gpr(), baseGPR, identifier(node.identifierNumber())); + jsValueResult(result.gpr(), m_compileIndex); + break; + } + + case PutById: { + JSValueOperand base(this, node.child1); + JSValueOperand value(this, node.child2); + GPRReg valueGPR = value.gpr(); + GPRReg baseGPR = base.gpr(); + flushRegisters(); + + callOperation(m_jit.codeBlock()->isStrictMode() ? operationPutByIdStrict : operationPutByIdNonStrict, valueGPR, baseGPR, identifier(node.identifierNumber())); + noResult(m_compileIndex); + break; + } + + case PutByIdDirect: { + JSValueOperand base(this, node.child1); + JSValueOperand value(this, node.child2); + GPRReg valueGPR = value.gpr(); + GPRReg baseGPR = base.gpr(); + flushRegisters(); + + callOperation(m_jit.codeBlock()->isStrictMode() ? operationPutByIdDirectStrict : operationPutByIdDirectNonStrict, valueGPR, baseGPR, identifier(node.identifierNumber())); + noResult(m_compileIndex); + break; + } + + case GetGlobalVar: { + GPRTemporary result(this); + + JSVariableObject* globalObject = m_jit.codeBlock()->globalObject(); + m_jit.loadPtr(globalObject->addressOfRegisters(), result.registerID()); + m_jit.loadPtr(JITCompiler::addressForGlobalVar(result.registerID(), node.varNumber()), result.registerID()); + + jsValueResult(result.gpr(), m_compileIndex); + break; + } + + case PutGlobalVar: { + JSValueOperand value(this, node.child1); + GPRTemporary temp(this); + + JSVariableObject* globalObject = m_jit.codeBlock()->globalObject(); + m_jit.loadPtr(globalObject->addressOfRegisters(), temp.registerID()); + m_jit.storePtr(value.registerID(), JITCompiler::addressForGlobalVar(temp.registerID(), node.varNumber())); + + noResult(m_compileIndex); + break; + } + + case DFG::Jump: { + BlockIndex taken = m_jit.graph().blockIndexForBytecodeOffset(node.takenBytecodeOffset()); + if (taken != (m_block + 1)) + addBranch(m_jit.jump(), taken); + noResult(m_compileIndex); + break; + } + + case Branch: { + JSValueOperand value(this, node.child1); + GPRReg valueGPR = value.gpr(); + flushRegisters(); + + GPRResult result(this); + callOperation(dfgConvertJSValueToBoolean, result.gpr(), valueGPR); + + BlockIndex taken = m_jit.graph().blockIndexForBytecodeOffset(node.takenBytecodeOffset()); + BlockIndex notTaken = m_jit.graph().blockIndexForBytecodeOffset(node.notTakenBytecodeOffset()); + + addBranch(m_jit.branchTest8(MacroAssembler::NonZero, result.registerID()), taken); + if (notTaken != (m_block + 1)) + addBranch(m_jit.jump(), notTaken); + + noResult(m_compileIndex); + break; + } + + case Return: { + ASSERT(JITCompiler::callFrameRegister != JITCompiler::regT1); + ASSERT(JITCompiler::regT1 != JITCompiler::returnValueRegister); + ASSERT(JITCompiler::returnValueRegister != JITCompiler::callFrameRegister); + +#if DFG_SUCCESS_STATS + static SamplingCounter counter("NonSpeculativeJIT"); + m_jit.emitCount(counter); +#endif + + // Return the result in returnValueRegister. 
+ JSValueOperand op1(this, node.child1); + m_jit.move(op1.registerID(), JITCompiler::returnValueRegister); + + // Grab the return address. + m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, JITCompiler::regT1); + // Restore our caller's "r". + m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, JITCompiler::callFrameRegister); + // Return. + m_jit.restoreReturnAddressBeforeReturn(JITCompiler::regT1); + m_jit.ret(); + + noResult(m_compileIndex); + break; + } + } + + if (node.mustGenerate()) + use(m_compileIndex); + + checkConsistency(); +} + +void NonSpeculativeJIT::compile(SpeculationCheckIndexIterator& checkIterator, BasicBlock& block) +{ + ASSERT(m_compileIndex == block.begin); + m_blockHeads[m_block] = m_jit.label(); + +#if DFG_JIT_BREAK_ON_EVERY_BLOCK + m_jit.breakpoint(); +#endif + + for (; m_compileIndex < block.end; ++m_compileIndex) { + Node& node = m_jit.graph()[m_compileIndex]; + if (!node.refCount) + continue; + +#if DFG_DEBUG_VERBOSE + fprintf(stderr, "NonSpeculativeJIT generating Node @%d at code offset 0x%x\n", (int)m_compileIndex, m_jit.debugOffset()); +#endif +#if DFG_JIT_BREAK_ON_EVERY_NODE + m_jit.breakpoint(); +#endif + + compile(checkIterator, node); + } +} + +void NonSpeculativeJIT::compile(SpeculationCheckIndexIterator& checkIterator) +{ + ASSERT(!m_compileIndex); + Vector<BasicBlock> blocks = m_jit.graph().m_blocks; + for (m_block = 0; m_block < blocks.size(); ++m_block) + compile(checkIterator, blocks[m_block]); + linkBranches(); +} + +} } // namespace JSC::DFG + +#endif diff --git a/Source/JavaScriptCore/dfg/DFGNonSpeculativeJIT.h b/Source/JavaScriptCore/dfg/DFGNonSpeculativeJIT.h new file mode 100644 index 0000000..de4c04b --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGNonSpeculativeJIT.h @@ -0,0 +1,259 @@ +/* + * Copyright (C) 2011 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGNonSpeculativeJIT_h +#define DFGNonSpeculativeJIT_h + +#if ENABLE(DFG_JIT) + +#include <dfg/DFGJITCodeGenerator.h> + +namespace JSC { namespace DFG { + +class SpeculationCheckIndexIterator; + +// === EntryLocation === +// +// This structure describes an entry point into the non-speculative +// code path. This is used in linking bail-outs from the speculative path. 
+struct EntryLocation {
+    EntryLocation(MacroAssembler::Label, NonSpeculativeJIT*);
+
+    // The label marking the start of code for the given node,
+    // and the node this entry point corresponds to.
+    MacroAssembler::Label m_entry;
+    NodeIndex m_nodeIndex;
+
+    // For every entry point we record a map recording, for every
+    // machine register, which value (if any) it contains. For
+    // GPR registers we must also record the format of the value.
+    struct RegisterInfo {
+        NodeIndex nodeIndex;
+        DataFormat format;
+    };
+    RegisterInfo m_gprInfo[numberOfGPRs];
+    NodeIndex m_fprInfo[numberOfFPRs];
+};
+
+// === NonSpeculativeJIT ===
+//
+// This class is used to generate code for the non-speculative path.
+// Code generation will take advantage of static information available
+// in the dataflow to perform safe optimizations - for example, avoiding
+// boxing numeric values between arithmetic operations - but will not
+// perform any unsafe optimizations that would render the code unable
+// to produce the correct results for any possible input.
+class NonSpeculativeJIT : public JITCodeGenerator {
+    friend struct EntryLocation;
+public:
+    NonSpeculativeJIT(JITCompiler& jit)
+        : JITCodeGenerator(jit, false)
+    {
+    }
+
+    void compile(SpeculationCheckIndexIterator&);
+
+    typedef SegmentedVector<EntryLocation, 16> EntryLocationVector;
+    EntryLocationVector& entryLocations() { return m_entryLocations; }
+
+private:
+    void compile(SpeculationCheckIndexIterator&, Node&);
+    void compile(SpeculationCheckIndexIterator&, BasicBlock&);
+
+    bool isKnownInteger(NodeIndex);
+    bool isKnownNumeric(NodeIndex);
+
+    // These methods are used when generating 'unexpected'
+    // calls out from JIT code to C++ helper routines -
+    // they spill all live values to the appropriate
+    // slots in the RegisterFile without changing any state
+    // in the GenerationInfo.
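(Editorial aside: the spill/fill helpers defined next are easiest to read against their intended call-site shape. A minimal sketch follows; the helper operation name is hypothetical and the pattern is inferred from the comment above, not taken from this patch.)

    // Sketch: an 'unexpected' call out to C++ from generated code.
    silentSpillAllRegisters(resultGPR);   // flush live values to the RegisterFile
    m_jit.call(someSlowPathOperation);    // hypothetical helper; may clobber registers
    silentFillAllRegisters(resultGPR);    // reload the values; resultGPR is excluded
    // Because the GenerationInfo was never touched, code generation continues
    // as if the registers had held their values throughout.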
+ void silentSpillGPR(VirtualRegister spillMe, GPRReg exclude = InvalidGPRReg) + { + GenerationInfo& info = m_generationInfo[spillMe]; + ASSERT(info.registerFormat() != DataFormatNone && info.registerFormat() != DataFormatDouble); + + if (!info.needsSpill() || (info.gpr() == exclude)) + return; + + DataFormat registerFormat = info.registerFormat(); + JITCompiler::RegisterID reg = JITCompiler::gprToRegisterID(info.gpr()); + + if (registerFormat == DataFormatInteger) { + m_jit.orPtr(JITCompiler::tagTypeNumberRegister, reg); + m_jit.storePtr(reg, JITCompiler::addressFor(spillMe)); + } else { + ASSERT(registerFormat & DataFormatJS || registerFormat == DataFormatCell); + m_jit.storePtr(reg, JITCompiler::addressFor(spillMe)); + } + } + void silentSpillFPR(VirtualRegister spillMe, GPRReg canTrample, FPRReg exclude = InvalidFPRReg) + { + GenerationInfo& info = m_generationInfo[spillMe]; + ASSERT(info.registerFormat() == DataFormatDouble); + + if (!info.needsSpill() || (info.fpr() == exclude)) + return; + + boxDouble(info.fpr(), canTrample); + m_jit.storePtr(JITCompiler::gprToRegisterID(canTrample), JITCompiler::addressFor(spillMe)); + } + + void silentFillGPR(VirtualRegister spillMe, GPRReg exclude = InvalidGPRReg) + { + GenerationInfo& info = m_generationInfo[spillMe]; + if (info.gpr() == exclude) + return; + + NodeIndex nodeIndex = info.nodeIndex(); + Node& node = m_jit.graph()[nodeIndex]; + ASSERT(info.registerFormat() != DataFormatNone && info.registerFormat() != DataFormatDouble); + DataFormat registerFormat = info.registerFormat(); + JITCompiler::RegisterID reg = JITCompiler::gprToRegisterID(info.gpr()); + + if (registerFormat == DataFormatInteger) { + if (node.isConstant()) { + ASSERT(isInt32Constant(nodeIndex)); + m_jit.move(Imm32(valueOfInt32Constant(nodeIndex)), reg); + } else + m_jit.load32(JITCompiler::addressFor(spillMe), reg); + return; + } + + if (node.isConstant()) + m_jit.move(constantAsJSValueAsImmPtr(nodeIndex), reg); + else { + ASSERT(registerFormat & DataFormatJS || registerFormat == DataFormatCell); + m_jit.loadPtr(JITCompiler::addressFor(spillMe), reg); + } + } + void silentFillFPR(VirtualRegister spillMe, GPRReg canTrample, FPRReg exclude = InvalidFPRReg) + { + GenerationInfo& info = m_generationInfo[spillMe]; + if (info.fpr() == exclude) + return; + + NodeIndex nodeIndex = info.nodeIndex(); + Node& node = m_jit.graph()[nodeIndex]; + ASSERT(info.registerFormat() == DataFormatDouble); + + if (node.isConstant()) { + JITCompiler::RegisterID reg = JITCompiler::gprToRegisterID(info.gpr()); + m_jit.move(constantAsJSValueAsImmPtr(nodeIndex), reg); + } else { + m_jit.loadPtr(JITCompiler::addressFor(spillMe), JITCompiler::gprToRegisterID(canTrample)); + unboxDouble(canTrample, info.fpr()); + } + } + + void silentSpillAllRegisters(GPRReg exclude, GPRReg preserve = InvalidGPRReg) + { + GPRReg canTrample = (preserve == gpr0) ? gpr1 : gpr0; + + for (GPRReg gpr = gpr0; gpr < numberOfGPRs; next(gpr)) { + VirtualRegister name = m_gprs.name(gpr); + if (name != InvalidVirtualRegister) + silentSpillGPR(name, exclude); + } + for (FPRReg fpr = fpr0; fpr < numberOfFPRs; next(fpr)) { + VirtualRegister name = m_fprs.name(fpr); + if (name != InvalidVirtualRegister) + silentSpillFPR(name, canTrample); + } + } + void silentSpillAllRegisters(FPRReg exclude, GPRReg preserve = InvalidGPRReg) + { + GPRReg canTrample = (preserve == gpr0) ? 
gpr1 : gpr0; + + for (GPRReg gpr = gpr0; gpr < numberOfGPRs; next(gpr)) { + VirtualRegister name = m_gprs.name(gpr); + if (name != InvalidVirtualRegister) + silentSpillGPR(name); + } + for (FPRReg fpr = fpr0; fpr < numberOfFPRs; next(fpr)) { + VirtualRegister name = m_fprs.name(fpr); + if (name != InvalidVirtualRegister) + silentSpillFPR(name, canTrample, exclude); + } + } + void silentFillAllRegisters(GPRReg exclude) + { + GPRReg canTrample = (exclude == gpr0) ? gpr1 : gpr0; + + for (FPRReg fpr = fpr0; fpr < numberOfFPRs; next(fpr)) { + VirtualRegister name = m_fprs.name(fpr); + if (name != InvalidVirtualRegister) + silentFillFPR(name, canTrample); + } + for (GPRReg gpr = gpr0; gpr < numberOfGPRs; next(gpr)) { + VirtualRegister name = m_gprs.name(gpr); + if (name != InvalidVirtualRegister) + silentFillGPR(name, exclude); + } + } + void silentFillAllRegisters(FPRReg exclude) + { + GPRReg canTrample = gpr0; + + for (FPRReg fpr = fpr0; fpr < numberOfFPRs; next(fpr)) { + VirtualRegister name = m_fprs.name(fpr); + if (name != InvalidVirtualRegister) { +#ifndef NDEBUG + ASSERT(fpr != exclude); +#else + UNUSED_PARAM(exclude); +#endif + silentFillFPR(name, canTrample, exclude); + } + } + for (GPRReg gpr = gpr0; gpr < numberOfGPRs; next(gpr)) { + VirtualRegister name = m_gprs.name(gpr); + if (name != InvalidVirtualRegister) + silentFillGPR(name); + } + } + + // These methods are used to plant calls out to C++ + // helper routines to convert between types. + void valueToNumber(JSValueOperand&, FPRReg result); + void valueToInt32(JSValueOperand&, GPRReg result); + void numberToInt32(FPRReg, GPRReg result); + + // Record an entry location into the non-speculative code path; + // for every bail-out on the speculative path we record information + // to be able to re-enter into the non-speculative one. + void trackEntry(MacroAssembler::Label entry) + { + m_entryLocations.append(EntryLocation(entry, this)); + } + + EntryLocationVector m_entryLocations; +}; + +} } // namespace JSC::DFG + +#endif +#endif + diff --git a/Source/JavaScriptCore/dfg/DFGOperations.cpp b/Source/JavaScriptCore/dfg/DFGOperations.cpp new file mode 100644 index 0000000..a310d22 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGOperations.cpp @@ -0,0 +1,245 @@ +/* + * Copyright (C) 2011 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGOperations.h" + +#if ENABLE(DFG_JIT) + +#include "CodeBlock.h" +#include "Interpreter.h" +#include "JSByteArray.h" +#include "JSGlobalData.h" +#include "Operations.h" + +namespace JSC { namespace DFG { + +EncodedJSValue operationConvertThis(ExecState* exec, EncodedJSValue encodedOp) +{ + return JSValue::encode(JSValue::decode(encodedOp).toThisObject(exec)); +} + +EncodedJSValue operationValueAdd(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) +{ + JSValue op1 = JSValue::decode(encodedOp1); + JSValue op2 = JSValue::decode(encodedOp2); + + if (op1.isInt32() && op2.isInt32()) { + int64_t result64 = static_cast<int64_t>(op1.asInt32()) + static_cast<int64_t>(op2.asInt32()); + int32_t result32 = static_cast<int32_t>(result64); + if (LIKELY(result32 == result64)) + return JSValue::encode(jsNumber(result32)); + return JSValue::encode(jsNumber((double)result64)); + } + + double number1; + double number2; + if (op1.getNumber(number1) && op2.getNumber(number2)) + return JSValue::encode(jsNumber(number1 + number2)); + + return JSValue::encode(jsAddSlowCase(exec, op1, op2)); +} + +EncodedJSValue operationGetByVal(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedProperty) +{ + JSValue baseValue = JSValue::decode(encodedBase); + JSValue property = JSValue::decode(encodedProperty); + + if (LIKELY(baseValue.isCell())) { + JSCell* base = baseValue.asCell(); + + if (property.isUInt32()) { + JSGlobalData* globalData = &exec->globalData(); + uint32_t i = property.asUInt32(); + + // FIXME: the JIT used to handle these in compiled code! + if (isJSArray(globalData, base) && asArray(base)->canGetIndex(i)) + return JSValue::encode(asArray(base)->getIndex(i)); + + // FIXME: the JITstub used to relink this to an optimized form! + if (isJSString(globalData, base) && asString(base)->canGetIndex(i)) + return JSValue::encode(asString(base)->getIndex(exec, i)); + + // FIXME: the JITstub used to relink this to an optimized form! 
+ if (isJSByteArray(globalData, base) && asByteArray(base)->canAccessIndex(i)) + return JSValue::encode(asByteArray(base)->getIndex(exec, i)); + + return JSValue::encode(baseValue.get(exec, i)); + } + + if (property.isString()) { + Identifier propertyName(exec, asString(property)->value(exec)); + PropertySlot slot(base); + if (base->fastGetOwnPropertySlot(exec, propertyName, slot)) + return JSValue::encode(slot.getValue(exec, propertyName)); + } + } + + Identifier ident(exec, property.toString(exec)); + return JSValue::encode(baseValue.get(exec, ident)); +} + +EncodedJSValue operationGetById(ExecState* exec, EncodedJSValue encodedBase, Identifier* identifier) +{ + JSValue baseValue = JSValue::decode(encodedBase); + PropertySlot slot(baseValue); + return JSValue::encode(baseValue.get(exec, *identifier, slot)); +} + +template<bool strict> +ALWAYS_INLINE static void operationPutByValInternal(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedProperty, EncodedJSValue encodedValue) +{ + JSGlobalData* globalData = &exec->globalData(); + + JSValue baseValue = JSValue::decode(encodedBase); + JSValue property = JSValue::decode(encodedProperty); + JSValue value = JSValue::decode(encodedValue); + + if (LIKELY(property.isUInt32())) { + uint32_t i = property.asUInt32(); + + if (isJSArray(globalData, baseValue)) { + JSArray* jsArray = asArray(baseValue); + if (jsArray->canSetIndex(i)) { + jsArray->setIndex(*globalData, i, value); + return; + } + + jsArray->JSArray::put(exec, i, value); + return; + } + + if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) { + JSByteArray* jsByteArray = asByteArray(baseValue); + // FIXME: the JITstub used to relink this to an optimized form! + if (value.isInt32()) { + jsByteArray->setIndex(i, value.asInt32()); + return; + } + + double dValue = 0; + if (value.getNumber(dValue)) { + jsByteArray->setIndex(i, dValue); + return; + } + } + + baseValue.put(exec, i, value); + return; + } + + // Don't put to an object if toString throws an exception. 
+ Identifier ident(exec, property.toString(exec)); + if (!globalData->exception) { + PutPropertySlot slot(strict); + baseValue.put(exec, ident, value, slot); + } +} + +void operationPutByValStrict(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedProperty, EncodedJSValue encodedValue) +{ + operationPutByValInternal<true>(exec, encodedBase, encodedProperty, encodedValue); +} + +void operationPutByValNonStrict(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedProperty, EncodedJSValue encodedValue) +{ + operationPutByValInternal<false>(exec, encodedBase, encodedProperty, encodedValue); +} + +void operationPutByIdStrict(ExecState* exec, EncodedJSValue encodedValue, EncodedJSValue encodedBase, Identifier* identifier) +{ + PutPropertySlot slot(true); + JSValue::decode(encodedBase).put(exec, *identifier, JSValue::decode(encodedValue), slot); +} + +void operationPutByIdNonStrict(ExecState* exec, EncodedJSValue encodedValue, EncodedJSValue encodedBase, Identifier* identifier) +{ + PutPropertySlot slot(false); + JSValue::decode(encodedBase).put(exec, *identifier, JSValue::decode(encodedValue), slot); +} + +void operationPutByIdDirectStrict(ExecState* exec, EncodedJSValue encodedValue, EncodedJSValue encodedBase, Identifier* identifier) +{ + PutPropertySlot slot(true); + JSValue::decode(encodedBase).putDirect(exec, *identifier, JSValue::decode(encodedValue), slot); +} + +void operationPutByIdDirectNonStrict(ExecState* exec, EncodedJSValue encodedValue, EncodedJSValue encodedBase, Identifier* identifier) +{ + PutPropertySlot slot(false); + JSValue::decode(encodedBase).putDirect(exec, *identifier, JSValue::decode(encodedValue), slot); +} + +bool operationCompareLess(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) +{ + return jsLess(exec, JSValue::decode(encodedOp1), JSValue::decode(encodedOp2)); +} + +bool operationCompareLessEq(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) +{ + return jsLessEq(exec, JSValue::decode(encodedOp1), JSValue::decode(encodedOp2)); +} + +bool operationCompareEq(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) +{ + return JSValue::equal(exec, JSValue::decode(encodedOp1), JSValue::decode(encodedOp2)); +} + +bool operationCompareStrictEq(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) +{ + return JSValue::strictEqual(exec, JSValue::decode(encodedOp1), JSValue::decode(encodedOp2)); +} + +DFGHandler lookupExceptionHandler(ExecState* exec, ReturnAddressPtr faultLocation) +{ + JSValue exceptionValue = exec->exception(); + ASSERT(exceptionValue); + + unsigned vPCIndex = exec->codeBlock()->bytecodeOffset(faultLocation); + HandlerInfo* handler = exec->globalData().interpreter->throwException(exec, exceptionValue, vPCIndex); + + void* catchRoutine = handler ? 
handler->nativeCode.executableAddress() : (void*)ctiOpThrowNotCaught;
+    ASSERT(catchRoutine);
+    return DFGHandler(exec, catchRoutine);
+}
+
+double dfgConvertJSValueToNumber(ExecState* exec, EncodedJSValue value)
+{
+    return JSValue::decode(value).toNumber(exec);
+}
+
+int32_t dfgConvertJSValueToInt32(ExecState* exec, EncodedJSValue value)
+{
+    return JSValue::decode(value).toInt32(exec);
+}
+
+bool dfgConvertJSValueToBoolean(ExecState* exec, EncodedJSValue encodedOp)
+{
+    return JSValue::decode(encodedOp).toBoolean(exec);
+}
+
+} } // namespace JSC::DFG
+
+#endif
diff --git a/Source/JavaScriptCore/dfg/DFGOperations.h b/Source/JavaScriptCore/dfg/DFGOperations.h
new file mode 100644
index 0000000..d4c7c0f
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGOperations.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGOperations_h
+#define DFGOperations_h
+
+#if ENABLE(DFG_JIT)
+
+#include <dfg/DFGJITCompiler.h>
+
+namespace JSC {
+
+class Identifier;
+
+namespace DFG {
+
+// These typedefs provide typechecking when generating calls out to helper routines;
+// this helps prevent calling a helper routine with the wrong arguments!
+typedef EncodedJSValue (*J_DFGOperation_EJJ)(ExecState*, EncodedJSValue, EncodedJSValue);
+typedef EncodedJSValue (*J_DFGOperation_EJ)(ExecState*, EncodedJSValue);
+typedef EncodedJSValue (*J_DFGOperation_EJP)(ExecState*, EncodedJSValue, void*);
+typedef EncodedJSValue (*J_DFGOperation_EJI)(ExecState*, EncodedJSValue, Identifier*);
+typedef bool (*Z_DFGOperation_EJ)(ExecState*, EncodedJSValue);
+typedef bool (*Z_DFGOperation_EJJ)(ExecState*, EncodedJSValue, EncodedJSValue);
+typedef void (*V_DFGOperation_EJJJ)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue);
+typedef void (*V_DFGOperation_EJJP)(ExecState*, EncodedJSValue, EncodedJSValue, void*);
+typedef void (*V_DFGOperation_EJJI)(ExecState*, EncodedJSValue, EncodedJSValue, Identifier*);
+typedef double (*D_DFGOperation_DD)(double, double);
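(Editorial aside: a sketch of the typechecking these typedefs buy, using operationValueAdd, declared just below; the assignments are illustrative and not part of the patch.)

    // The compiler verifies the full signature when assigning through the typedef:
    J_DFGOperation_EJJ add = operationValueAdd;   // compiles: matching signature
    // Z_DFGOperation_EJ bad = operationValueAdd; // would be rejected: wrong return
    //                                            // and argument types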
+// These routines provide callbacks out to C++ implementations of operations too complex to JIT.
+EncodedJSValue operationConvertThis(ExecState*, EncodedJSValue encodedOp1);
+EncodedJSValue operationValueAdd(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2);
+EncodedJSValue operationGetByVal(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedProperty);
+EncodedJSValue operationGetById(ExecState*, EncodedJSValue encodedBase, Identifier*);
+void operationPutByValStrict(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedProperty, EncodedJSValue encodedValue);
+void operationPutByValNonStrict(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedProperty, EncodedJSValue encodedValue);
+void operationPutByIdStrict(ExecState*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, Identifier*);
+void operationPutByIdNonStrict(ExecState*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, Identifier*);
+void operationPutByIdDirectStrict(ExecState*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, Identifier*);
+void operationPutByIdDirectNonStrict(ExecState*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, Identifier*);
+bool operationCompareLess(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2);
+bool operationCompareLessEq(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2);
+bool operationCompareEq(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2);
+bool operationCompareStrictEq(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2);
+
+// This method is used to look up an exception handler, keyed by faultLocation, which is
+// the return location from one of the calls out to one of the helper operations above.
+struct DFGHandler {
+    DFGHandler(ExecState* exec, void* handler)
+        : exec(exec)
+        , handler(handler)
+    {
+    }
+
+    ExecState* exec;
+    void* handler;
+};
+DFGHandler lookupExceptionHandler(ExecState*, ReturnAddressPtr faultLocation);
+
+// These operations implement the implicitly called ToInt32, ToNumber, and ToBoolean conversions from ES5.
+double dfgConvertJSValueToNumber(ExecState*, EncodedJSValue);
+int32_t dfgConvertJSValueToInt32(ExecState*, EncodedJSValue);
+bool dfgConvertJSValueToBoolean(ExecState*, EncodedJSValue);
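(Editorial aside: the numeric step of the ES5 ToInt32 conversion behind dfgConvertJSValueToInt32, sketched as standalone C++. JSValue::toInt32 is the real entry point; this function is illustrative only.)

    #include <cmath>
    #include <cstdint>

    // ES5 9.5: NaN and infinities map to 0; otherwise truncate, reduce
    // modulo 2^32, then reinterpret the result as a signed 32-bit value.
    int32_t toInt32Step(double d)
    {
        if (!std::isfinite(d))
            return 0;
        double m = std::fmod(std::trunc(d), 4294967296.0); // keeps the sign of d
        if (m < 0)
            m += 4294967296.0;                             // now in [0, 2^32)
        return static_cast<int32_t>(static_cast<uint32_t>(m));
    }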
+} } // namespace JSC::DFG
+
+#endif
+#endif
diff --git a/Source/JavaScriptCore/dfg/DFGRegisterBank.h b/Source/JavaScriptCore/dfg/DFGRegisterBank.h
new file mode 100644
index 0000000..575e6b7
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGRegisterBank.h
@@ -0,0 +1,253 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGRegisterBank_h
+#define DFGRegisterBank_h
+
+#if ENABLE(DFG_JIT)
+
+#include <dfg/DFGJITCompiler.h>
+
+namespace JSC { namespace DFG {
+
+// === RegisterBank ===
+//
+// This class is used to implement the GPR and FPR register banks.
+// All registers have two pieces of state associated with them:
+// a lock count (used to indicate that this register is already in use
+// in code generation of the current node, and cannot be spilled or
+// allocated as a temporary), and a VirtualRegister 'name', recording
+// which value (if any) the machine register currently holds.
+// Either or both of these pieces of information may be valid for a
+// given register. A register may be:
+//
+//  - unlocked, and unnamed: Available for allocation.
+//  - locked, but unnamed:   Already allocated as a temporary or
+//                           result for the current node.
+//  - unlocked, but named:   Contains the result of a prior operation,
+//                           not yet in use for this node.
+//  - locked, but named:     Contains the result of a prior operation,
+//                           already allocated as an operand to the
+//                           current operation.
+//
+// For every named register we also record a hint value indicating
+// the order in which registers should be selected to be spilled;
+// registers that can be more cheaply spilled and/or filled should
+// be selected first.
+//
+// Locking a register is a strong retention mechanism; a locked register
+// will never be reallocated (this is used to ensure the operands to
+// the current node are in registers). Naming, conversely, is a weak
+// retention mechanism - allocating a register may force a named value
+// to be spilled.
+//
+// All named values must be given a hint that is greater than SpillHintMin
+// and less than SpillHintMax.
+template<typename RegID, size_t NUM_REGS, typename SpillHint, SpillHint SpillHintMin, SpillHint SpillHintMax>
+class RegisterBank {
+public:
+    RegisterBank()
+        : m_lastAllocated(NUM_REGS - 1)
+    {
+    }
+
+    // Allocate a register - this function finds an unlocked register,
+    // locks it, and returns it. If any unlocked, unnamed registers exist,
+    // one of these will be selected to be allocated. If all unlocked
+    // registers are named, then one of the named registers will need
+    // to be spilled. In this case the register selected to be spilled
+    // will be one of the registers that has the lowest 'spillOrder'
+    // cost associated with it.
+    //
+    // This method selects the register to be allocated, and calls the
+    // private 'allocateInternal' method to update internal data
+    // structures accordingly.
+    RegID allocate(VirtualRegister& spillMe)
+    {
+        uint32_t currentLowest = NUM_REGS;
+        SpillHint currentSpillOrder = SpillHintMax;
+
+        // Scan through all registers, starting at the last allocated & looping around.
+        ASSERT(m_lastAllocated < NUM_REGS);
+
+        // This loop is broken into two halves, looping from the last allocated
+        // register (the register returned last time this method was called) to
+        // the maximum register value, then from 0 to the last allocated.
+        // This implements a simple round-robin-like approach to try to reduce
+        // thrash, and minimize time spent scanning locked registers in allocation.
+        // If an unlocked and unnamed register is found, return it immediately.
+        // Otherwise, find the first unlocked register with the lowest spillOrder.
+        for (uint32_t i = m_lastAllocated + 1; i < NUM_REGS; ++i) {
+            // (1) If the current register is locked, it is not a candidate.
+            if (m_data[i].lockCount)
+                continue;
+            // (2) If the current register's spill order is 0, pick this! - unassigned registers have spill order 0.
+            SpillHint spillOrder = m_data[i].spillOrder;
+            if (!spillOrder)
+                return allocateInternal(i, spillMe);
+            // If this register is better (has a lower spill order value) than any prior
+            // candidate, then record it.
+            if (spillOrder < currentSpillOrder) {
+                currentSpillOrder = spillOrder;
+                currentLowest = i;
+            }
+        }
+        // Loop over the remaining entries.
+        for (uint32_t i = 0; i <= m_lastAllocated; ++i) {
+            if (m_data[i].lockCount)
+                continue;
+            SpillHint spillOrder = m_data[i].spillOrder;
+            if (!spillOrder)
+                return allocateInternal(i, spillMe);
+            if (spillOrder < currentSpillOrder) {
+                currentSpillOrder = spillOrder;
+                currentLowest = i;
+            }
+        }
+
+        // Deadlock check - this could only occur if all registers are locked!
+        ASSERT(currentLowest != NUM_REGS && currentSpillOrder != SpillHintMax);
+        // There were no available registers; currentLowest will need to be spilled.
+        return allocateInternal(currentLowest, spillMe);
+    }
+
+    // retain/release - these methods are used to associate/disassociate names
+    // with values in registers. retain should only be called on locked registers.
+    void retain(RegID reg, VirtualRegister name, SpillHint spillOrder)
+    {
+        // 'reg' must be a valid, locked register.
+        ASSERT(reg < NUM_REGS);
+        ASSERT(m_data[reg].lockCount);
+        // 'reg' should not currently be named; the new name must be valid.
+        ASSERT(m_data[reg].name == InvalidVirtualRegister);
+        ASSERT(name != InvalidVirtualRegister);
+        // 'reg' should not currently have a spillOrder; the new spill order must be valid.
+        ASSERT(spillOrder && spillOrder < SpillHintMax);
+        ASSERT(m_data[reg].spillOrder == SpillHintMin);
+
+        m_data[reg].name = name;
+        m_data[reg].spillOrder = spillOrder;
+    }
+    void release(RegID reg)
+    {
+        // 'reg' must be a valid register.
+        ASSERT(reg < NUM_REGS);
+        // 'reg' should currently be named.
+        ASSERT(m_data[reg].name != InvalidVirtualRegister);
+        // 'reg' should currently have a valid spill order.
+        ASSERT(m_data[reg].spillOrder > SpillHintMin && m_data[reg].spillOrder < SpillHintMax);
+
+        m_data[reg].name = InvalidVirtualRegister;
+        m_data[reg].spillOrder = SpillHintMin;
+    }
+
+    // lock/unlock registers, ensuring that they are not spilled.
+    void lock(RegID reg)
+    {
+        ASSERT(reg < NUM_REGS);
+        ++m_data[reg].lockCount;
+        ASSERT(m_data[reg].lockCount);
+    }
+    void unlock(RegID reg)
+    {
+        ASSERT(reg < NUM_REGS);
+        ASSERT(m_data[reg].lockCount);
+        --m_data[reg].lockCount;
+    }
+    bool isLocked(RegID reg)
+    {
+        ASSERT(reg < NUM_REGS);
+        return m_data[reg].lockCount;
+    }
+
+    // Get the name (VirtualRegister) associated with the
+    // given register (or InvalidVirtualRegister for none).
+    VirtualRegister name(RegID reg)
+    {
+        ASSERT(reg < NUM_REGS);
+        return m_data[reg].name;
+    }
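(Editorial aside: the selection policy allocate() implements, reduced to a standalone function over a plain array. The field names mirror MapEntry below; the function itself is illustrative and not part of the patch.)

    // Scan round-robin from 'lastAllocated + 1': prefer an unlocked, unnamed
    // register (spillOrder == 0); otherwise take the unlocked register with
    // the lowest non-zero spillOrder as the spill victim.
    struct Entry { unsigned lockCount; unsigned spillOrder; };

    int pickRegister(const Entry* regs, unsigned count, unsigned lastAllocated)
    {
        int victim = -1;
        unsigned best = ~0u;
        for (unsigned n = 1; n <= count; ++n) {
            unsigned i = (lastAllocated + n) % count; // round-robin order
            if (regs[i].lockCount)
                continue;                             // locked: not a candidate
            if (!regs[i].spillOrder)
                return i;                             // free register: done
            if (regs[i].spillOrder < best) {
                best = regs[i].spillOrder;
                victim = i;
            }
        }
        return victim;                                // -1 only if all are locked
    }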
+
+#ifndef NDEBUG
+    void dump()
+    {
+        // For each register, print the VirtualRegister 'name'.
+        for (uint32_t i = 0; i < NUM_REGS; ++i) {
+            if (m_data[i].name != InvalidVirtualRegister)
+                fprintf(stderr, "[%02d]", m_data[i].name);
+            else
+                fprintf(stderr, "[--]");
+        }
+        fprintf(stderr, "\n");
+    }
+#endif
+
+private:
+    // Used by 'allocate', above, to update information in the map.
+    RegID allocateInternal(uint32_t i, VirtualRegister& spillMe)
+    {
+        // 'i' must be a valid, unlocked register.
+        ASSERT(i < NUM_REGS && !m_data[i].lockCount);
+
+        // Return the VirtualRegister of the named value currently stored in
+        // the register being returned - or InvalidVirtualRegister if none.
+        spillMe = m_data[i].name;
+
+        // Clear any name/spillOrder currently associated with the register.
+        m_data[i] = MapEntry();
+        // Mark the register as locked (with a lock count of 1).
+        m_data[i].lockCount = 1;
+        m_lastAllocated = i;
+        return (RegID)i;
+    }
+
+    // === MapEntry ===
+    //
+    // This structure provides information for an individual machine register
+    // being managed by the RegisterBank. For each register we track a lock
+    // count, name and spillOrder hint.
+    struct MapEntry {
+        MapEntry()
+            : name(InvalidVirtualRegister)
+            , spillOrder(SpillHintMin)
+            , lockCount(0)
+        {
+        }
+
+        VirtualRegister name;
+        SpillHint spillOrder;
+        uint32_t lockCount;
+    };
+
+    // Holds the current status of all registers.
+    MapEntry m_data[NUM_REGS];
+    // Used to implement a simple round-robin-like allocation scheme.
+    uint32_t m_lastAllocated;
+};
+
+} } // namespace JSC::DFG
+
+#endif
+#endif
diff --git a/Source/JavaScriptCore/dfg/DFGScoreBoard.h b/Source/JavaScriptCore/dfg/DFGScoreBoard.h
new file mode 100644
index 0000000..b9bf1fd
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGScoreBoard.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGScoreBoard_h
+#define DFGScoreBoard_h
+
+#if ENABLE(DFG_JIT)
+
+#include <dfg/DFGGraph.h>
+#include <wtf/Vector.h>
+
+namespace JSC { namespace DFG {
+
+// === ScoreBoard ===
+//
+// This class is used in performing a virtual register allocation over the graph.
+// VirtualRegisters are allocated to nodes, with a use count for each virtual
+// register tracking the lifespan of the value; after the final use of a node
+// the associated VirtualRegister is freed such that it can be reused for
+// another node.
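(Editorial aside: to make the lifecycle concrete, a sketch of how an allocation pass might drive this class. The node fields match this patch; the driver loop itself is illustrative, not part of the patch.)

    // For each live node in order: retire operands whose last use this is,
    // then allocate a VirtualRegister for the node's own result. A register
    // freed by a use() call can be handed straight back by allocate().
    void allocateVirtualRegisters(Graph& graph, ScoreBoard& scoreBoard)
    {
        for (NodeIndex i = 0; i < graph.size(); ++i) {
            Node& node = graph[i];
            if (!node.refCount)
                continue;                    // dead node: nothing to allocate
            scoreBoard.use(node.child1);     // use() tolerates NoNode children
            scoreBoard.use(node.child2);
            scoreBoard.use(node.child3);
            node.virtualRegister = scoreBoard.allocate();
        }
    }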
+class ScoreBoard {
+public:
+    ScoreBoard(Graph& graph, uint32_t firstTemporary)
+        : m_graph(graph)
+        , m_firstTemporary(firstTemporary)
+    {
+    }
+
+#if DFG_CONSISTENCY_CHECK
+    ~ScoreBoard()
+    {
+        // Every VirtualRegister that was allocated should now be free.
+        ASSERT(m_used.size() == m_free.size());
+        // For every entry in the free list, the use count of the virtual register should be zero.
+        //  * By using the virtual register numbers from m_free, we are checking that all values
+        //    in m_free are < m_used.size(), and correspond to an allocated VirtualRegister.
+        //  * By setting m_used to a non-zero value after checking it, we are checking that all
+        //    entries in m_free are unique (otherwise the second test of m_used will fail).
+        for (size_t i = 0; i < m_free.size(); ++i) {
+            uint32_t virtualRegister = m_free[i];
+            ASSERT(!m_used[virtualRegister]);
+            m_used[virtualRegister] = 1;
+        }
+    }
+#endif
+
+    VirtualRegister allocate()
+    {
+        // Do we have any VirtualRegisters in the free list that were used by
+        // prior nodes, but are now available?
+        if (!m_free.isEmpty()) {
+            uint32_t index = m_free.last();
+            m_free.removeLast();
+            // Use count must have hit zero for it to have been added to the free list!
+            ASSERT(!m_used[index]);
+            return (VirtualRegister)(m_firstTemporary + index);
+        }
+
+        // Allocate a new VirtualRegister, and add a corresponding entry to m_used.
+        size_t next = allocatedCount();
+        m_used.append(0);
+        return (VirtualRegister)(m_firstTemporary + next);
+    }
+
+    // Increment the use count for the VirtualRegister associated with 'child';
+    // if it reaches the node's refcount, free the VirtualRegister.
+    void use(NodeIndex child)
+    {
+        if (child == NoNode)
+            return;
+
+        // Find the virtual register number for this child, and increment its use count.
+        Node& node = m_graph[child];
+        uint32_t index = node.virtualRegister - m_firstTemporary;
+        if (node.refCount == ++m_used[index]) {
+            // If the use count in the scoreboard reaches the use count for the node,
+            // then this was its last use; the virtual register is now free.
+            // Clear the use count & add to the free list.
+            m_used[index] = 0;
+            m_free.append(index);
+        }
+    }
+
+    unsigned allocatedCount()
+    {
+        // m_used contains an entry for every allocated VirtualRegister.
+        return m_used.size();
+    }
+
+private:
+    // The graph, so we can get refCounts for nodes, to determine when values are dead.
+    Graph& m_graph;
+    // The first VirtualRegister to be used as a temporary.
+    uint32_t m_firstTemporary;
+
+    // For every virtual register that has been allocated (either currently alive, or in
+    // the free list), we keep a count of the number of remaining uses until it is dead
+    // (0, in the case of entries in the free list). Since there is an entry for every
+    // allocated VirtualRegister, the length of this array conveniently provides the
+    // next available VirtualRegister number.
+    Vector<uint32_t, 64> m_used;
+    // A free list of VirtualRegisters no longer alive.
+ Vector<uint32_t, 64> m_free; +}; + +} } // namespace JSC::DFG + +#endif +#endif diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp new file mode 100644 index 0000000..7963184 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp @@ -0,0 +1,824 @@ +/* + * Copyright (C) 2011 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGSpeculativeJIT.h" + +#if ENABLE(DFG_JIT) + +namespace JSC { namespace DFG { + +template<bool strict> +GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat& returnFormat) +{ + Node& node = m_jit.graph()[nodeIndex]; + VirtualRegister virtualRegister = node.virtualRegister; + GenerationInfo& info = m_generationInfo[virtualRegister]; + + switch (info.registerFormat()) { + case DataFormatNone: { + GPRReg gpr = allocate(); + JITCompiler::RegisterID reg = JITCompiler::gprToRegisterID(gpr); + + if (node.isConstant()) { + m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); + if (isInt32Constant(nodeIndex)) { + m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), reg); + info.fillInteger(gpr); + returnFormat = DataFormatInteger; + return gpr; + } + m_jit.move(constantAsJSValueAsImmPtr(nodeIndex), reg); + } else { + DataFormat spillFormat = info.spillFormat(); + ASSERT(spillFormat & DataFormatJS); + + m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); + + if (spillFormat == DataFormatJSInteger) { + // If we know this was spilled as an integer we can fill without checking. + if (strict) { + m_jit.load32(JITCompiler::addressFor(virtualRegister), reg); + info.fillInteger(gpr); + returnFormat = DataFormatInteger; + return gpr; + } + m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), reg); + info.fillJSValue(gpr, DataFormatJSInteger); + returnFormat = DataFormatJSInteger; + return gpr; + } + m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), reg); + } + + // Fill as JSValue, and fall through. + info.fillJSValue(gpr, DataFormatJSInteger); + m_gprs.unlock(gpr); + } + + case DataFormatJS: { + // Check the value is an integer. 
+ GPRReg gpr = info.gpr(); + m_gprs.lock(gpr); + JITCompiler::RegisterID reg = JITCompiler::gprToRegisterID(gpr); + speculationCheck(m_jit.branchPtr(MacroAssembler::Below, reg, JITCompiler::tagTypeNumberRegister)); + info.fillJSValue(gpr, DataFormatJSInteger); + // If !strict we're done, return. + if (!strict) { + returnFormat = DataFormatJSInteger; + return gpr; + } + // else fall through & handle as DataFormatJSInteger. + m_gprs.unlock(gpr); + } + + case DataFormatJSInteger: { + // In a strict fill we need to strip off the value tag. + if (strict) { + GPRReg gpr = info.gpr(); + GPRReg result; + // If the register has already been locked we need to take a copy. + // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInteger, not DataFormatJSInteger. + if (m_gprs.isLocked(gpr)) + result = allocate(); + else { + m_gprs.lock(gpr); + info.fillInteger(gpr); + result = gpr; + } + m_jit.zeroExtend32ToPtr(JITCompiler::gprToRegisterID(gpr), JITCompiler::gprToRegisterID(result)); + returnFormat = DataFormatInteger; + return result; + } + + GPRReg gpr = info.gpr(); + m_gprs.lock(gpr); + returnFormat = DataFormatJSInteger; + return gpr; + } + + case DataFormatInteger: { + GPRReg gpr = info.gpr(); + m_gprs.lock(gpr); + returnFormat = DataFormatInteger; + return gpr; + } + + case DataFormatDouble: + case DataFormatCell: + case DataFormatJSDouble: + case DataFormatJSCell: { + terminateSpeculativeExecution(); + returnFormat = DataFormatInteger; + return allocate(); + } + } + + ASSERT_NOT_REACHED(); + return InvalidGPRReg; +} + +SpeculationCheck::SpeculationCheck(MacroAssembler::Jump check, SpeculativeJIT* jit, unsigned recoveryIndex) + : m_check(check) + , m_nodeIndex(jit->m_compileIndex) + , m_recoveryIndex(recoveryIndex) +{ + for (GPRReg gpr = gpr0; gpr < numberOfGPRs; next(gpr)) { + VirtualRegister virtualRegister = jit->m_gprs.name(gpr); + if (virtualRegister != InvalidVirtualRegister) { + GenerationInfo& info = jit->m_generationInfo[virtualRegister]; + m_gprInfo[gpr].nodeIndex = info.nodeIndex(); + m_gprInfo[gpr].format = info.registerFormat(); + } else + m_gprInfo[gpr].nodeIndex = NoNode; + } + for (FPRReg fpr = fpr0; fpr < numberOfFPRs; next(fpr)) { + VirtualRegister virtualRegister = jit->m_fprs.name(fpr); + if (virtualRegister != InvalidVirtualRegister) { + GenerationInfo& info = jit->m_generationInfo[virtualRegister]; + ASSERT(info.registerFormat() == DataFormatDouble); + m_fprInfo[fpr] = info.nodeIndex(); + } else + m_fprInfo[fpr] = NoNode; + } +} + +GPRReg SpeculativeJIT::fillSpeculateInt(NodeIndex nodeIndex, DataFormat& returnFormat) +{ + return fillSpeculateIntInternal<false>(nodeIndex, returnFormat); +} + +GPRReg SpeculativeJIT::fillSpeculateIntStrict(NodeIndex nodeIndex) +{ + DataFormat mustBeDataFormatInteger; + GPRReg result = fillSpeculateIntInternal<true>(nodeIndex, mustBeDataFormatInteger); + ASSERT(mustBeDataFormatInteger == DataFormatInteger); + return result; +} + +GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex) +{ + Node& node = m_jit.graph()[nodeIndex]; + VirtualRegister virtualRegister = node.virtualRegister; + GenerationInfo& info = m_generationInfo[virtualRegister]; + + switch (info.registerFormat()) { + case DataFormatNone: { + GPRReg gpr = allocate(); + JITCompiler::RegisterID reg = JITCompiler::gprToRegisterID(gpr); + + if (node.isConstant()) { + m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); + JSValue jsValue = constantAsJSValue(nodeIndex); + if (jsValue.isCell()) { + 
m_jit.move(MacroAssembler::TrustedImmPtr(jsValue.asCell()), reg); + info.fillJSValue(gpr, DataFormatJSCell); + return gpr; + } + terminateSpeculativeExecution(); + return gpr; + } + ASSERT(info.spillFormat() & DataFormatJS); + m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); + m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), reg); + + if (info.spillFormat() != DataFormatJSCell) + speculationCheck(m_jit.branchTestPtr(MacroAssembler::NonZero, reg, JITCompiler::tagMaskRegister)); + info.fillJSValue(gpr, DataFormatJSCell); + return gpr; + } + + case DataFormatCell: + case DataFormatJSCell: { + GPRReg gpr = info.gpr(); + m_gprs.lock(gpr); + return gpr; + } + + case DataFormatJS: { + GPRReg gpr = info.gpr(); + m_gprs.lock(gpr); + JITCompiler::RegisterID reg = JITCompiler::gprToRegisterID(gpr); + speculationCheck(m_jit.branchTestPtr(MacroAssembler::NonZero, reg, JITCompiler::tagMaskRegister)); + info.fillJSValue(gpr, DataFormatJSCell); + return gpr; + } + + case DataFormatJSInteger: + case DataFormatInteger: + case DataFormatJSDouble: + case DataFormatDouble: { + terminateSpeculativeExecution(); + return allocate(); + } + } + + ASSERT_NOT_REACHED(); + return InvalidGPRReg; +} + +bool SpeculativeJIT::compile(Node& node) +{ + checkConsistency(); + NodeType op = node.op; + + switch (op) { + case Int32Constant: + case DoubleConstant: + case JSConstant: + initConstantInfo(m_compileIndex); + break; + + case GetLocal: { + GPRTemporary result(this); + m_jit.loadPtr(JITCompiler::addressFor(node.local()), result.registerID()); + jsValueResult(result.gpr(), m_compileIndex); + break; + } + + case SetLocal: { + JSValueOperand value(this, node.child1); + m_jit.storePtr(value.registerID(), JITCompiler::addressFor(node.local())); + noResult(m_compileIndex); + break; + } + + case BitAnd: + case BitOr: + case BitXor: + if (isInt32Constant(node.child1)) { + SpeculateIntegerOperand op2(this, node.child2); + GPRTemporary result(this, op2); + + bitOp(op, valueOfInt32Constant(node.child1), op2.registerID(), result.registerID()); + + integerResult(result.gpr(), m_compileIndex); + } else if (isInt32Constant(node.child2)) { + SpeculateIntegerOperand op1(this, node.child1); + GPRTemporary result(this, op1); + + bitOp(op, valueOfInt32Constant(node.child2), op1.registerID(), result.registerID()); + + integerResult(result.gpr(), m_compileIndex); + } else { + SpeculateIntegerOperand op1(this, node.child1); + SpeculateIntegerOperand op2(this, node.child2); + GPRTemporary result(this, op1, op2); + + MacroAssembler::RegisterID reg1 = op1.registerID(); + MacroAssembler::RegisterID reg2 = op2.registerID(); + bitOp(op, reg1, reg2, result.registerID()); + + integerResult(result.gpr(), m_compileIndex); + } + break; + + case BitRShift: + case BitLShift: + case BitURShift: + if (isInt32Constant(node.child2)) { + SpeculateIntegerOperand op1(this, node.child1); + GPRTemporary result(this, op1); + + shiftOp(op, op1.registerID(), valueOfInt32Constant(node.child2) & 0x1f, result.registerID()); + + integerResult(result.gpr(), m_compileIndex); + } else { + // Do not allow shift amount to be used as the result, MacroAssembler does not permit this. 
+ SpeculateIntegerOperand op1(this, node.child1); + SpeculateIntegerOperand op2(this, node.child2); + GPRTemporary result(this, op1); + + MacroAssembler::RegisterID reg1 = op1.registerID(); + MacroAssembler::RegisterID reg2 = op2.registerID(); + shiftOp(op, reg1, reg2, result.registerID()); + + integerResult(result.gpr(), m_compileIndex); + } + break; + + case UInt32ToNumber: { + IntegerOperand op1(this, node.child1); + GPRTemporary result(this, op1); + + // Test the operand is positive. + speculationCheck(m_jit.branch32(MacroAssembler::LessThan, op1.registerID(), TrustedImm32(0))); + + m_jit.move(op1.registerID(), result.registerID()); + integerResult(result.gpr(), m_compileIndex, op1.format()); + break; + } + + case NumberToInt32: { + SpeculateIntegerOperand op1(this, node.child1); + GPRTemporary result(this, op1); + m_jit.move(op1.registerID(), result.registerID()); + integerResult(result.gpr(), m_compileIndex, op1.format()); + break; + } + + case Int32ToNumber: { + SpeculateIntegerOperand op1(this, node.child1); + GPRTemporary result(this, op1); + m_jit.move(op1.registerID(), result.registerID()); + integerResult(result.gpr(), m_compileIndex, op1.format()); + break; + } + + case ValueToInt32: { + SpeculateIntegerOperand op1(this, node.child1); + GPRTemporary result(this, op1); + m_jit.move(op1.registerID(), result.registerID()); + integerResult(result.gpr(), m_compileIndex, op1.format()); + break; + } + + case ValueToNumber: { + SpeculateIntegerOperand op1(this, node.child1); + GPRTemporary result(this, op1); + m_jit.move(op1.registerID(), result.registerID()); + integerResult(result.gpr(), m_compileIndex, op1.format()); + break; + } + + case ValueAdd: + case ArithAdd: { + int32_t imm1; + if (isDoubleConstantWithInt32Value(node.child1, imm1)) { + SpeculateIntegerOperand op2(this, node.child2); + GPRTemporary result(this); + + MacroAssembler::RegisterID reg = op2.registerID(); + speculationCheck(m_jit.branchAdd32(MacroAssembler::Overflow, reg, Imm32(imm1), result.registerID())); + + integerResult(result.gpr(), m_compileIndex); + break; + } + + int32_t imm2; + if (isDoubleConstantWithInt32Value(node.child2, imm2)) { + SpeculateIntegerOperand op1(this, node.child1); + GPRTemporary result(this); + + MacroAssembler::RegisterID reg = op1.registerID(); + speculationCheck(m_jit.branchAdd32(MacroAssembler::Overflow, reg, Imm32(imm2), result.registerID())); + + integerResult(result.gpr(), m_compileIndex); + break; + } + + SpeculateIntegerOperand op1(this, node.child1); + SpeculateIntegerOperand op2(this, node.child2); + GPRTemporary result(this, op1, op2); + + GPRReg gpr1 = op1.gpr(); + GPRReg gpr2 = op2.gpr(); + GPRReg gprResult = result.gpr(); + MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, JITCompiler::gprToRegisterID(gpr1), JITCompiler::gprToRegisterID(gpr2), JITCompiler::gprToRegisterID(gprResult)); + + if (gpr1 == gprResult) + speculationCheck(check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2)); + else if (gpr2 == gprResult) + speculationCheck(check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1)); + else + speculationCheck(check); + + integerResult(gprResult, m_compileIndex); + break; + } + + case ArithSub: { + int32_t imm2; + if (isDoubleConstantWithInt32Value(node.child2, imm2)) { + SpeculateIntegerOperand op1(this, node.child1); + GPRTemporary result(this); + + MacroAssembler::RegisterID reg = op1.registerID(); + speculationCheck(m_jit.branchSub32(MacroAssembler::Overflow, reg, Imm32(imm2), result.registerID())); + + integerResult(result.gpr(), 
m_compileIndex); + break; + } + + SpeculateIntegerOperand op1(this, node.child1); + SpeculateIntegerOperand op2(this, node.child2); + GPRTemporary result(this); + + MacroAssembler::RegisterID reg1 = op1.registerID(); + MacroAssembler::RegisterID reg2 = op2.registerID(); + speculationCheck(m_jit.branchSub32(MacroAssembler::Overflow, reg1, reg2, result.registerID())); + + integerResult(result.gpr(), m_compileIndex); + break; + } + + case ArithMul: { + SpeculateIntegerOperand op1(this, node.child1); + SpeculateIntegerOperand op2(this, node.child2); + GPRTemporary result(this); + + MacroAssembler::RegisterID reg1 = op1.registerID(); + MacroAssembler::RegisterID reg2 = op2.registerID(); + speculationCheck(m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.registerID())); + + MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.registerID()); + speculationCheck(m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0))); + speculationCheck(m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0))); + resultNonZero.link(&m_jit); + + integerResult(result.gpr(), m_compileIndex); + break; + } + + case ArithDiv: { + SpeculateIntegerOperand op1(this, node.child1); + SpeculateIntegerOperand op2(this, node.child2); + GPRTemporary result(this, op1, op2); + + terminateSpeculativeExecution(); + + integerResult(result.gpr(), m_compileIndex); + break; + } + + case ArithMod: { + SpeculateIntegerOperand op1(this, node.child1); + SpeculateIntegerOperand op2(this, node.child2); + GPRTemporary result(this, op1, op2); + + terminateSpeculativeExecution(); + + integerResult(result.gpr(), m_compileIndex); + break; + } + + case LogicalNot: { + JSValueOperand value(this, node.child1); + GPRTemporary result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add). + + m_jit.move(value.registerID(), result.registerID()); + m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), result.registerID()); + speculationCheck(m_jit.branchTestPtr(JITCompiler::NonZero, result.registerID(), TrustedImm32(static_cast<int32_t>(~1)))); + m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueTrue)), result.registerID()); + + // If we add a DataFormatBool, we should use it here. + jsValueResult(result.gpr(), m_compileIndex); + break; + } + + case CompareLess: { + SpeculateIntegerOperand op1(this, node.child1); + SpeculateIntegerOperand op2(this, node.child2); + GPRTemporary result(this, op1, op2); + + m_jit.set32Compare32(JITCompiler::LessThan, op1.registerID(), op2.registerID(), result.registerID()); + + // If we add a DataFormatBool, we should use it here. + m_jit.or32(TrustedImm32(ValueFalse), result.registerID()); + jsValueResult(result.gpr(), m_compileIndex); + break; + } + + case CompareLessEq: { + SpeculateIntegerOperand op1(this, node.child1); + SpeculateIntegerOperand op2(this, node.child2); + GPRTemporary result(this, op1, op2); + + m_jit.set32Compare32(JITCompiler::LessThanOrEqual, op1.registerID(), op2.registerID(), result.registerID()); + + // If we add a DataFormatBool, we should use it here. 
+ m_jit.or32(TrustedImm32(ValueFalse), result.registerID()); + jsValueResult(result.gpr(), m_compileIndex); + break; + } + + case CompareEq: { + SpeculateIntegerOperand op1(this, node.child1); + SpeculateIntegerOperand op2(this, node.child2); + GPRTemporary result(this, op1, op2); + + m_jit.set32Compare32(JITCompiler::Equal, op1.registerID(), op2.registerID(), result.registerID()); + + // If we add a DataFormatBool, we should use it here. + m_jit.or32(TrustedImm32(ValueFalse), result.registerID()); + jsValueResult(result.gpr(), m_compileIndex); + break; + } + + case CompareStrictEq: { + SpeculateIntegerOperand op1(this, node.child1); + SpeculateIntegerOperand op2(this, node.child2); + GPRTemporary result(this, op1, op2); + + m_jit.set32Compare32(JITCompiler::Equal, op1.registerID(), op2.registerID(), result.registerID()); + + // If we add a DataFormatBool, we should use it here. + m_jit.or32(TrustedImm32(ValueFalse), result.registerID()); + jsValueResult(result.gpr(), m_compileIndex); + break; + } + + case GetByVal: { + NodeIndex alias = node.child3; + if (alias != NoNode) { + // FIXME: result should be able to reuse child1, child2. Should have an 'UnusedOperand' type. + JSValueOperand aliasedValue(this, node.child3); + GPRTemporary result(this, aliasedValue); + m_jit.move(aliasedValue.registerID(), result.registerID()); + jsValueResult(result.gpr(), m_compileIndex); + break; + } + + SpeculateCellOperand base(this, node.child1); + SpeculateStrictInt32Operand property(this, node.child2); + GPRTemporary storage(this); + + MacroAssembler::RegisterID baseReg = base.registerID(); + MacroAssembler::RegisterID propertyReg = property.registerID(); + MacroAssembler::RegisterID storageReg = storage.registerID(); + + // Get the array storage. We haven't yet checked this is a JSArray, so this is only safe if + // an access with offset JSArray::storageOffset() is valid for all JSCells! + m_jit.loadPtr(MacroAssembler::Address(baseReg, JSArray::storageOffset()), storageReg); + + // Check that base is an array, and that property is contained within m_vector (< m_vectorLength). + speculationCheck(m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg), MacroAssembler::TrustedImmPtr(m_jit.globalData()->jsArrayVPtr))); + speculationCheck(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, JSArray::vectorLengthOffset()))); + + // FIXME: In cases where there are subsequent by_val accesses to the same base it might help to cache + // the storage pointer - especially if there happens to be another register free right now. If we do so, + // then we'll need to allocate a new temporary for result. + GPRTemporary& result = storage; + m_jit.loadPtr(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), result.registerID()); + speculationCheck(m_jit.branchTestPtr(MacroAssembler::Zero, result.registerID())); + + jsValueResult(result.gpr(), m_compileIndex); + break; + } + + case PutByVal: { + SpeculateCellOperand base(this, node.child1); + SpeculateStrictInt32Operand property(this, node.child2); + JSValueOperand value(this, node.child3); + GPRTemporary storage(this); + + // Map base, property & value into registers, allocate a register for storage. 
+ MacroAssembler::RegisterID baseReg = base.registerID(); + MacroAssembler::RegisterID propertyReg = property.registerID(); + MacroAssembler::RegisterID valueReg = value.registerID(); + MacroAssembler::RegisterID storageReg = storage.registerID(); + + // Check that base is an array, and that property is contained within m_vector (< m_vectorLength). + speculationCheck(m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg), MacroAssembler::TrustedImmPtr(m_jit.globalData()->jsArrayVPtr))); + speculationCheck(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, JSArray::vectorLengthOffset()))); + + // Get the array storage. + m_jit.loadPtr(MacroAssembler::Address(baseReg, JSArray::storageOffset()), storageReg); + + // Check if we're writing to a hole; if so increment m_numValuesInVector. + MacroAssembler::Jump notHoleValue = m_jit.branchTestPtr(MacroAssembler::NonZero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); + m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector))); + + // If we're writing to a hole we might be growing the array; + MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_length))); + m_jit.add32(TrustedImm32(1), propertyReg); + m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_length))); + m_jit.sub32(TrustedImm32(1), propertyReg); + + lengthDoesNotNeedUpdate.link(&m_jit); + notHoleValue.link(&m_jit); + + // Store the value to the array. + m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); + + noResult(m_compileIndex); + break; + } + + case PutByValAlias: { + SpeculateCellOperand base(this, node.child1); + SpeculateStrictInt32Operand property(this, node.child2); + JSValueOperand value(this, node.child3); + GPRTemporary storage(this, base); // storage may overwrite base. + + // Get the array storage. + MacroAssembler::RegisterID storageReg = storage.registerID(); + m_jit.loadPtr(MacroAssembler::Address(base.registerID(), JSArray::storageOffset()), storageReg); + + // Map property & value into registers. + MacroAssembler::RegisterID propertyReg = property.registerID(); + MacroAssembler::RegisterID valueReg = value.registerID(); + + // Store the value to the array. 
+ m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); + + noResult(m_compileIndex); + break; + } + + case DFG::Jump: { + BlockIndex taken = m_jit.graph().blockIndexForBytecodeOffset(node.takenBytecodeOffset()); + if (taken != (m_block + 1)) + addBranch(m_jit.jump(), taken); + noResult(m_compileIndex); + break; + } + + case Branch: { + JSValueOperand value(this, node.child1); + MacroAssembler::RegisterID valueReg = value.registerID(); + + BlockIndex taken = m_jit.graph().blockIndexForBytecodeOffset(node.takenBytecodeOffset()); + BlockIndex notTaken = m_jit.graph().blockIndexForBytecodeOffset(node.notTakenBytecodeOffset()); + + // Integers + addBranch(m_jit.branchPtr(MacroAssembler::Equal, valueReg, MacroAssembler::ImmPtr(JSValue::encode(jsNumber(0)))), notTaken); + MacroAssembler::Jump isNonZeroInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, valueReg, JITCompiler::tagTypeNumberRegister); + + // Booleans + addBranch(m_jit.branchPtr(MacroAssembler::Equal, valueReg, MacroAssembler::ImmPtr(JSValue::encode(jsBoolean(false)))), notTaken); + speculationCheck(m_jit.branchPtr(MacroAssembler::NotEqual, valueReg, MacroAssembler::ImmPtr(JSValue::encode(jsBoolean(true))))); + + if (taken == (m_block + 1)) + isNonZeroInteger.link(&m_jit); + else { + addBranch(isNonZeroInteger, taken); + addBranch(m_jit.jump(), taken); + } + + noResult(m_compileIndex); + break; + } + + case Return: { + ASSERT(JITCompiler::callFrameRegister != JITCompiler::regT1); + ASSERT(JITCompiler::regT1 != JITCompiler::returnValueRegister); + ASSERT(JITCompiler::returnValueRegister != JITCompiler::callFrameRegister); + +#if DFG_SUCCESS_STATS + static SamplingCounter counter("SpeculativeJIT"); + m_jit.emitCount(counter); +#endif + + // Return the result in returnValueRegister. + JSValueOperand op1(this, node.child1); + m_jit.move(op1.registerID(), JITCompiler::returnValueRegister); + + // Grab the return address. + m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, JITCompiler::regT1); + // Restore our caller's "r". + m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, JITCompiler::callFrameRegister); + // Return. + m_jit.restoreReturnAddressBeforeReturn(JITCompiler::regT1); + m_jit.ret(); + + noResult(m_compileIndex); + break; + } + + case ConvertThis: { + SpeculateCellOperand thisValue(this, node.child1); + GPRTemporary temp(this); + + m_jit.loadPtr(JITCompiler::Address(thisValue.registerID(), JSCell::structureOffset()), temp.registerID()); + speculationCheck(m_jit.branchTest8(JITCompiler::NonZero, JITCompiler::Address(temp.registerID(), Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(NeedsThisConversion))); + + cellResult(thisValue.gpr(), m_compileIndex); + break; + } + + case GetById: { + JSValueOperand base(this, node.child1); + GPRReg baseGPR = base.gpr(); + flushRegisters(); + + GPRResult result(this); + callOperation(operationGetById, result.gpr(), baseGPR, identifier(node.identifierNumber())); + jsValueResult(result.gpr(), m_compileIndex); + break; + } + + case PutById: { + JSValueOperand base(this, node.child1); + JSValueOperand value(this, node.child2); + GPRReg valueGPR = value.gpr(); + GPRReg baseGPR = base.gpr(); + flushRegisters(); + + callOperation(m_jit.codeBlock()->isStrictMode() ? 
operationPutByIdStrict : operationPutByIdNonStrict, valueGPR, baseGPR, identifier(node.identifierNumber()));
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case PutByIdDirect: {
+ JSValueOperand base(this, node.child1);
+ JSValueOperand value(this, node.child2);
+ GPRReg valueGPR = value.gpr();
+ GPRReg baseGPR = base.gpr();
+ flushRegisters();
+
+ callOperation(m_jit.codeBlock()->isStrictMode() ? operationPutByIdDirectStrict : operationPutByIdDirectNonStrict, valueGPR, baseGPR, identifier(node.identifierNumber()));
+ noResult(m_compileIndex);
+ break;
+ }
+
+ case GetGlobalVar: {
+ GPRTemporary result(this);
+
+ JSVariableObject* globalObject = m_jit.codeBlock()->globalObject();
+ m_jit.loadPtr(globalObject->addressOfRegisters(), result.registerID());
+ m_jit.loadPtr(JITCompiler::addressForGlobalVar(result.registerID(), node.varNumber()), result.registerID());
+
+ jsValueResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ case PutGlobalVar: {
+ JSValueOperand value(this, node.child1);
+ GPRTemporary temp(this);
+
+ JSVariableObject* globalObject = m_jit.codeBlock()->globalObject();
+ m_jit.loadPtr(globalObject->addressOfRegisters(), temp.registerID());
+ m_jit.storePtr(value.registerID(), JITCompiler::addressForGlobalVar(temp.registerID(), node.varNumber()));
+
+ noResult(m_compileIndex);
+ break;
+ }
+ }
+
+ // Check if generation for the speculative path has failed catastrophically. :-)
+ // In the future, we may want to throw away the code we've generated in this case.
+ // For now, there is no point generating any further code; return immediately.
+ if (m_didTerminate)
+ return false;
+
+ if (node.mustGenerate())
+ use(m_compileIndex);
+
+ checkConsistency();
+
+ return true;
+}
+
+bool SpeculativeJIT::compile(BasicBlock& block)
+{
+ ASSERT(m_compileIndex == block.begin);
+ m_blockHeads[m_block] = m_jit.label();
+#if DFG_JIT_BREAK_ON_EVERY_BLOCK
+ m_jit.breakpoint();
+#endif
+
+ for (; m_compileIndex < block.end; ++m_compileIndex) {
+ Node& node = m_jit.graph()[m_compileIndex];
+ if (!node.refCount)
+ continue;
+
+#if DFG_DEBUG_VERBOSE
+ fprintf(stderr, "SpeculativeJIT generating Node @%d at JIT offset 0x%x\n", (int)m_compileIndex, m_jit.debugOffset());
+#endif
+#if DFG_JIT_BREAK_ON_EVERY_NODE
+ m_jit.breakpoint();
+#endif
+ if (!compile(node))
+ return false;
+ }
+ return true;
+}
+
+bool SpeculativeJIT::compile()
+{
+ ASSERT(!m_compileIndex);
+ Vector<BasicBlock>& blocks = m_jit.graph().m_blocks; // Take a reference; copying the block list would be wasteful.
+ for (m_block = 0; m_block < blocks.size(); ++m_block) {
+ if (!compile(blocks[m_block]))
+ return false;
+ }
+ linkBranches();
+ return true;
+}
+
+} } // namespace JSC::DFG
+
+#endif diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h new file mode 100644 index 0000000..965cdbe --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h @@ -0,0 +1,372 @@ +/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC.
``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGSpeculativeJIT_h
+#define DFGSpeculativeJIT_h
+
+#if ENABLE(DFG_JIT)
+
+#include <dfg/DFGJITCodeGenerator.h>
+
+namespace JSC { namespace DFG {
+
+class SpeculativeJIT;
+
+// This enum describes the types of additional recovery that
+// may need to be performed should a speculation check fail.
+enum SpeculationRecoveryType {
+ SpeculativeAdd
+};
+
+// === SpeculationRecovery ===
+//
+// This class provides additional information that may be associated with a
+// speculation check - for example, how to undo a speculative operation (such
+// as reversing an add that overwrote one of its operands) before entering
+// the non-speculative path.
+class SpeculationRecovery {
+public:
+ SpeculationRecovery(SpeculationRecoveryType type, GPRReg dest, GPRReg src)
+ : m_type(type)
+ , m_dest(dest)
+ , m_src(src)
+ {
+ }
+
+ SpeculationRecoveryType type() { return m_type; }
+ GPRReg dest() { return m_dest; }
+ GPRReg src() { return m_src; }
+
+private:
+ // Indicates the type of additional recovery to be performed.
+ SpeculationRecoveryType m_type;
+ // Different recovery types may require different additional information here.
+ GPRReg m_dest;
+ GPRReg m_src;
+};
+
+// === SpeculationCheck ===
+//
+// This structure records a bail-out from the speculative path,
+// which will need to be linked into the non-speculative one.
+struct SpeculationCheck {
+ SpeculationCheck(MacroAssembler::Jump, SpeculativeJIT*, unsigned recoveryIndex = 0);
+
+ // The location of the jump out from the speculative path,
+ // and the node we were generating code for.
+ MacroAssembler::Jump m_check;
+ NodeIndex m_nodeIndex;
+ // Used to record any additional recovery to be performed; this
+ // value is an index into the SpeculativeJIT's m_speculationRecoveryList
+ // array, offset by 1. (m_recoveryIndex == 0) means no recovery.
+ unsigned m_recoveryIndex;
+
+ struct RegisterInfo {
+ NodeIndex nodeIndex;
+ DataFormat format;
+ };
+ RegisterInfo m_gprInfo[numberOfGPRs];
+ NodeIndex m_fprInfo[numberOfFPRs];
+};
+typedef SegmentedVector<SpeculationCheck, 16> SpeculationCheckVector;
+
+
+// === SpeculativeJIT ===
+//
+// The SpeculativeJIT is used to generate a fast, but potentially
+// incomplete code path for the dataflow. When generating code
+// we may make assumptions about operand types, dynamically check,
+// and bail out to an alternate code path if these checks fail.
+// Importantly, the speculative code path cannot be reentered once
+// a speculative check has failed. This allows the SpeculativeJIT
+// to propagate type information (including information that has
+// only speculatively been asserted) through the dataflow.
+class SpeculativeJIT : public JITCodeGenerator {
+ friend struct SpeculationCheck;
+public:
+ SpeculativeJIT(JITCompiler& jit)
+ : JITCodeGenerator(jit, true)
+ , m_didTerminate(false)
+ {
+ }
+
+ bool compile();
+
+ // Retrieve the list of bail-outs from the speculative path,
+ // and additional recovery information.
+ SpeculationCheckVector& speculationChecks()
+ {
+ return m_speculationChecks;
+ }
+ SpeculationRecovery* speculationRecovery(size_t index)
+ {
+ // SpeculationCheck::m_recoveryIndex is offset by 1;
+ // 0 means no recovery.
+ return index ? &m_speculationRecoveryList[index - 1] : 0;
+ }
+
+ // Called by the speculative operand types, below, to fill operands into
+ // machine registers, implicitly generating speculation checks as needed.
+ GPRReg fillSpeculateInt(NodeIndex, DataFormat& returnFormat);
+ GPRReg fillSpeculateIntStrict(NodeIndex);
+ GPRReg fillSpeculateCell(NodeIndex);
+
+private:
+ bool compile(Node&);
+ bool compile(BasicBlock&);
+
+ bool isDoubleConstantWithInt32Value(NodeIndex nodeIndex, int32_t& out)
+ {
+ if (!m_jit.isDoubleConstant(nodeIndex))
+ return false;
+ double value = m_jit.valueOfDoubleConstant(nodeIndex);
+
+ int32_t asInt32 = static_cast<int32_t>(value);
+ if (value != asInt32)
+ return false;
+ if (!asInt32 && signbit(value)) // Reject -0, which has no int32 representation.
+ return false;
+
+ out = asInt32;
+ return true;
+ }
+
+ // Add a speculation check without additional recovery.
+ void speculationCheck(MacroAssembler::Jump jumpToFail)
+ {
+ m_speculationChecks.append(SpeculationCheck(jumpToFail, this));
+ }
+ // Add a speculation check with additional recovery.
+ void speculationCheck(MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
+ {
+ m_speculationRecoveryList.append(recovery);
+ m_speculationChecks.append(SpeculationCheck(jumpToFail, this, m_speculationRecoveryList.size()));
+ }
+
+ // Called when we statically determine that a speculation will fail.
+ void terminateSpeculativeExecution()
+ {
+ // FIXME: in cases where we can statically determine we're going to bail out from the speculative
+ // JIT we should probably rewind code generation and only produce the non-speculative path.
+ m_didTerminate = true;
+ speculationCheck(m_jit.jump());
+ }
+
+ template<bool strict>
+ GPRReg fillSpeculateIntInternal(NodeIndex, DataFormat& returnFormat);
+
+ // It is possible, during speculative generation, to reach a situation in which we
+ // can statically determine a speculation will fail (for example, when two nodes
+ // will make conflicting speculations about the same operand). In such cases this
+ // flag is set, indicating no further code generation should take place.
+ bool m_didTerminate;
+ // This vector tracks bail-outs from the speculative path to the non-speculative one.
+ SpeculationCheckVector m_speculationChecks;
+ // Some bail-outs need to record additional information describing the specific recovery
+ // to be performed (for example, on detected overflow from an add, we may need to
+ // reverse the addition if an operand is being overwritten).
+ Vector<SpeculationRecovery, 16> m_speculationRecoveryList;
+};
+
+
+// === Speculative Operand types ===
+//
+// SpeculateIntegerOperand, SpeculateStrictInt32Operand and SpeculateCellOperand.
+//
+// These are used to lock the operands to a node into machine registers within the
+// SpeculativeJIT. The classes operate like those provided by the JITCodeGenerator,
+// however these will speculatively check that the operand is of a more restrictive
+// type than we can statically determine it to have. If the operand does not have
+// the requested type, a bail-out to the non-speculative path will be taken.
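+//
+// For illustration only (a sketch, not part of the interface): a node handler
+// in SpeculativeJIT::compile(Node&) typically uses these classes as the
+// compare cases in DFGSpeculativeJIT.cpp do:
+//
+//     SpeculateIntegerOperand op1(this, node.child1);
+//     SpeculateIntegerOperand op2(this, node.child2);
+//     GPRTemporary result(this, op1, op2);
+//     m_jit.set32Compare32(JITCompiler::Equal, op1.registerID(), op2.registerID(), result.registerID());
+//
+// The operand is filled into a locked machine register on construction (if the
+// value is already filled) or on the first call to gpr()/registerID(), emitting
+// a speculation check if the required type cannot be proven; the destructor
+// unlocks the register.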
+
+class SpeculateIntegerOperand {
+public:
+ explicit SpeculateIntegerOperand(SpeculativeJIT* jit, NodeIndex index)
+ : m_jit(jit)
+ , m_index(index)
+ , m_gprOrInvalid(InvalidGPRReg)
+#ifndef NDEBUG
+ , m_format(DataFormatNone)
+#endif
+ {
+ ASSERT(m_jit);
+ if (jit->isFilled(index))
+ gpr();
+ }
+
+ ~SpeculateIntegerOperand()
+ {
+ ASSERT(m_gprOrInvalid != InvalidGPRReg);
+ m_jit->unlock(m_gprOrInvalid);
+ }
+
+ NodeIndex index() const
+ {
+ return m_index;
+ }
+
+ GPRReg gpr()
+ {
+ if (m_gprOrInvalid == InvalidGPRReg)
+ m_gprOrInvalid = m_jit->fillSpeculateInt(index(), m_format);
+ return m_gprOrInvalid;
+ }
+
+ DataFormat format()
+ {
+ gpr(); // m_format is set when the register is filled and locked.
+ ASSERT(m_format == DataFormatInteger || m_format == DataFormatJSInteger);
+ return m_format;
+ }
+
+ MacroAssembler::RegisterID registerID()
+ {
+ return JITCompiler::gprToRegisterID(gpr());
+ }
+
+private:
+ SpeculativeJIT* m_jit;
+ NodeIndex m_index;
+ GPRReg m_gprOrInvalid;
+ DataFormat m_format;
+};
+
+class SpeculateStrictInt32Operand {
+public:
+ explicit SpeculateStrictInt32Operand(SpeculativeJIT* jit, NodeIndex index)
+ : m_jit(jit)
+ , m_index(index)
+ , m_gprOrInvalid(InvalidGPRReg)
+ {
+ ASSERT(m_jit);
+ if (jit->isFilled(index))
+ gpr();
+ }
+
+ ~SpeculateStrictInt32Operand()
+ {
+ ASSERT(m_gprOrInvalid != InvalidGPRReg);
+ m_jit->unlock(m_gprOrInvalid);
+ }
+
+ NodeIndex index() const
+ {
+ return m_index;
+ }
+
+ GPRReg gpr()
+ {
+ if (m_gprOrInvalid == InvalidGPRReg)
+ m_gprOrInvalid = m_jit->fillSpeculateIntStrict(index());
+ return m_gprOrInvalid;
+ }
+
+ MacroAssembler::RegisterID registerID()
+ {
+ return JITCompiler::gprToRegisterID(gpr());
+ }
+
+private:
+ SpeculativeJIT* m_jit;
+ NodeIndex m_index;
+ GPRReg m_gprOrInvalid;
+};
+
+class SpeculateCellOperand {
+public:
+ explicit SpeculateCellOperand(SpeculativeJIT* jit, NodeIndex index)
+ : m_jit(jit)
+ , m_index(index)
+ , m_gprOrInvalid(InvalidGPRReg)
+ {
+ ASSERT(m_jit);
+ if (jit->isFilled(index))
+ gpr();
+ }
+
+ ~SpeculateCellOperand()
+ {
+ ASSERT(m_gprOrInvalid != InvalidGPRReg);
+ m_jit->unlock(m_gprOrInvalid);
+ }
+
+ NodeIndex index() const
+ {
+ return m_index;
+ }
+
+ GPRReg gpr()
+ {
+ if (m_gprOrInvalid == InvalidGPRReg)
+ m_gprOrInvalid = m_jit->fillSpeculateCell(index());
+ return m_gprOrInvalid;
+ }
+
+ MacroAssembler::RegisterID registerID()
+ {
+ return JITCompiler::gprToRegisterID(gpr());
+ }
+
+private:
+ SpeculativeJIT* m_jit;
+ NodeIndex m_index;
+ GPRReg m_gprOrInvalid;
+};
+
+
+// === SpeculationCheckIndexIterator ===
+//
+// This class is used by the non-speculative JIT to check which
+// nodes require entry points from the speculative path.
+class SpeculationCheckIndexIterator {
+public:
+ SpeculationCheckIndexIterator(SpeculationCheckVector& speculationChecks)
+ : m_speculationChecks(speculationChecks)
+ , m_iter(m_speculationChecks.begin())
+ , m_end(m_speculationChecks.end())
+ {
+ }
+
+ bool hasCheckAtIndex(NodeIndex nodeIndex)
+ {
+ // Checks are recorded in node order, so a single monotonic pass suffices;
+ // callers must query node indices in non-decreasing order.
+ while (m_iter != m_end) {
+ NodeIndex current = m_iter->m_nodeIndex;
+ if (current >= nodeIndex)
+ return current == nodeIndex;
+ ++m_iter;
+ }
+ return false;
+ }
+
+private:
+ SpeculationCheckVector& m_speculationChecks;
+ SpeculationCheckVector::Iterator m_iter;
+ SpeculationCheckVector::Iterator m_end;
+};
+
+
+} } // namespace JSC::DFG
+
+#endif
+#endif
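
A minimal sketch of the intended consumption pattern for SpeculationCheckIndexIterator (the emitEntryPoints wrapper and the m_entryLabels container are illustrative assumptions, not part of this patch): while the non-speculative JIT walks a block's nodes, it queries hasCheckAtIndex() in increasing node order and binds an entry label wherever the speculative path recorded a bail-out.

    // Hypothetical consumer; names marked 'assumed' are illustrative only.
    void emitEntryPoints(SpeculationCheckIndexIterator& checkIterator, BasicBlock& block)
    {
        for (NodeIndex index = block.begin; index < block.end; ++index) {
            // hasCheckAtIndex() advances its internal iterator monotonically,
            // so queries must be made in non-decreasing node order, as here.
            if (checkIterator.hasCheckAtIndex(index))
                m_entryLabels.append(m_jit.label()); // m_entryLabels: assumed container of entry labels
            // ... generate the non-speculative code for the node at 'index' ...
        }
    }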