Diffstat (limited to 'V8Binding/v8/src/arm')
-rw-r--r--  V8Binding/v8/src/arm/cfg-arm.cc           | 301
-rw-r--r--  V8Binding/v8/src/arm/codegen-arm-inl.h    |  31
-rw-r--r--  V8Binding/v8/src/arm/codegen-arm.cc       | 156
-rw-r--r--  V8Binding/v8/src/arm/codegen-arm.h        | 121
-rw-r--r--  V8Binding/v8/src/arm/ic-arm.cc            |  12
-rw-r--r--  V8Binding/v8/src/arm/stub-cache-arm.cc    |  45
-rw-r--r--  V8Binding/v8/src/arm/virtual-frame-arm.h  |  15
7 files changed, 466 insertions(+), 215 deletions(-)
diff --git a/V8Binding/v8/src/arm/cfg-arm.cc b/V8Binding/v8/src/arm/cfg-arm.cc
new file mode 100644
index 0000000..34e64b3
--- /dev/null
+++ b/V8Binding/v8/src/arm/cfg-arm.cc
@@ -0,0 +1,301 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "cfg.h"
+#include "codegen-inl.h"
+#include "codegen-arm.h"  // Include after codegen-inl.h.
+#include "macro-assembler-arm.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void InstructionBlock::Compile(MacroAssembler* masm) {
+  ASSERT(!is_marked());
+  is_marked_ = true;
+  {
+    Comment cmnt(masm, "[ InstructionBlock");
+    for (int i = 0, len = instructions_.length(); i < len; i++) {
+      // If the location of the current instruction is a temp, then the
+      // instruction cannot be in tail position in the block.  Allocate the
+      // temp based on peeking ahead to the next instruction.
+      Instruction* instr = instructions_[i];
+      Location* loc = instr->location();
+      if (loc->is_temporary()) {
+        instructions_[i + 1]->FastAllocate(TempLocation::cast(loc));
+      }
+      instr->Compile(masm);
+    }
+  }
+  successor_->Compile(masm);
+}
+
+
+void EntryNode::Compile(MacroAssembler* masm) {
+  ASSERT(!is_marked());
+  is_marked_ = true;
+  {
+    Comment cmnt(masm, "[ EntryNode");
+    __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+    __ add(fp, sp, Operand(2 * kPointerSize));
+    int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
+    if (count > 0) {
+      __ mov(ip, Operand(Factory::undefined_value()));
+      for (int i = 0; i < count; i++) {
+        __ push(ip);
+      }
+    }
+    if (FLAG_trace) {
+      __ CallRuntime(Runtime::kTraceEnter, 0);
+    }
+    if (FLAG_check_stack) {
+      StackCheckStub stub;
+      __ CallStub(&stub);
+    }
+  }
+  successor_->Compile(masm);
+}
+
+
+void ExitNode::Compile(MacroAssembler* masm) {
+  ASSERT(!is_marked());
+  is_marked_ = true;
+  Comment cmnt(masm, "[ ExitNode");
+  if (FLAG_trace) {
+    __ push(r0);
+    __ CallRuntime(Runtime::kTraceExit, 1);
+  }
+  __ mov(sp, fp);
+  __ ldm(ia_w, sp, fp.bit() | lr.bit());
+  int count = CfgGlobals::current()->fun()->scope()->num_parameters();
+  __ add(sp, sp, Operand((count + 1) * kPointerSize));
+  __ Jump(lr);
+}
+
+
+void PropLoadInstr::Compile(MacroAssembler* masm) {
+  // The key should not be on the stack---if it is a compiler-generated
+  // temporary it is in the accumulator.
+  ASSERT(!key()->is_on_stack());
+
+  Comment cmnt(masm, "[ Load from Property");
+  // If the key is known at compile-time we may be able to use a load IC.
+  bool is_keyed_load = true;
+  if (key()->is_constant()) {
+    // Still use the keyed load IC if the key can be parsed as an integer so
+    // we will get into the case that handles [] on string objects.
+    Handle<Object> key_val = Constant::cast(key())->handle();
+    uint32_t ignored;
+    if (key_val->IsSymbol() &&
+        !String::cast(*key_val)->AsArrayIndex(&ignored)) {
+      is_keyed_load = false;
+    }
+  }
+
+  if (!object()->is_on_stack()) object()->Push(masm);
+
+  if (is_keyed_load) {
+    key()->Push(masm);
+    Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+    __ Call(ic, RelocInfo::CODE_TARGET);
+    // Discard key and receiver.
+    __ add(sp, sp, Operand(2 * kPointerSize));
+  } else {
+    key()->Get(masm, r2);
+    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+    __ Call(ic, RelocInfo::CODE_TARGET);
+    __ pop();  // Discard receiver.
+  }
+  location()->Set(masm, r0);
+}
+
+
+void BinaryOpInstr::Compile(MacroAssembler* masm) {
+  // The right-hand value should not be on the stack---if it is a
+  // compiler-generated temporary it is in the accumulator.
+  ASSERT(!right()->is_on_stack());
+
+  Comment cmnt(masm, "[ BinaryOpInstr");
+  // We can overwrite one of the operands if it is a temporary.
+  OverwriteMode mode = NO_OVERWRITE;
+  if (left()->is_temporary()) {
+    mode = OVERWRITE_LEFT;
+  } else if (right()->is_temporary()) {
+    mode = OVERWRITE_RIGHT;
+  }
+
+  // Move left to r1 and right to r0.
+  left()->Get(masm, r1);
+  right()->Get(masm, r0);
+  GenericBinaryOpStub stub(op(), mode);
+  __ CallStub(&stub);
+  location()->Set(masm, r0);
+}
+
+
+void ReturnInstr::Compile(MacroAssembler* masm) {
+  // The location should be 'Effect'.  As a side effect, move the value to
+  // the accumulator.
+  Comment cmnt(masm, "[ ReturnInstr");
+  value()->Get(masm, r0);
+}
+
+
+void Constant::Get(MacroAssembler* masm, Register reg) {
+  __ mov(reg, Operand(handle_));
+}
+
+
+void Constant::Push(MacroAssembler* masm) {
+  __ mov(ip, Operand(handle_));
+  __ push(ip);
+}
+
+
+static MemOperand ToMemOperand(SlotLocation* loc) {
+  switch (loc->type()) {
+    case Slot::PARAMETER: {
+      int count = CfgGlobals::current()->fun()->scope()->num_parameters();
+      return MemOperand(fp, (1 + count - loc->index()) * kPointerSize);
+    }
+    case Slot::LOCAL: {
+      const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
+      return MemOperand(fp, kOffset - loc->index() * kPointerSize);
+    }
+    default:
+      UNREACHABLE();
+      return MemOperand(r0);
+  }
+}
+
+
+void Constant::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
+  __ mov(ip, Operand(handle_));
+  __ str(ip, ToMemOperand(loc));
+}
+
+
+void SlotLocation::Get(MacroAssembler* masm, Register reg) {
+  __ ldr(reg, ToMemOperand(this));
+}
+
+
+void SlotLocation::Set(MacroAssembler* masm, Register reg) {
+  __ str(reg, ToMemOperand(this));
+}
+
+
+void SlotLocation::Push(MacroAssembler* masm) {
+  __ ldr(ip, ToMemOperand(this));
+  __ push(ip);  // Push will not destroy ip.
+}
+
+
+void SlotLocation::Move(MacroAssembler* masm, Value* value) {
+  // Double dispatch.
+  value->MoveToSlot(masm, this);
+}
+
+
+void SlotLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
+  __ ldr(ip, ToMemOperand(this));
+  __ str(ip, ToMemOperand(loc));
+}
+
+
+void TempLocation::Get(MacroAssembler* masm, Register reg) {
+  switch (where_) {
+    case ACCUMULATOR:
+      if (!reg.is(r0)) __ mov(reg, r0);
+      break;
+    case STACK:
+      __ pop(reg);
+      break;
+    case NOT_ALLOCATED:
+      UNREACHABLE();
+  }
+}
+
+
+void TempLocation::Set(MacroAssembler* masm, Register reg) {
+  switch (where_) {
+    case ACCUMULATOR:
+      if (!reg.is(r0)) __ mov(r0, reg);
+      break;
+    case STACK:
+      __ push(reg);
+      break;
+    case NOT_ALLOCATED:
+      UNREACHABLE();
+  }
+}
+
+
+void TempLocation::Push(MacroAssembler* masm) {
+  switch (where_) {
+    case ACCUMULATOR:
+      __ push(r0);
+      break;
+    case STACK:
+    case NOT_ALLOCATED:
+      UNREACHABLE();
+  }
+}
+
+
+void TempLocation::Move(MacroAssembler* masm, Value* value) {
+  switch (where_) {
+    case ACCUMULATOR:
+      value->Get(masm, r0);
+      break;  // Do not fall through and also push the value.
+    case STACK:
+      value->Push(masm);
+      break;
+    case NOT_ALLOCATED:
+      UNREACHABLE();
+  }
+}
+
+
+void TempLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
+  switch (where_) {
+    case ACCUMULATOR:
+      __ str(r0, ToMemOperand(loc));
+      break;  // Do not fall through and pop an unrelated value.
+    case STACK:
+      __ pop(ip);
+      __ str(ip, ToMemOperand(loc));
+      break;
+    case NOT_ALLOCATED:
+      UNREACHABLE();
+  }
+}
+
+#undef __
+
+} }  // namespace v8::internal
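The temp-allocation loop in InstructionBlock::Compile relies on each instruction knowing where its consumer wants an incoming temporary. A minimal sketch of that peek-ahead contract, assuming a per-instruction FastAllocate hook and a set_where accessor on TempLocation (an illustration of the idea, not a line from this commit):

    // A temporary consumed directly by the next instruction can live in
    // the accumulator (r0); any other temporary must go on the stack.
    // value() is assumed to be the consuming instruction's input operand.
    void ReturnInstr::FastAllocate(TempLocation* temp) {
      temp->set_where(temp == value() ? TempLocation::ACCUMULATOR
                                      : TempLocation::STACK);
    }

This is also why TempLocation::Get above pops when the value was allocated to the stack, and why the ACCUMULATOR cases in Move and MoveToSlot must not fall through to the STACK cases.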
diff --git a/V8Binding/v8/src/arm/codegen-arm-inl.h b/V8Binding/v8/src/arm/codegen-arm-inl.h
index 5a29a45..9ff02cb 100644
--- a/V8Binding/v8/src/arm/codegen-arm-inl.h
+++ b/V8Binding/v8/src/arm/codegen-arm-inl.h
@@ -34,6 +34,37 @@ namespace internal {
 
 #define __ ACCESS_MASM(masm_)
 
+void CodeGenerator::LoadConditionAndSpill(Expression* expression,
+                                          TypeofState typeof_state,
+                                          JumpTarget* true_target,
+                                          JumpTarget* false_target,
+                                          bool force_control) {
+  LoadCondition(expression, typeof_state, true_target, false_target,
+                force_control);
+}
+
+
+void CodeGenerator::LoadAndSpill(Expression* expression,
+                                 TypeofState typeof_state) {
+  Load(expression, typeof_state);
+}
+
+
+void CodeGenerator::VisitAndSpill(Statement* statement) {
+  Visit(statement);
+}
+
+
+void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
+  VisitStatements(statements);
+}
+
+
+void Reference::GetValueAndSpill(TypeofState typeof_state) {
+  GetValue(typeof_state);
+}
+
+
 // Platform-specific inline functions.
 
 void DeferredCode::Jump() { __ jmp(&entry_label_); }
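The *AndSpill entry points are now plain forwarders, so existing call sites compile unchanged while the spilled-code bookkeeping disappears. A representative caller, simplified for illustration (the real visitor in codegen-arm.cc has more detail):

    void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
      Comment cmnt(masm_, "[ ExpressionStatement");
      LoadAndSpill(node->expression());  // now identical to Load(...)
      frame_->Drop();                    // discard the unused result
    }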
diff --git a/V8Binding/v8/src/arm/codegen-arm.cc b/V8Binding/v8/src/arm/codegen-arm.cc
index 5f8149e..67d4611 100644
--- a/V8Binding/v8/src/arm/codegen-arm.cc
+++ b/V8Binding/v8/src/arm/codegen-arm.cc
@@ -133,8 +133,7 @@ CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script,
       allocator_(NULL),
       cc_reg_(al),
       state_(NULL),
-      function_return_is_shadowed_(false),
-      in_spilled_code_(false) {
+      function_return_is_shadowed_(false) {
 }
 
 
@@ -156,7 +155,6 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
   ASSERT(frame_ == NULL);
   frame_ = new VirtualFrame();
   cc_reg_ = al;
-  set_in_spilled_code(false);
   {
     CodeGenState state(this);
 
@@ -423,22 +421,6 @@ MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
 }
 
 
-void CodeGenerator::LoadConditionAndSpill(Expression* expression,
-                                          TypeofState typeof_state,
-                                          JumpTarget* true_target,
-                                          JumpTarget* false_target,
-                                          bool force_control) {
-  ASSERT(in_spilled_code());
-  set_in_spilled_code(false);
-  LoadCondition(expression, typeof_state, true_target, false_target,
-                force_control);
-  if (frame_ != NULL) {
-    frame_->SpillAll();
-  }
-  set_in_spilled_code(true);
-}
-
-
 // Loads a value on TOS. If it is a boolean value, the result may have been
 // (partially) translated into branches, or it may have set the condition
 // code register. If force_cc is set, the value is forced to set the
@@ -450,7 +432,6 @@ void CodeGenerator::LoadCondition(Expression* x,
                                   JumpTarget* true_target,
                                   JumpTarget* false_target,
                                   bool force_cc) {
-  ASSERT(!in_spilled_code());
   ASSERT(!has_cc());
   int original_height = frame_->height();
 
@@ -484,21 +465,10 @@ void CodeGenerator::LoadCondition(Expression* x,
 }
 
 
-void CodeGenerator::LoadAndSpill(Expression* expression,
-                                 TypeofState typeof_state) {
-  ASSERT(in_spilled_code());
-  set_in_spilled_code(false);
-  Load(expression, typeof_state);
-  frame_->SpillAll();
-  set_in_spilled_code(true);
-}
-
-
 void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  ASSERT(!in_spilled_code());
   JumpTarget true_target;
   JumpTarget false_target;
   LoadCondition(x, typeof_state, &true_target, &false_target, false);
@@ -697,96 +667,6 @@ void CodeGenerator::ToBoolean(JumpTarget* true_target,
 }
 
 
-class GenericBinaryOpStub : public CodeStub {
- public:
-  GenericBinaryOpStub(Token::Value op,
-                      OverwriteMode mode,
-                      int constant_rhs = CodeGenerator::kUnknownIntValue)
-      : op_(op),
-        mode_(mode),
-        constant_rhs_(constant_rhs),
-        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)) { }
-
- private:
-  Token::Value op_;
-  OverwriteMode mode_;
-  int constant_rhs_;
-  bool specialized_on_rhs_;
-
-  static const int kMaxKnownRhs = 0x40000000;
-
-  // Minor key encoding in 16 bits.
-  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 6> {};
-  class KnownIntBits: public BitField<int, 8, 8> {};
-
-  Major MajorKey() { return GenericBinaryOp; }
-  int MinorKey() {
-    // Encode the parameters in a unique 16 bit value.
-    return OpBits::encode(op_)
-           | ModeBits::encode(mode_)
-           | KnownIntBits::encode(MinorKeyForKnownInt());
-  }
-
-  void Generate(MacroAssembler* masm);
-  void HandleNonSmiBitwiseOp(MacroAssembler* masm);
-
-  static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
-    if (constant_rhs == CodeGenerator::kUnknownIntValue) return false;
-    if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
-    if (op == Token::MOD) {
-      if (constant_rhs <= 1) return false;
-      if (constant_rhs <= 10) return true;
-      if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
-      return false;
-    }
-    return false;
-  }
-
-  int MinorKeyForKnownInt() {
-    if (!specialized_on_rhs_) return 0;
-    if (constant_rhs_ <= 10) return constant_rhs_ + 1;
-    ASSERT(IsPowerOf2(constant_rhs_));
-    int key = 12;
-    int d = constant_rhs_;
-    while ((d & 1) == 0) {
-      key++;
-      d >>= 1;
-    }
-    return key;
-  }
-
-  const char* GetName() {
-    switch (op_) {
-      case Token::ADD: return "GenericBinaryOpStub_ADD";
-      case Token::SUB: return "GenericBinaryOpStub_SUB";
-      case Token::MUL: return "GenericBinaryOpStub_MUL";
-      case Token::DIV: return "GenericBinaryOpStub_DIV";
-      case Token::MOD: return "GenericBinaryOpStub_MOD";
-      case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
-      case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
-      case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
-      case Token::SAR: return "GenericBinaryOpStub_SAR";
-      case Token::SHL: return "GenericBinaryOpStub_SHL";
-      case Token::SHR: return "GenericBinaryOpStub_SHR";
-      default: return "GenericBinaryOpStub";
-    }
-  }
-
-#ifdef DEBUG
-  void Print() {
-    if (!specialized_on_rhs_) {
-      PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
-    } else {
-      PrintF("GenericBinaryOpStub (%s by %d)\n",
-             Token::String(op_),
-             constant_rhs_);
-    }
-  }
-#endif
-};
-
-
 void CodeGenerator::GenericBinaryOperation(Token::Value op,
                                            OverwriteMode overwrite_mode,
                                            int constant_rhs) {
@@ -1236,28 +1116,6 @@ void CodeGenerator::CheckStack() {
 }
 
 
-void CodeGenerator::VisitAndSpill(Statement* statement) {
-  ASSERT(in_spilled_code());
-  set_in_spilled_code(false);
-  Visit(statement);
-  if (frame_ != NULL) {
-    frame_->SpillAll();
-  }
-  set_in_spilled_code(true);
-}
-
-
-void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
-  ASSERT(in_spilled_code());
-  set_in_spilled_code(false);
-  VisitStatements(statements);
-  if (frame_ != NULL) {
-    frame_->SpillAll();
-  }
-  set_in_spilled_code(true);
-}
-
-
 void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
 #ifdef DEBUG
   int original_height = frame_->height();
@@ -1854,7 +1712,6 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  ASSERT(!in_spilled_code());
   VirtualFrame::SpilledScope spilled_scope;
   Comment cmnt(masm_, "[ ForInStatement");
   CodeForStatementPosition(node);
@@ -2912,7 +2769,6 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  ASSERT(!in_spilled_code());
   VirtualFrame::SpilledScope spilled_scope;
   // Call runtime routine to allocate the catch extension object and
   // assign the exception value to the catch variable.
@@ -4248,17 +4104,7 @@ Handle<String> Reference::GetName() {
 }
 
 
-void Reference::GetValueAndSpill(TypeofState typeof_state) {
-  ASSERT(cgen_->in_spilled_code());
-  cgen_->set_in_spilled_code(false);
-  GetValue(typeof_state);
-  cgen_->frame()->SpillAll();
-  cgen_->set_in_spilled_code(true);
-}
-
-
 void Reference::GetValue(TypeofState typeof_state) {
-  ASSERT(!cgen_->in_spilled_code());
   ASSERT(cgen_->HasValidEntryRegisters());
   ASSERT(!is_illegal());
   ASSERT(!cgen_->has_cc());
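GenericBinaryOperation and BinaryOpInstr::Compile (in cfg-arm.cc above) share one register contract with the stub: left operand in r1, right operand in r0, result in r0. A hedged sketch of an emitter following that convention, assuming the usual ACCESS_MASM(masm) __ macro is in effect; this helper is illustrative and not part of the patch:

    static void EmitGenericAdd(MacroAssembler* masm) {
      GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE);
      __ pop(r0);         // right operand (top of stack)
      __ pop(r1);         // left operand
      __ CallStub(&stub); // result comes back in r0
      __ push(r0);
    }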
diff --git a/V8Binding/v8/src/arm/codegen-arm.h b/V8Binding/v8/src/arm/codegen-arm.h
index 6391a8e..80d1d56 100644
--- a/V8Binding/v8/src/arm/codegen-arm.h
+++ b/V8Binding/v8/src/arm/codegen-arm.h
@@ -183,9 +183,6 @@ class CodeGenerator: public AstVisitor {
 
   void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
 
-  bool in_spilled_code() const { return in_spilled_code_; }
-  void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
-
   static const int kUnknownIntValue = -1;
 
  private:
@@ -215,18 +212,18 @@ class CodeGenerator: public AstVisitor {
 #define DEF_VISIT(type) \
   void Visit##type(type* node);
-  NODE_LIST(DEF_VISIT)
+  AST_NODE_LIST(DEF_VISIT)
 #undef DEF_VISIT
 
   // Visit a statement and then spill the virtual frame if control flow can
   // reach the end of the statement (ie, it does not exit via break,
   // continue, return, or throw).  This function is used temporarily while
   // the code generator is being transformed.
-  void VisitAndSpill(Statement* statement);
+  inline void VisitAndSpill(Statement* statement);
 
   // Visit a list of statements and then spill the virtual frame if control
   // flow can reach the end of the list.
-  void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
+  inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
 
   // Main code generation function
   void GenCode(FunctionLiteral* fun);
@@ -263,17 +260,17 @@ class CodeGenerator: public AstVisitor {
   // Generate code to push the value of an expression on top of the frame
   // and then spill the frame fully to memory.  This function is used
   // temporarily while the code generator is being transformed.
-  void LoadAndSpill(Expression* expression,
-                    TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+  inline void LoadAndSpill(Expression* expression,
+                           TypeofState typeof_state = NOT_INSIDE_TYPEOF);
 
   // Call LoadCondition and then spill the virtual frame unless control flow
   // cannot reach the end of the expression (ie, by emitting only
   // unconditional jumps to the control targets).
-  void LoadConditionAndSpill(Expression* expression,
-                             TypeofState typeof_state,
-                             JumpTarget* true_target,
-                             JumpTarget* false_target,
-                             bool force_control);
+  inline void LoadConditionAndSpill(Expression* expression,
+                                    TypeofState typeof_state,
+                                    JumpTarget* true_target,
+                                    JumpTarget* false_target,
+                                    bool force_control);
 
   // Read a value from a slot and leave it on top of the expression stack.
   void LoadFromSlot(Slot* slot, TypeofState typeof_state);
@@ -374,7 +371,7 @@ class CodeGenerator: public AstVisitor {
   // information.
   void CodeForFunctionPosition(FunctionLiteral* fun);
   void CodeForReturnPosition(FunctionLiteral* fun);
-  void CodeForStatementPosition(Node* node);
+  void CodeForStatementPosition(AstNode* node);
   void CodeForSourcePosition(int pos);
 
 #ifdef DEBUG
@@ -405,12 +402,6 @@ class CodeGenerator: public AstVisitor {
   // to some unlinking code).
   bool function_return_is_shadowed_;
 
-  // True when we are in code that expects the virtual frame to be fully
-  // spilled.  Some virtual frame function are disabled in DEBUG builds when
-  // called from spilled code, because they do not leave the virtual frame
-  // in a spilled state.
-  bool in_spilled_code_;
-
   static InlineRuntimeLUT kInlineRuntimeLUT[];
 
   friend class VirtualFrame;
@@ -421,6 +412,96 @@ class CodeGenerator: public AstVisitor {
 };
 
 
+class GenericBinaryOpStub : public CodeStub {
+ public:
+  GenericBinaryOpStub(Token::Value op,
+                      OverwriteMode mode,
+                      int constant_rhs = CodeGenerator::kUnknownIntValue)
+      : op_(op),
+        mode_(mode),
+        constant_rhs_(constant_rhs),
+        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)) { }
+
+ private:
+  Token::Value op_;
+  OverwriteMode mode_;
+  int constant_rhs_;
+  bool specialized_on_rhs_;
+
+  static const int kMaxKnownRhs = 0x40000000;
+
+  // Minor key encoding in 16 bits.
+  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+  class OpBits: public BitField<Token::Value, 2, 6> {};
+  class KnownIntBits: public BitField<int, 8, 8> {};
+
+  Major MajorKey() { return GenericBinaryOp; }
+  int MinorKey() {
+    // Encode the parameters in a unique 16 bit value.
+    return OpBits::encode(op_)
+           | ModeBits::encode(mode_)
+           | KnownIntBits::encode(MinorKeyForKnownInt());
+  }
+
+  void Generate(MacroAssembler* masm);
+  void HandleNonSmiBitwiseOp(MacroAssembler* masm);
+
+  static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
+    if (constant_rhs == CodeGenerator::kUnknownIntValue) return false;
+    if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
+    if (op == Token::MOD) {
+      if (constant_rhs <= 1) return false;
+      if (constant_rhs <= 10) return true;
+      if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
+      return false;
+    }
+    return false;
+  }
+
+  int MinorKeyForKnownInt() {
+    if (!specialized_on_rhs_) return 0;
+    if (constant_rhs_ <= 10) return constant_rhs_ + 1;
+    ASSERT(IsPowerOf2(constant_rhs_));
+    int key = 12;
+    int d = constant_rhs_;
+    while ((d & 1) == 0) {
+      key++;
+      d >>= 1;
+    }
+    return key;
+  }
+
+  const char* GetName() {
+    switch (op_) {
+      case Token::ADD: return "GenericBinaryOpStub_ADD";
+      case Token::SUB: return "GenericBinaryOpStub_SUB";
+      case Token::MUL: return "GenericBinaryOpStub_MUL";
+      case Token::DIV: return "GenericBinaryOpStub_DIV";
+      case Token::MOD: return "GenericBinaryOpStub_MOD";
+      case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
+      case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
+      case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
+      case Token::SAR: return "GenericBinaryOpStub_SAR";
+      case Token::SHL: return "GenericBinaryOpStub_SHL";
+      case Token::SHR: return "GenericBinaryOpStub_SHR";
+      default: return "GenericBinaryOpStub";
+    }
+  }
+
+#ifdef DEBUG
+  void Print() {
+    if (!specialized_on_rhs_) {
+      PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
+    } else {
+      PrintF("GenericBinaryOpStub (%s by %d)\n",
+             Token::String(op_),
+             constant_rhs_);
+    }
+  }
+#endif
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_ARM_CODEGEN_ARM_H_
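The 16-bit minor key above packs the overwrite mode (2 bits), the token (6 bits), and the known-int code (8 bits). A standalone re-derivation of MinorKeyForKnownInt makes the encoding concrete; this free function mirrors the member above and exists only for illustration:

    // Keys 1..11 encode rhs values 0..10 directly; larger powers of two
    // map to 12 + log2(rhs).  The largest key, for kMaxKnownRhs == 2^30,
    // is 42, which comfortably fits the 8-bit KnownIntBits field.
    int MinorKeyForKnownInt(int constant_rhs, bool specialized_on_rhs) {
      if (!specialized_on_rhs) return 0;
      if (constant_rhs <= 10) return constant_rhs + 1;
      int key = 12;
      for (int d = constant_rhs; (d & 1) == 0; d >>= 1) key++;
      return key;  // e.g. constant_rhs == 1024 gives 12 + 10 == 22
    }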
diff --git a/V8Binding/v8/src/arm/ic-arm.cc b/V8Binding/v8/src/arm/ic-arm.cc
index 82a2bec..8781256 100644
--- a/V8Binding/v8/src/arm/ic-arm.cc
+++ b/V8Binding/v8/src/arm/ic-arm.cc
@@ -582,8 +582,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
   __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
   // Check that the object is in fast mode (not dictionary).
   __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ cmp(r3, Operand(Factory::hash_table_map()));
-  __ b(eq, &slow);
+  __ cmp(r3, Operand(Factory::fixed_array_map()));
+  __ b(ne, &slow);
   // Check that the key (index) is within bounds.
   __ ldr(r3, FieldMemOperand(r1, Array::kLengthOffset));
   __ cmp(r0, Operand(r3));
@@ -661,8 +661,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   __ ldr(r3, FieldMemOperand(r3, JSObject::kElementsOffset));
   // Check that the object is in fast mode (not dictionary).
   __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
-  __ cmp(r2, Operand(Factory::hash_table_map()));
-  __ b(eq, &slow);
+  __ cmp(r2, Operand(Factory::fixed_array_map()));
+  __ b(ne, &slow);
   // Untag the key (for checking against untagged length in the fixed array).
   __ mov(r1, Operand(r1, ASR, kSmiTagSize));
   // Compute address to store into and check array bounds.
@@ -710,8 +710,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   __ bind(&array);
   __ ldr(r2, FieldMemOperand(r3, JSObject::kElementsOffset));
   __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
-  __ cmp(r1, Operand(Factory::hash_table_map()));
-  __ b(eq, &slow);
+  __ cmp(r1, Operand(Factory::fixed_array_map()));
+  __ b(ne, &slow);
 
   // Check the key against the length in the array, compute the
   // address to store into and fall through to fast case.
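The inverted check is subtly stronger than the old one: the old code rejected only the one known-bad map (dictionary elements) and let any other unexpected map fall through to the fast path, while the new code accepts only a genuine FixedArray map. In C++ terms, with the map-accessor spelling assumed for illustration:

    // Before: reject the one known-bad map, accept everything else.
    bool old_check_is_fast(Map* elements_map) {
      return elements_map != Heap::hash_table_map();   // eq branched to slow
    }
    // After: accept the one known-good map, reject everything else.
    bool new_check_is_fast(Map* elements_map) {
      return elements_map == Heap::fixed_array_map();  // ne branches to slow
    }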
diff --git a/V8Binding/v8/src/arm/stub-cache-arm.cc b/V8Binding/v8/src/arm/stub-cache-arm.cc
index d6650c9..393db59 100644
--- a/V8Binding/v8/src/arm/stub-cache-arm.cc
+++ b/V8Binding/v8/src/arm/stub-cache-arm.cc
@@ -467,21 +467,23 @@ void StubCompiler::GenerateLoadCallback(JSObject* object,
 
   // Push the arguments on the JS stack of the caller.
   __ push(receiver);  // receiver
+  __ push(reg);  // holder
   __ mov(ip, Operand(Handle<AccessorInfo>(callback)));  // callback data
   __ push(ip);
+  __ ldr(reg, FieldMemOperand(ip, AccessorInfo::kDataOffset));
+  __ push(reg);
   __ push(name_reg);  // name
-  __ push(reg);  // holder
 
   // Do tail-call to the runtime system.
   ExternalReference load_callback_property =
       ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
-  __ TailCallRuntime(load_callback_property, 4);
+  __ TailCallRuntime(load_callback_property, 5);
 }
 
 
 void StubCompiler::GenerateLoadInterceptor(JSObject* object,
                                            JSObject* holder,
-                                           Smi* lookup_hint,
+                                           LookupResult* lookup,
                                            Register receiver,
                                            Register name_reg,
                                            Register scratch1,
@@ -500,13 +502,18 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
   __ push(receiver);  // receiver
   __ push(reg);  // holder
   __ push(name_reg);  // name
-  __ mov(scratch1, Operand(lookup_hint));
+
+  InterceptorInfo* interceptor = holder->GetNamedInterceptor();
+  ASSERT(!Heap::InNewSpace(interceptor));
+  __ mov(scratch1, Operand(Handle<Object>(interceptor)));
   __ push(scratch1);
+  __ ldr(scratch2, FieldMemOperand(scratch1, InterceptorInfo::kDataOffset));
+  __ push(scratch2);
 
   // Do tail-call to the runtime system.
   ExternalReference load_ic_property =
-      ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
-  __ TailCallRuntime(load_ic_property, 4);
+      ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+  __ TailCallRuntime(load_ic_property, 5);
 }
 
 
@@ -676,13 +683,13 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
     case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
       CheckPrototypes(JSObject::cast(object), r1, holder, r3, r2, name, &miss);
-      // Make sure object->elements()->map() != Heap::hash_table_map()
+      // Make sure object->HasFastElements().
       // Get the elements array of the object.
       __ ldr(r3, FieldMemOperand(r1, JSObject::kElementsOffset));
       // Check that the object is in fast mode (not dictionary).
       __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
-      __ cmp(r2, Operand(Factory::hash_table_map()));
-      __ b(eq, &miss);
+      __ cmp(r2, Operand(Factory::fixed_array_map()));
+      __ b(ne, &miss);
       break;
 
     default:
@@ -744,8 +751,6 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
   // -----------------------------------
   Label miss;
 
-  __ IncrementCounter(&Counters::call_global_inline, 1, r1, r3);
-
   // Get the number of arguments.
   const int argc = arguments().immediate();
 
@@ -782,6 +787,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
   __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
 
   // Jump to the cached code (tail call).
+  __ IncrementCounter(&Counters::call_global_inline, 1, r1, r3);
   ASSERT(function->is_compiled());
   Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
@@ -790,7 +796,6 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
 
   // Handle call cache miss.
   __ bind(&miss);
-  __ DecrementCounter(&Counters::call_global_inline, 1, r1, r3);
   __ IncrementCounter(&Counters::call_global_inline_miss, 1, r1, r3);
   Handle<Code> ic = ComputeCallMiss(arguments().immediate());
   __ Jump(ic, RelocInfo::CODE_TARGET);
@@ -951,8 +956,6 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
   // -----------------------------------
   Label miss;
 
-  __ IncrementCounter(&Counters::named_store_global_inline, 1, r1, r3);
-
   // Check that the map of the global has not changed.
   __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
   __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -963,11 +966,11 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
   __ mov(r2, Operand(Handle<JSGlobalPropertyCell>(cell)));
   __ str(r0, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
 
+  __ IncrementCounter(&Counters::named_store_global_inline, 1, r1, r3);
   __ Ret();
 
   // Handle store cache miss.
   __ bind(&miss);
-  __ DecrementCounter(&Counters::named_store_global_inline, 1, r1, r3);
   __ IncrementCounter(&Counters::named_store_global_inline_miss, 1, r1, r3);
   Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
   __ Jump(ic, RelocInfo::CODE_TARGET);
@@ -1054,9 +1057,11 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
 
   __ ldr(r0, MemOperand(sp, 0));
 
+  LookupResult lookup;
+  holder->LocalLookupRealNamedProperty(name, &lookup);
   GenerateLoadInterceptor(object,
                           holder,
-                          holder->InterceptorPropertyLookupHint(name),
+                          &lookup,
                           r0,
                           r2,
                           r3,
@@ -1083,8 +1088,6 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
   // -----------------------------------
   Label miss;
 
-  __ IncrementCounter(&Counters::named_load_global_inline, 1, r1, r3);
-
   // Get the receiver from the stack.
   __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
 
@@ -1109,10 +1112,10 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
     __ b(eq, &miss);
   }
 
+  __ IncrementCounter(&Counters::named_load_global_inline, 1, r1, r3);
   __ Ret();
 
   __ bind(&miss);
-  __ DecrementCounter(&Counters::named_load_global_inline, 1, r1, r3);
   __ IncrementCounter(&Counters::named_load_global_inline_miss, 1, r1, r3);
   GenerateLoadMiss(masm(), Code::LOAD_IC);
 
@@ -1215,9 +1218,11 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
   __ cmp(r2, Operand(Handle<String>(name)));
   __ b(ne, &miss);
 
+  LookupResult lookup;
+  holder->LocalLookupRealNamedProperty(name, &lookup);
   GenerateLoadInterceptor(receiver,
                           holder,
-                          Smi::FromInt(JSObject::kLookupInHolder),
+                          &lookup,
                           r0,
                           r2,
                           r3,
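The counter moves share one motive: the inline-hit counter is now bumped after all guard checks succeed, so the miss path no longer needs a compensating DecrementCounter and each execution increments exactly one counter. Schematically, assuming the usual ACCESS_MASM(masm) __ macro (this helper is illustrative, not from the patch):

    static void EmitGlobalLoadWithCounters(MacroAssembler* masm) {
      Label miss;
      // ... map, cell, and prototype checks branch to &miss on failure ...
      __ IncrementCounter(&Counters::named_load_global_inline, 1, r1, r3);
      __ Ret();  // hit path: exactly one counter bumped
      __ bind(&miss);
      __ IncrementCounter(&Counters::named_load_global_inline_miss, 1, r1, r3);
      // No DecrementCounter needed: the hit counter was never incremented.
    }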
diff --git a/V8Binding/v8/src/arm/virtual-frame-arm.h b/V8Binding/v8/src/arm/virtual-frame-arm.h
index 2f36f10..d575df6 100644
--- a/V8Binding/v8/src/arm/virtual-frame-arm.h
+++ b/V8Binding/v8/src/arm/virtual-frame-arm.h
@@ -52,20 +52,7 @@ class VirtualFrame : public ZoneObject {
   // generator is being transformed.
   class SpilledScope BASE_EMBEDDED {
    public:
-    SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
-      ASSERT(cgen()->has_valid_frame());
-      cgen()->frame()->SpillAll();
-      cgen()->set_in_spilled_code(true);
-    }
-
-    ~SpilledScope() {
-      cgen()->set_in_spilled_code(previous_state_);
-    }
-
-   private:
-    bool previous_state_;
-
-    CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+    SpilledScope() {}
   };
 
   // An illegal index into the virtual frame.
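With the in_spilled_code_ state gone, SpilledScope is reduced to an empty marker: the ARM virtual frame is treated as always spilled, and the scope object survives only to document intent. Existing users compile unchanged, as in this call site from codegen-arm.cc above (body elided for illustration):

    void CodeGenerator::VisitForInStatement(ForInStatement* node) {
      VirtualFrame::SpilledScope spilled_scope;  // now a no-op constructor
      Comment cmnt(masm_, "[ ForInStatement");
      // ... code that previously required, and enforced, a spilled frame ...
    }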