Diffstat (limited to 'V8Binding/v8/src/ia32')
-rw-r--r--  V8Binding/v8/src/ia32/assembler-ia32.h                  4
-rw-r--r--  V8Binding/v8/src/ia32/builtins-ia32.cc                  14
-rw-r--r--  V8Binding/v8/src/ia32/cfg-ia32.cc                       315
-rw-r--r--  V8Binding/v8/src/ia32/codegen-ia32.cc                   83
-rw-r--r--  V8Binding/v8/src/ia32/codegen-ia32.h                    57
-rw-r--r--  V8Binding/v8/src/ia32/ic-ia32.cc                        122
-rw-r--r--  V8Binding/v8/src/ia32/macro-assembler-ia32.cc           95
-rw-r--r--  V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.cc    35
-rw-r--r--  V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.h     1
-rw-r--r--  V8Binding/v8/src/ia32/stub-cache-ia32.cc                500
10 files changed, 939 insertions, 287 deletions
diff --git a/V8Binding/v8/src/ia32/assembler-ia32.h b/V8Binding/v8/src/ia32/assembler-ia32.h
index 70b510e..b648055 100644
--- a/V8Binding/v8/src/ia32/assembler-ia32.h
+++ b/V8Binding/v8/src/ia32/assembler-ia32.h
@@ -226,7 +226,9 @@ enum ScaleFactor {
times_1 = 0,
times_2 = 1,
times_4 = 2,
- times_8 = 3
+ times_8 = 3,
+ times_pointer_size = times_4,
+ times_half_pointer_size = times_2
};
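The two aliases added here let address arithmetic say "scale by one pointer" instead of hard-coding times_4, which is only correct on ia32 by coincidence of pointer size. A minimal standalone sketch of the idea, with names invented for illustration rather than V8's Operand/Assembler API:

// Sketch only: mirrors the aliasing idea; not V8's actual API.
#include <cassert>
#include <cstdint>

enum ScaleFactor {
  times_1 = 0,
  times_2 = 1,
  times_4 = 2,
  times_8 = 3,
  // Aliases document intent instead of a magic number.
  times_pointer_size = times_4,        // sizeof(void*) == 4 on ia32
  times_half_pointer_size = times_2    // a smi-tagged index is value << 1
};

// [base + index * 2^scale + disp], the addressing mode lea/mov accept.
static uintptr_t EffectiveAddress(uintptr_t base, uintptr_t index,
                                  ScaleFactor scale, int disp) {
  return base + (index << scale) + disp;
}

int main() {
  const int kFixedArrayHeaderSize = 8;  // assumed value, for the example only
  // Same shape as: lea(ecx, Operand(edi, edx, times_pointer_size, kHeaderSize))
  uintptr_t end = EffectiveAddress(0x1000, 3, times_pointer_size,
                                   kFixedArrayHeaderSize);
  assert(end == 0x1000 + 3 * 4 + 8);
  return 0;
}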
diff --git a/V8Binding/v8/src/ia32/builtins-ia32.cc b/V8Binding/v8/src/ia32/builtins-ia32.cc
index 3cafd90..a70a9d2 100644
--- a/V8Binding/v8/src/ia32/builtins-ia32.cc
+++ b/V8Binding/v8/src/ia32/builtins-ia32.cc
@@ -140,7 +140,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
ExternalReference new_space_allocation_limit =
ExternalReference::new_space_allocation_limit_address();
__ cmp(edi, Operand::StaticVariable(new_space_allocation_limit));
- __ j(greater_equal, &rt_call);
+ __ j(above_equal, &rt_call);
// Allocated the JSObject, now initialize the fields.
// eax: initial map
// ebx: JSObject
@@ -175,8 +175,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ or_(Operand(ebx), Immediate(kHeapObjectTag));
__ mov(Operand::StaticVariable(new_space_allocation_top), edi);
- // Check if a properties array should be setup and allocate one if needed.
- // Otherwise initialize the properties to the empty_fixed_array as well.
+ // Check if a non-empty properties array is needed.
+ // Allocate and initialize a FixedArray if it is.
// eax: initial map
// ebx: JSObject
// edi: start of next object
@@ -184,21 +184,19 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
// Calculate unused properties past the end of the in-object properties.
__ sub(edx, Operand(ecx));
- __ test(edx, Operand(edx));
// Done if no extra properties are to be allocated.
__ j(zero, &allocated);
// Scale the number of elements by pointer size and add the header for
// FixedArrays to the start of the next object calculation from above.
- // eax: initial map
// ebx: JSObject
// edi: start of next object (will be start of FixedArray)
// edx: number of elements in properties array
ASSERT(Heap::MaxObjectSizeInPagedSpace() >
(FixedArray::kHeaderSize + 255*kPointerSize));
- __ lea(ecx, Operand(edi, edx, times_4, FixedArray::kHeaderSize));
+ __ lea(ecx, Operand(edi, edx, times_pointer_size, FixedArray::kHeaderSize));
__ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
- __ j(greater_equal, &undo_allocation);
+ __ j(above_equal, &undo_allocation);
__ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
// Initialize the FixedArray.
@@ -223,7 +221,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ add(Operand(eax), Immediate(kPointerSize));
__ bind(&entry);
__ cmp(eax, Operand(ecx));
- __ j(less, &loop);
+ __ j(below, &loop);
}
// Store the initialized FixedArray into the properties field of
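A note on the condition-code changes in this stub (greater_equal to above_equal, less to below): the values being compared are raw heap addresses, which are unsigned, so the signed condition codes can give the wrong answer once an address crosses 0x80000000. A hedged, standalone illustration:

// Sketch only: why the allocation-limit checks need unsigned conditions.
#include <cassert>
#include <cstdint>

int main() {
  // An allocation limit in the lower half of the address space and a
  // hypothetical allocation top that has ended up in the upper half.
  uint32_t limit = 0x00100000u;
  uint32_t top   = 0x80000040u;   // clearly past the limit

  // j(above_equal, ...) tests the unsigned relation after cmp: correct.
  assert((top >= limit) == true);

  // j(greater_equal, ...) tests the signed relation: 0x80000040 reads as a
  // negative number, so the stub would fail to take the slow path.
  assert((static_cast<int32_t>(top) >= static_cast<int32_t>(limit)) == false);
  return 0;
}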
diff --git a/V8Binding/v8/src/ia32/cfg-ia32.cc b/V8Binding/v8/src/ia32/cfg-ia32.cc
new file mode 100644
index 0000000..58985a5
--- /dev/null
+++ b/V8Binding/v8/src/ia32/cfg-ia32.cc
@@ -0,0 +1,315 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "cfg.h"
+#include "codegen-inl.h"
+#include "codegen-ia32.h"
+#include "macro-assembler-ia32.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void InstructionBlock::Compile(MacroAssembler* masm) {
+ ASSERT(!is_marked());
+ is_marked_ = true;
+ {
+ Comment cmt(masm, "[ InstructionBlock");
+ for (int i = 0, len = instructions_.length(); i < len; i++) {
+ // If the location of the current instruction is a temp, then the
+ // instruction cannot be in tail position in the block. Allocate the
+ // temp based on peeking ahead to the next instruction.
+ Instruction* instr = instructions_[i];
+ Location* loc = instr->location();
+ if (loc->is_temporary()) {
+ instructions_[i+1]->FastAllocate(TempLocation::cast(loc));
+ }
+ instructions_[i]->Compile(masm);
+ }
+ }
+ successor_->Compile(masm);
+}
+
+
+void EntryNode::Compile(MacroAssembler* masm) {
+ ASSERT(!is_marked());
+ is_marked_ = true;
+ Label deferred_enter, deferred_exit;
+ {
+ Comment cmnt(masm, "[ EntryNode");
+ __ push(ebp);
+ __ mov(ebp, esp);
+ __ push(esi);
+ __ push(edi);
+ int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
+ if (count > 0) {
+ __ Set(eax, Immediate(Factory::undefined_value()));
+ for (int i = 0; i < count; i++) {
+ __ push(eax);
+ }
+ }
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+ if (FLAG_check_stack) {
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_guard_limit();
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(below, &deferred_enter);
+ __ bind(&deferred_exit);
+ }
+ }
+ successor_->Compile(masm);
+ if (FLAG_check_stack) {
+ Comment cmnt(masm, "[ Deferred Stack Check");
+ __ bind(&deferred_enter);
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ __ jmp(&deferred_exit);
+ }
+}
+
+
+void ExitNode::Compile(MacroAssembler* masm) {
+ ASSERT(!is_marked());
+ is_marked_ = true;
+ Comment cmnt(masm, "[ ExitNode");
+ if (FLAG_trace) {
+ __ push(eax);
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+ __ RecordJSReturn();
+ __ mov(esp, ebp);
+ __ pop(ebp);
+ int count = CfgGlobals::current()->fun()->scope()->num_parameters();
+ __ ret((count + 1) * kPointerSize);
+}
+
+
+void PropLoadInstr::Compile(MacroAssembler* masm) {
+ // The key should not be on the stack---if it is a compiler-generated
+ // temporary it is in the accumulator.
+ ASSERT(!key()->is_on_stack());
+
+ Comment cmnt(masm, "[ Load from Property");
+ // If the key is known at compile-time we may be able to use a load IC.
+ bool is_keyed_load = true;
+ if (key()->is_constant()) {
+ // Still use the keyed load IC if the key can be parsed as an integer so
+ // we will get into the case that handles [] on string objects.
+ Handle<Object> key_val = Constant::cast(key())->handle();
+ uint32_t ignored;
+ if (key_val->IsSymbol() &&
+ !String::cast(*key_val)->AsArrayIndex(&ignored)) {
+ is_keyed_load = false;
+ }
+ }
+
+ if (!object()->is_on_stack()) object()->Push(masm);
+ // A test eax instruction after the call indicates to the IC code that it
+ // was inlined. Ensure there is not one here.
+ if (is_keyed_load) {
+ key()->Push(masm);
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ pop(ebx); // Discard key.
+ } else {
+ key()->Get(masm, ecx);
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ }
+ __ pop(ebx); // Discard receiver.
+ location()->Set(masm, eax);
+}
+
+
+void BinaryOpInstr::Compile(MacroAssembler* masm) {
+ // The right-hand value should not be on the stack---if it is a
+ // compiler-generated temporary it is in the accumulator.
+ ASSERT(!right()->is_on_stack());
+
+ Comment cmnt(masm, "[ BinaryOpInstr");
+ // We can overwrite one of the operands if it is a temporary.
+ OverwriteMode mode = NO_OVERWRITE;
+ if (left()->is_temporary()) {
+ mode = OVERWRITE_LEFT;
+ } else if (right()->is_temporary()) {
+ mode = OVERWRITE_RIGHT;
+ }
+
+ // Push both operands and call the specialized stub.
+ if (!left()->is_on_stack()) left()->Push(masm);
+ right()->Push(masm);
+ GenericBinaryOpStub stub(op(), mode, SMI_CODE_IN_STUB);
+ __ CallStub(&stub);
+ location()->Set(masm, eax);
+}
+
+
+void ReturnInstr::Compile(MacroAssembler* masm) {
+ // The location should be 'Effect'. As a side effect, move the value to
+ // the accumulator.
+ Comment cmnt(masm, "[ ReturnInstr");
+ value_->Get(masm, eax);
+}
+
+
+void Constant::Get(MacroAssembler* masm, Register reg) {
+ __ mov(reg, Immediate(handle_));
+}
+
+
+void Constant::Push(MacroAssembler* masm) {
+ __ push(Immediate(handle_));
+}
+
+
+static Operand ToOperand(SlotLocation* loc) {
+ switch (loc->type()) {
+ case Slot::PARAMETER: {
+ int count = CfgGlobals::current()->fun()->scope()->num_parameters();
+ return Operand(ebp, (1 + count - loc->index()) * kPointerSize);
+ }
+ case Slot::LOCAL: {
+ const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
+ return Operand(ebp, kOffset - loc->index() * kPointerSize);
+ }
+ default:
+ UNREACHABLE();
+ return Operand(eax);
+ }
+}
+
+
+void Constant::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
+ __ mov(ToOperand(loc), Immediate(handle_));
+}
+
+
+void SlotLocation::Get(MacroAssembler* masm, Register reg) {
+ __ mov(reg, ToOperand(this));
+}
+
+
+void SlotLocation::Set(MacroAssembler* masm, Register reg) {
+ __ mov(ToOperand(this), reg);
+}
+
+
+void SlotLocation::Push(MacroAssembler* masm) {
+ __ push(ToOperand(this));
+}
+
+
+void SlotLocation::Move(MacroAssembler* masm, Value* value) {
+ // We dispatch to the value because in some cases (temp or constant)
+ // we can use a single instruction.
+ value->MoveToSlot(masm, this);
+}
+
+
+void SlotLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
+ // The accumulator is not live across a MoveInstr.
+ __ mov(eax, ToOperand(this));
+ __ mov(ToOperand(loc), eax);
+}
+
+
+void TempLocation::Get(MacroAssembler* masm, Register reg) {
+ switch (where_) {
+ case ACCUMULATOR:
+ if (!reg.is(eax)) __ mov(reg, eax);
+ break;
+ case STACK:
+ __ pop(reg);
+ break;
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+void TempLocation::Set(MacroAssembler* masm, Register reg) {
+ switch (where_) {
+ case ACCUMULATOR:
+ if (!reg.is(eax)) __ mov(eax, reg);
+ break;
+ case STACK:
+ __ push(reg);
+ break;
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+void TempLocation::Push(MacroAssembler* masm) {
+ switch (where_) {
+ case ACCUMULATOR:
+ __ push(eax);
+ break;
+ case STACK:
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+void TempLocation::Move(MacroAssembler* masm, Value* value) {
+ switch (where_) {
+ case ACCUMULATOR:
+ value->Get(masm, eax);
+ break;
+ case STACK:
+ value->Push(masm);
+ break;
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+void TempLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
+ switch (where_) {
+ case ACCUMULATOR:
+ __ mov(ToOperand(loc), eax);
+ break;
+ case STACK:
+ __ pop(ToOperand(loc));
+ break;
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+#undef __
+
+} } // namespace v8::internal
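The new CFG backend keeps at most one compiler temporary live at a time, either in eax (the accumulator) or on the stack; InstructionBlock::Compile peeks at the next instruction (FastAllocate) to decide which. A rough sketch of that two-place discipline, with all types and names invented for illustration:

// Sketch only: the "accumulator or stack" temp discipline; not V8's types.
#include <cassert>
#include <vector>

enum Where { NOT_ALLOCATED, ACCUMULATOR, STACK };

struct Temp { Where where = NOT_ALLOCATED; };

struct FakeMasm {
  int accumulator = 0;
  std::vector<int> stack;
};

// Mirrors TempLocation::Set: a produced value either stays in the
// accumulator or is pushed, depending on how the next use allocated it.
void SetTemp(FakeMasm* masm, Temp* t, int value) {
  assert(t->where != NOT_ALLOCATED);
  if (t->where == ACCUMULATOR) masm->accumulator = value;
  else masm->stack.push_back(value);
}

// Mirrors TempLocation::Get: consume the temp from wherever it lives.
int GetTemp(FakeMasm* masm, Temp* t) {
  assert(t->where != NOT_ALLOCATED);
  if (t->where == ACCUMULATOR) return masm->accumulator;
  int v = masm->stack.back();
  masm->stack.pop_back();
  return v;
}

int main() {
  FakeMasm masm;
  Temp t;
  t.where = ACCUMULATOR;  // what FastAllocate would pick for a tail-position use
  SetTemp(&masm, &t, 42);
  return GetTemp(&masm, &t) == 42 ? 0 : 1;
}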
diff --git a/V8Binding/v8/src/ia32/codegen-ia32.cc b/V8Binding/v8/src/ia32/codegen-ia32.cc
index 457b22f..9542b16 100644
--- a/V8Binding/v8/src/ia32/codegen-ia32.cc
+++ b/V8Binding/v8/src/ia32/codegen-ia32.cc
@@ -754,9 +754,9 @@ class FloatingPointHelper : public AllStatic {
public:
// Code pattern for loading a floating point value. Input value must
// be either a smi or a heap number object (fp value). Requirements:
- // operand on TOS+1. Returns operand as floating point number on FPU
- // stack.
- static void LoadFloatOperand(MacroAssembler* masm, Register scratch);
+ // operand in register number. Returns operand as floating point number
+ // on FPU stack.
+ static void LoadFloatOperand(MacroAssembler* masm, Register number);
// Code pattern for loading floating point values. Input values must
// be either smi or heap number objects (fp values). Requirements:
// operand_1 on TOS+1 , operand_2 on TOS+2; Returns operands as
@@ -778,57 +778,6 @@ class FloatingPointHelper : public AllStatic {
};
-// Flag that indicates whether or not the code that handles smi arguments
-// should be placed in the stub, inlined, or omitted entirely.
-enum GenericBinaryFlags {
- SMI_CODE_IN_STUB,
- SMI_CODE_INLINED
-};
-
-
-class GenericBinaryOpStub: public CodeStub {
- public:
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- GenericBinaryFlags flags)
- : op_(op), mode_(mode), flags_(flags) {
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- void GenerateSmiCode(MacroAssembler* masm, Label* slow);
-
- private:
- Token::Value op_;
- OverwriteMode mode_;
- GenericBinaryFlags flags_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
- Token::String(op_),
- static_cast<int>(mode_),
- static_cast<int>(flags_));
- }
-#endif
-
- // Minor key encoding in 16 bits FOOOOOOOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 13> {};
- class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
-
- Major MajorKey() { return GenericBinaryOp; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | FlagBits::encode(flags_);
- }
- void Generate(MacroAssembler* masm);
-};
-
-
const char* GenericBinaryOpStub::GetName() {
switch (op_) {
case Token::ADD: return "GenericBinaryOpStub_ADD";
@@ -5154,11 +5103,10 @@ void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
- ASSERT(kSmiTagSize == 1 && kSmiTag == 0); // shifting code depends on this
+ ASSERT(kSmiTag == 0); // EBP value is aligned, so it should look like Smi.
Result ebp_as_smi = allocator_->Allocate();
ASSERT(ebp_as_smi.is_valid());
__ mov(ebp_as_smi.reg(), Operand(ebp));
- __ shr(ebp_as_smi.reg(), kSmiTagSize);
frame_->Push(&ebp_as_smi);
}
@@ -5216,8 +5164,11 @@ void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
}
// Go slow case if argument to operation is out of range.
+ Result eax_reg = allocator_->Allocate(eax);
+ ASSERT(eax_reg.is_valid());
__ fnstsw_ax();
__ sahf();
+ eax_reg.Unuse();
call_runtime.Branch(parity_even, not_taken);
// Allocate heap number for result if possible.
@@ -6297,8 +6248,8 @@ void Reference::GetValue(TypeofState typeof_state) {
__ mov(elements.reg(),
FieldOperand(receiver.reg(), JSObject::kElementsOffset));
__ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
- Immediate(Factory::hash_table_map()));
- deferred->Branch(equal);
+ Immediate(Factory::fixed_array_map()));
+ deferred->Branch(not_equal);
// Shift the key to get the actual index value and check that
// it is within bounds.
@@ -7016,19 +6967,19 @@ void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
- Register scratch) {
+ Register number) {
Label load_smi, done;
- __ test(scratch, Immediate(kSmiTagMask));
+ __ test(number, Immediate(kSmiTagMask));
__ j(zero, &load_smi, not_taken);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
+ __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
__ jmp(&done);
__ bind(&load_smi);
- __ sar(scratch, kSmiTagSize);
- __ push(scratch);
+ __ sar(number, kSmiTagSize);
+ __ push(number);
__ fild_s(Operand(esp, 0));
- __ pop(scratch);
+ __ pop(number);
__ bind(&done);
}
@@ -7786,7 +7737,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// If this is the outermost JS call, set js_entry_sp value.
ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
__ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
- __ j(NegateCondition(equal), &not_outermost_js);
+ __ j(not_equal, &not_outermost_js);
__ mov(Operand::StaticVariable(js_entry_sp), ebp);
__ bind(&not_outermost_js);
#endif
@@ -7837,7 +7788,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// If current EBP value is the same as js_entry_sp value, it means that
// the current function is the outermost.
__ cmp(ebp, Operand::StaticVariable(js_entry_sp));
- __ j(NegateCondition(equal), &not_outermost_js_2);
+ __ j(not_equal, &not_outermost_js_2);
__ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
__ bind(&not_outermost_js_2);
#endif
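The GenerateGetFramePointer change above relies on a tagging invariant: with kSmiTag == 0 and a one-bit tag, any pointer-aligned word already has a clear low bit and therefore already parses as a valid smi (encoding the original value divided by two), so ebp can be pushed directly without the old shift. A small hedged sketch of that rule:

// Sketch only: smi tagging as used in 32-bit V8 of this era (1-bit tag, tag 0).
#include <cassert>
#include <cstdint>

const int kSmiTag = 0;
const int kSmiTagSize = 1;
const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;

static bool LooksLikeSmi(intptr_t word) {
  return (word & kSmiTagMask) == kSmiTag;
}

int main() {
  intptr_t fake_ebp = 0xBFFFF0A8;  // frame pointers are at least 4-byte aligned
  // Aligned pointer => low bit clear => structurally a valid smi, so it can
  // be pushed on the JS stack as-is.
  assert(LooksLikeSmi(fake_ebp));
  // A genuine smi encodes value v as v << kSmiTagSize.
  intptr_t smi_21 = 21 << kSmiTagSize;
  assert(LooksLikeSmi(smi_21) && (smi_21 >> kSmiTagSize) == 21);
  return 0;
}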
diff --git a/V8Binding/v8/src/ia32/codegen-ia32.h b/V8Binding/v8/src/ia32/codegen-ia32.h
index 5cd50b8..1d0cc8b 100644
--- a/V8Binding/v8/src/ia32/codegen-ia32.h
+++ b/V8Binding/v8/src/ia32/codegen-ia32.h
@@ -359,7 +359,7 @@ class CodeGenerator: public AstVisitor {
#define DEF_VISIT(type) \
void Visit##type(type* node);
- NODE_LIST(DEF_VISIT)
+ AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
// Visit a statement and then spill the virtual frame if control flow can
@@ -558,7 +558,7 @@ class CodeGenerator: public AstVisitor {
// information.
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(Node* node);
+ void CodeForStatementPosition(AstNode* node);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
@@ -603,12 +603,63 @@ class CodeGenerator: public AstVisitor {
friend class Reference;
friend class Result;
- friend class CodeGeneratorPatcher; // Used in test-log-ia32.cc
+ friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
+// Flag that indicates whether or not the code that handles smi arguments
+// should be placed in the stub, inlined, or omitted entirely.
+enum GenericBinaryFlags {
+ SMI_CODE_IN_STUB,
+ SMI_CODE_INLINED
+};
+
+
+class GenericBinaryOpStub: public CodeStub {
+ public:
+ GenericBinaryOpStub(Token::Value op,
+ OverwriteMode mode,
+ GenericBinaryFlags flags)
+ : op_(op), mode_(mode), flags_(flags) {
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+ }
+
+ void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+
+ private:
+ Token::Value op_;
+ OverwriteMode mode_;
+ GenericBinaryFlags flags_;
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
+ Token::String(op_),
+ static_cast<int>(mode_),
+ static_cast<int>(flags_));
+ }
+#endif
+
+ // Minor key encoding in 16 bits FOOOOOOOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 13> {};
+ class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
+
+ Major MajorKey() { return GenericBinaryOp; }
+ int MinorKey() {
+ // Encode the parameters in a unique 16 bit value.
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | FlagBits::encode(flags_);
+ }
+ void Generate(MacroAssembler* masm);
+};
+
+
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_
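MinorKey above packs three fields into the comment's FOOOOOOOOOOOOOMM layout: two mode bits, thirteen op bits, and one flag bit. A hedged standalone sketch of the same BitField-style packing, with generic stand-in names:

// Sketch only: the bit packing behind MinorKey, not V8's actual templates.
#include <cassert>
#include <cstdint>

template <class T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) { return static_cast<uint32_t>(value) << shift; }
  static T decode(uint32_t key) { return static_cast<T>((key & kMask) >> shift); }
};

enum Mode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
enum Flags { SMI_CODE_IN_STUB, SMI_CODE_INLINED };

// Layout FOOOOOOOOOOOOOMM: mode in bits 0-1, op token in bits 2-14, flag in 15.
typedef BitField<Mode, 0, 2> ModeBits;
typedef BitField<int, 2, 13> OpBits;     // Token::Value fits in 13 bits
typedef BitField<Flags, 15, 1> FlagBits;

int main() {
  int op = 7;  // stand-in for a Token::Value such as Token::ADD
  uint32_t key = OpBits::encode(op) | ModeBits::encode(OVERWRITE_LEFT) |
                 FlagBits::encode(SMI_CODE_INLINED);
  assert(key <= 0xFFFF);                       // fits the 16-bit minor key
  assert(OpBits::decode(key) == op);
  assert(ModeBits::decode(key) == OVERWRITE_LEFT);
  assert(FlagBits::decode(key) == SMI_CODE_INLINED);
  return 0;
}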
diff --git a/V8Binding/v8/src/ia32/ic-ia32.cc b/V8Binding/v8/src/ia32/ic-ia32.cc
index d64dee1..08ffe2f 100644
--- a/V8Binding/v8/src/ia32/ic-ia32.cc
+++ b/V8Binding/v8/src/ia32/ic-ia32.cc
@@ -234,11 +234,11 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- esp[4] : name
// -- esp[8] : receiver
// -----------------------------------
- Label slow, fast, check_string, index_int, index_string;
+ Label slow, check_string, index_int, index_string, check_pixel_array;
// Load name and receiver.
- __ mov(eax, (Operand(esp, kPointerSize)));
- __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ mov(ecx, Operand(esp, 2 * kPointerSize));
// Check that the object isn't a smi.
__ test(ecx, Immediate(kSmiTagMask));
@@ -269,11 +269,36 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
- Immediate(Factory::hash_table_map()));
- __ j(equal, &slow, not_taken);
+ Immediate(Factory::fixed_array_map()));
+ __ j(not_equal, &check_pixel_array);
// Check that the key (index) is within bounds.
__ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
- __ j(below, &fast, taken);
+ __ j(above_equal, &slow);
+ // Fast case: Do the load.
+ __ mov(eax,
+ Operand(ecx, eax, times_4, FixedArray::kHeaderSize - kHeapObjectTag));
+ __ cmp(Operand(eax), Immediate(Factory::the_hole_value()));
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ j(equal, &slow);
+ __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
+ __ ret(0);
+
+ // Check whether the elements is a pixel array.
+ // eax: untagged index
+ // ecx: elements array
+ __ bind(&check_pixel_array);
+ __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+ Immediate(Factory::pixel_array_map()));
+ __ j(not_equal, &slow);
+ __ cmp(eax, FieldOperand(ecx, PixelArray::kLengthOffset));
+ __ j(above_equal, &slow);
+ __ mov(ecx, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
+ __ movzx_b(eax, Operand(ecx, eax, times_1, 0));
+ __ shl(eax, kSmiTagSize);
+ __ ret(0);
+
+
// Slow case: Load name and receiver from stack and jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
@@ -315,16 +340,6 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ and_(eax, (1 << String::kShortLengthShift) - 1);
__ shr(eax, String::kLongLengthShift);
__ jmp(&index_int);
- // Fast case: Do the load.
- __ bind(&fast);
- __ mov(eax,
- Operand(ecx, eax, times_4, FixedArray::kHeaderSize - kHeapObjectTag));
- __ cmp(Operand(eax), Immediate(Factory::the_hole_value()));
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ j(equal, &slow, not_taken);
- __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
- __ ret(0);
}
@@ -335,7 +350,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// -- esp[4] : key
// -- esp[8] : receiver
// -----------------------------------
- Label slow, fast, array, extra;
+ Label slow, fast, array, extra, check_pixel_array;
// Get the receiver from the stack.
__ mov(edx, Operand(esp, 2 * kPointerSize)); // 2 ~ return address, key
@@ -370,8 +385,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
- Immediate(Factory::hash_table_map()));
- __ j(equal, &slow, not_taken);
+ Immediate(Factory::fixed_array_map()));
+ __ j(not_equal, &check_pixel_array, not_taken);
// Untag the key (for checking against untagged length in the fixed array).
__ mov(edx, Operand(ebx));
__ sar(edx, kSmiTagSize); // untag the index and use it for the comparison
@@ -381,7 +396,6 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// ebx: index (as a smi)
__ j(below, &fast, taken);
-
// Slow case: Push extra copies of the arguments (3).
__ bind(&slow);
__ pop(ecx);
@@ -392,6 +406,37 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Do tail-call to runtime routine.
__ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3);
+ // Check whether the elements is a pixel array.
+ // eax: value
+ // ecx: elements array
+ // ebx: index (as a smi)
+ __ bind(&check_pixel_array);
+ __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
+ Immediate(Factory::pixel_array_map()));
+ __ j(not_equal, &slow);
+ // Check that the value is a smi. If a conversion is needed call into the
+ // runtime to convert and clamp.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow);
+ __ sar(ebx, kSmiTagSize); // Untag the index.
+ __ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
+ __ j(above_equal, &slow);
+ __ sar(eax, kSmiTagSize); // Untag the value.
+ { // Clamp the value to [0..255].
+ Label done, check_255;
+ __ cmp(eax, 0);
+ __ j(greater_equal, &check_255);
+ __ mov(eax, Immediate(0));
+ __ jmp(&done);
+ __ bind(&check_255);
+ __ cmp(eax, 255);
+ __ j(less_equal, &done);
+ __ mov(eax, Immediate(255));
+ __ bind(&done);
+ }
+ __ mov(ecx, FieldOperand(ecx, PixelArray::kExternalPointerOffset));
+ __ mov_b(Operand(ecx, ebx, times_1, 0), eax);
+ __ ret(0);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
@@ -422,15 +467,14 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// ebx: index (as a smi)
__ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
__ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
- Immediate(Factory::hash_table_map()));
- __ j(equal, &slow, not_taken);
+ Immediate(Factory::fixed_array_map()));
+ __ j(not_equal, &check_pixel_array);
// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
__ cmp(ebx, FieldOperand(edx, JSArray::kLengthOffset));
__ j(above_equal, &extra, not_taken);
-
// Fast case: Do the store.
__ bind(&fast);
// eax: value
@@ -749,12 +793,10 @@ void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
// -----------------------------------
__ mov(eax, Operand(esp, kPointerSize));
-
- // Move the return address below the arguments.
__ pop(ebx);
- __ push(eax);
- __ push(ecx);
- __ push(ebx);
+ __ push(eax); // receiver
+ __ push(ecx); // name
+ __ push(ebx); // return address
// Perform tail call to the entry.
__ TailCallRuntime(f, 2);
@@ -797,7 +839,8 @@ void KeyedStoreIC::RestoreInlinedVersion(Address address) {
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
// The address of the instruction following the call.
- Address test_instruction_address = address + 4;
+ Address test_instruction_address =
+ address + Assembler::kTargetAddrToReturnAddrDist;
// If the instruction following the call is not a test eax, nothing
// was inlined.
if (*test_instruction_address != kTestEaxByte) return false;
@@ -823,7 +866,8 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
static bool PatchInlinedMapCheck(Address address, Object* map) {
- Address test_instruction_address = address + 4; // 4 = stub address
+ Address test_instruction_address =
+ address + Assembler::kTargetAddrToReturnAddrDist;
// The keyed load has a fast inlined case if the IC call instruction
// is immediately followed by a test instruction.
if (*test_instruction_address != kTestEaxByte) return false;
@@ -877,12 +921,10 @@ void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
__ mov(eax, Operand(esp, kPointerSize));
__ mov(ecx, Operand(esp, 2 * kPointerSize));
-
- // Move the return address below the arguments.
__ pop(ebx);
- __ push(ecx);
- __ push(eax);
- __ push(ebx);
+ __ push(ecx); // receiver
+ __ push(eax); // name
+ __ push(ebx); // return address
// Perform tail call to the entry.
__ TailCallRuntime(f, 2);
@@ -917,12 +959,12 @@ void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
// -- esp[4] : receiver
// -----------------------------------
- // Move the return address below the arguments.
__ pop(ebx);
- __ push(Operand(esp, 0));
- __ push(ecx);
- __ push(eax);
- __ push(ebx);
+ __ push(Operand(esp, 0)); // receiver
+ __ push(ecx); // transition map
+ __ push(eax); // value
+ __ push(ebx); // return address
+
// Perform tail call to the entry.
__ TailCallRuntime(
ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3);
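The pixel-array store path added above untags the smi value and clamps it to a byte before writing into the external buffer; the two compare-and-branch pairs amount to the following clamp (sketch in plain C++; non-smi values still bail out to the runtime):

// Sketch only: what the clamp-to-[0..255] branch sequence in the stub computes.
#include <cassert>

static int ClampToByte(int value) {
  if (value < 0) return 0;        // j(greater_equal, &check_255) not taken
  if (value > 255) return 255;    // j(less_equal, &done) not taken
  return value;
}

int main() {
  assert(ClampToByte(-7) == 0);
  assert(ClampToByte(42) == 42);
  assert(ClampToByte(300) == 255);
  return 0;
}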
diff --git a/V8Binding/v8/src/ia32/macro-assembler-ia32.cc b/V8Binding/v8/src/ia32/macro-assembler-ia32.cc
index fae1525..7782aa9 100644
--- a/V8Binding/v8/src/ia32/macro-assembler-ia32.cc
+++ b/V8Binding/v8/src/ia32/macro-assembler-ia32.cc
@@ -54,41 +54,47 @@ static void RecordWriteHelper(MacroAssembler* masm,
Register scratch) {
Label fast;
- // Compute the page address from the heap object pointer, leave it
- // in 'object'.
+ // Compute the page start address from the heap object pointer, and reuse
+ // the 'object' register for it.
masm->and_(object, ~Page::kPageAlignmentMask);
+ Register page_start = object;
- // Compute the bit addr in the remembered set, leave it in "addr".
- masm->sub(addr, Operand(object));
+ // Compute the bit addr in the remembered set/index of the pointer in the
+ // page. Reuse 'addr' as pointer_offset.
+ masm->sub(addr, Operand(page_start));
masm->shr(addr, kObjectAlignmentBits);
+ Register pointer_offset = addr;
// If the bit offset lies beyond the normal remembered set range, it is in
// the extra remembered set area of a large object.
- masm->cmp(addr, Page::kPageSize / kPointerSize);
+ masm->cmp(pointer_offset, Page::kPageSize / kPointerSize);
masm->j(less, &fast);
- // Adjust 'addr' to be relative to the start of the extra remembered set
- // and the page address in 'object' to be the address of the extra
- // remembered set.
- masm->sub(Operand(addr), Immediate(Page::kPageSize / kPointerSize));
- // Load the array length into 'scratch' and multiply by four to get the
- // size in bytes of the elements.
- masm->mov(scratch, Operand(object, Page::kObjectStartOffset
- + FixedArray::kLengthOffset));
- masm->shl(scratch, kObjectAlignmentBits);
- // Add the page header, array header, and array body size to the page
- // address.
- masm->add(Operand(object), Immediate(Page::kObjectStartOffset
- + FixedArray::kHeaderSize));
- masm->add(object, Operand(scratch));
-
+ // Adjust 'page_start' so that addressing using 'pointer_offset' hits the
+ // extra remembered set after the large object.
+
+ // Find the length of the large object (FixedArray).
+ masm->mov(scratch, Operand(page_start, Page::kObjectStartOffset
+ + FixedArray::kLengthOffset));
+ Register array_length = scratch;
+
+ // Extra remembered set starts right after the large object (a FixedArray), at
+ // page_start + kObjectStartOffset + objectSize
+ // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
+ // Add the delta between the end of the normal RSet and the start of the
+ // extra RSet to 'object', so that addressing the bit using 'pointer_offset'
+ // hits the extra RSet words.
+ masm->lea(page_start,
+ Operand(page_start, array_length, times_pointer_size,
+ Page::kObjectStartOffset + FixedArray::kHeaderSize
+ - Page::kRSetEndOffset));
// NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
// to limit code size. We should probably evaluate this decision by
// measuring the performance of an equivalent implementation using
// "simpler" instructions
masm->bind(&fast);
- masm->bts(Operand(object, 0), addr);
+ masm->bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
}
@@ -146,43 +152,30 @@ void MacroAssembler::RecordWrite(Register object, int offset,
// for the remembered set bits.
Label done;
- // This optimization cannot survive serialization and deserialization,
- // so we disable as long as serialization can take place.
- int32_t new_space_start =
- reinterpret_cast<int32_t>(ExternalReference::new_space_start().address());
- if (Serializer::enabled() || new_space_start < 0) {
- // Cannot do smart bit-twiddling. Need to do two consecutive checks.
- // Check for Smi first.
- test(value, Immediate(kSmiTagMask));
- j(zero, &done);
- // Test that the object address is not in the new space. We cannot
- // set remembered set bits in the new space.
+ // Skip barrier if writing a smi.
+ ASSERT_EQ(0, kSmiTag);
+ test(value, Immediate(kSmiTagMask));
+ j(zero, &done);
+
+ if (Serializer::enabled()) {
+ // Can't do arithmetic on external references if it might get serialized.
mov(value, Operand(object));
and_(value, Heap::NewSpaceMask());
cmp(Operand(value), Immediate(ExternalReference::new_space_start()));
j(equal, &done);
} else {
- // move the value SmiTag into the sign bit
- shl(value, 31);
- // combine the object with value SmiTag
- or_(value, Operand(object));
- // remove the uninteresing bits inside the page
- and_(value, Heap::NewSpaceMask() | (1 << 31));
- // xor has two effects:
- // - if the value was a smi, then the result will be negative
- // - if the object is pointing into new space area the page bits will
- // all be zero
- xor_(value, new_space_start | (1 << 31));
- // Check for both conditions in one branch
- j(less_equal, &done);
+ int32_t new_space_start = reinterpret_cast<int32_t>(
+ ExternalReference::new_space_start().address());
+ lea(value, Operand(object, -new_space_start));
+ and_(value, Heap::NewSpaceMask());
+ j(equal, &done);
}
if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
// Compute the bit offset in the remembered set, leave it in 'value'.
- mov(value, Operand(object));
+ lea(value, Operand(object, offset));
and_(value, Page::kPageAlignmentMask);
- add(Operand(value), Immediate(offset));
- shr(value, kObjectAlignmentBits);
+ shr(value, kPointerSizeLog2);
// Compute the page address from the heap object pointer, leave it in
// 'object'.
@@ -192,7 +185,7 @@ void MacroAssembler::RecordWrite(Register object, int offset,
// to limit code size. We should probably evaluate this decision by
// measuring the performance of an equivalent implementation using
// "simpler" instructions
- bts(Operand(object, 0), value);
+ bts(Operand(object, Page::kRSetOffset), value);
} else {
Register dst = scratch;
if (offset != 0) {
@@ -201,7 +194,9 @@ void MacroAssembler::RecordWrite(Register object, int offset,
// array access: calculate the destination address in the same manner as
// KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
// into an array of words.
- lea(dst, Operand(object, dst, times_2,
+ ASSERT_EQ(1, kSmiTagSize);
+ ASSERT_EQ(0, kSmiTag);
+ lea(dst, Operand(object, dst, times_half_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
}
// If we are already generating a shared stub, not inlining the
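RecordWrite marks the written slot in the page's remembered set: mask the slot address down to its page, take the offset within the page, and shift by the pointer size to get a bit index, which bts then sets relative to the RSet start. A hedged sketch of that arithmetic with assumed constants:

// Sketch only: the remembered-set bit index that RecordWrite computes, with
// assumed constants standing in for Page::kPageAlignmentMask and friends.
#include <cassert>
#include <cstdint>

const uint32_t kPageSize = 8 * 1024;              // assumed page size
const uint32_t kPageAlignmentMask = kPageSize - 1;
const int kPointerSizeLog2 = 2;                   // ia32: 4-byte pointers

// Which bit of the page's remembered set covers the written slot?
static uint32_t RSetBitIndex(uint32_t slot_address) {
  uint32_t offset_in_page = slot_address & kPageAlignmentMask;
  return offset_in_page >> kPointerSizeLog2;      // one RSet bit per word
}

// The base the bts instruction indexes from (page start + Page::kRSetOffset).
static uint32_t PageStart(uint32_t slot_address) {
  return slot_address & ~kPageAlignmentMask;
}

int main() {
  uint32_t page = 0x08042000;          // assumed page-aligned heap page
  uint32_t slot = page + 0x40;         // a field 0x40 bytes into the page
  assert(PageStart(slot) == page);
  assert(RSetBitIndex(slot) == 0x40 / 4);   // the 16th word on the page
  return 0;
}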
diff --git a/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.cc b/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.cc
index c5d7c05..a49c1f5 100644
--- a/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -634,11 +634,9 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
__ push(Immediate(0)); // Make room for "input start - 1" constant.
// Check if we have space on the stack for registers.
- Label retry_stack_check;
Label stack_limit_hit;
Label stack_ok;
- __ bind(&retry_stack_check);
ExternalReference stack_guard_limit =
ExternalReference::address_of_stack_guard_limit();
__ mov(ecx, esp);
@@ -658,10 +656,7 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
CallCheckStackGuardState(ebx);
__ or_(eax, Operand(eax));
// If returned value is non-zero, we exit with the returned value as result.
- // Otherwise it was a preemption and we just check the limit again.
- __ j(equal, &retry_stack_check);
- // Return value was non-zero. Exit with exception or retry.
- __ jmp(&exit_label_);
+ __ j(not_zero, &exit_label_);
__ bind(&stack_ok);
@@ -757,24 +752,16 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// Preempt-code
if (check_preempt_label_.is_linked()) {
- __ bind(&check_preempt_label_);
+ SafeCallTarget(&check_preempt_label_);
__ push(backtrack_stackpointer());
__ push(edi);
- Label retry;
-
- __ bind(&retry);
CallCheckStackGuardState(ebx);
__ or_(eax, Operand(eax));
// If returning non-zero, we should end execution with the given
// result as return value.
__ j(not_zero, &exit_label_);
- // Check if we are still preempted.
- ExternalReference stack_guard_limit =
- ExternalReference::address_of_stack_guard_limit();
- __ cmp(esp, Operand::StaticVariable(stack_guard_limit));
- __ j(below_equal, &retry);
__ pop(edi);
__ pop(backtrack_stackpointer());
@@ -785,7 +772,7 @@ Handle<Object> RegExpMacroAssemblerIA32::GetCode(Handle<String> source) {
// Backtrack stack overflow code.
if (stack_overflow_label_.is_linked()) {
- __ bind(&stack_overflow_label_);
+ SafeCallTarget(&stack_overflow_label_);
// Reached if the backtrack-stack limit has been hit.
Label grow_failed;
@@ -1262,17 +1249,19 @@ void RegExpMacroAssemblerIA32::BranchOrBacktrack(Condition condition,
void RegExpMacroAssemblerIA32::SafeCall(Label* to) {
- Label return_to;
- __ push(Immediate::CodeRelativeOffset(&return_to));
- __ jmp(to);
- __ bind(&return_to);
+ __ call(to);
}
void RegExpMacroAssemblerIA32::SafeReturn() {
- __ pop(ebx);
- __ add(Operand(ebx), Immediate(masm_->CodeObject()));
- __ jmp(Operand(ebx));
+ __ add(Operand(esp, 0), Immediate(masm_->CodeObject()));
+ __ ret(0);
+}
+
+
+void RegExpMacroAssemblerIA32::SafeCallTarget(Label* name) {
+ __ bind(name);
+ __ sub(Operand(esp, 0), Immediate(masm_->CodeObject()));
}
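SafeCall now uses a real call, and SafeCallTarget/SafeReturn keep the return address GC-safe by immediately rebasing it to an offset from the code object, then adding the (possibly relocated) code object back before ret. A hedged model of that invariant in plain integers:

// Sketch only: the "code-relative return address" trick behind SafeCall /
// SafeCallTarget / SafeReturn, modeled with plain integers.
#include <cassert>
#include <cstdint>

int main() {
  uintptr_t code_object = 0x0A000000;            // where the regexp code lives now
  uintptr_t return_addr = code_object + 0x1234;  // pushed by 'call'

  // SafeCallTarget: sub(Operand(esp, 0), CodeObject()) -- make it relative.
  uintptr_t stored = return_addr - code_object;  // GC-safe: not a raw code pointer

  // ... a moving GC relocates the code object ...
  code_object = 0x0B800000;

  // SafeReturn: add(Operand(esp, 0), CodeObject()); ret(0) -- rebase and return.
  uintptr_t resumed = stored + code_object;
  assert(resumed == code_object + 0x1234);       // still the right offset
  return 0;
}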
diff --git a/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.h b/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.h
index a06700a..c3d9155 100644
--- a/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.h
+++ b/V8Binding/v8/src/ia32/regexp-macro-assembler-ia32.h
@@ -227,6 +227,7 @@ class RegExpMacroAssemblerIA32: public RegExpMacroAssembler {
// is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
inline void SafeCall(Label* to);
inline void SafeReturn();
+ inline void SafeCallTarget(Label* name);
// Pushes the value of a register on the backtrack stack. Decrements the
// stack pointer (ecx) by a word size and stores the register's value there.
diff --git a/V8Binding/v8/src/ia32/stub-cache-ia32.cc b/V8Binding/v8/src/ia32/stub-cache-ia32.cc
index e47ad1c..a626377 100644
--- a/V8Binding/v8/src/ia32/stub-cache-ia32.cc
+++ b/V8Binding/v8/src/ia32/stub-cache-ia32.cc
@@ -152,6 +152,22 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
}
+template <typename Pushable>
+static void PushInterceptorArguments(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Pushable name,
+ JSObject* holder_obj) {
+ __ push(receiver);
+ __ push(holder);
+ __ push(name);
+ InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+ __ mov(receiver, Immediate(Handle<Object>(interceptor)));
+ __ push(receiver);
+ __ push(FieldOperand(receiver, InterceptorInfo::kDataOffset));
+}
+
+
void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
int index,
Register prototype) {
@@ -273,6 +289,322 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
}
+template <class Pushable>
+static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Pushable name,
+ JSObject* holder_obj) {
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly));
+ __ mov(eax, Immediate(5));
+ __ mov(ebx, Immediate(ref));
+
+ CEntryStub stub;
+ __ CallStub(&stub);
+}
+
+
+template <class Compiler>
+static void CompileLoadInterceptor(Compiler* compiler,
+ StubCompiler* stub_compiler,
+ MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss) {
+ ASSERT(holder->HasNamedInterceptor());
+ ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Check that the receiver isn't a smi.
+ __ test(receiver, Immediate(kSmiTagMask));
+ __ j(zero, miss, not_taken);
+
+ // Check that the maps haven't changed.
+ Register reg =
+ stub_compiler->CheckPrototypes(object, receiver, holder,
+ scratch1, scratch2, name, miss);
+
+ if (lookup->IsValid() && lookup->IsCacheable()) {
+ compiler->CompileCacheable(masm,
+ stub_compiler,
+ receiver,
+ reg,
+ scratch1,
+ scratch2,
+ holder,
+ lookup,
+ name,
+ miss);
+ } else {
+ compiler->CompileRegular(masm,
+ receiver,
+ reg,
+ scratch2,
+ holder,
+ miss);
+ }
+}
+
+
+static void LookupPostInterceptor(JSObject* holder,
+ String* name,
+ LookupResult* lookup) {
+ holder->LocalLookupRealNamedProperty(name, lookup);
+ if (lookup->IsNotFound()) {
+ Object* proto = holder->GetPrototype();
+ if (proto != Heap::null_value()) {
+ proto->Lookup(name, lookup);
+ }
+ }
+}
+
+
+class LoadInterceptorCompiler BASE_EMBEDDED {
+ public:
+ explicit LoadInterceptorCompiler(Register name) : name_(name) {}
+
+ void CompileCacheable(MacroAssembler* masm,
+ StubCompiler* stub_compiler,
+ Register receiver,
+ Register holder,
+ Register scratch1,
+ Register scratch2,
+ JSObject* holder_obj,
+ LookupResult* lookup,
+ String* name,
+ Label* miss_label) {
+ AccessorInfo* callback = 0;
+ bool optimize = false;
+ // So far the most popular follow ups for interceptor loads are FIELD
+ // and CALLBACKS, so inline only them, other cases may be added
+ // later.
+ if (lookup->type() == FIELD) {
+ optimize = true;
+ } else if (lookup->type() == CALLBACKS) {
+ Object* callback_object = lookup->GetCallbackObject();
+ if (callback_object->IsAccessorInfo()) {
+ callback = AccessorInfo::cast(callback_object);
+ optimize = callback->getter() != NULL;
+ }
+ }
+
+ if (!optimize) {
+ CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+ return;
+ }
+
+ // Note: starting a frame here makes GC aware of pointers pushed below.
+ __ EnterInternalFrame();
+
+ if (lookup->type() == CALLBACKS) {
+ __ push(receiver);
+ }
+ __ push(holder);
+ __ push(name_);
+
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ Label interceptor_failed;
+ __ cmp(eax, Factory::no_interceptor_result_sentinel());
+ __ j(equal, &interceptor_failed);
+ __ LeaveInternalFrame();
+ __ ret(0);
+
+ __ bind(&interceptor_failed);
+ __ pop(name_);
+ __ pop(holder);
+ if (lookup->type() == CALLBACKS) {
+ __ pop(receiver);
+ }
+
+ __ LeaveInternalFrame();
+
+ if (lookup->type() == FIELD) {
+ holder = stub_compiler->CheckPrototypes(holder_obj, holder,
+ lookup->holder(), scratch1,
+ scratch2,
+ name,
+ miss_label);
+ stub_compiler->GenerateFastPropertyLoad(masm, eax,
+ holder, lookup->holder(),
+ lookup->GetFieldIndex());
+ __ ret(0);
+ } else {
+ ASSERT(lookup->type() == CALLBACKS);
+ ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+ ASSERT(callback != NULL);
+ ASSERT(callback->getter() != NULL);
+
+ Label cleanup;
+ __ pop(scratch2);
+ __ push(receiver);
+ __ push(scratch2);
+
+ holder = stub_compiler->CheckPrototypes(holder_obj, holder,
+ lookup->holder(), scratch1,
+ scratch2,
+ name,
+ &cleanup);
+
+ __ pop(scratch2); // save old return address
+ __ push(holder);
+ __ mov(holder, Immediate(Handle<AccessorInfo>(callback)));
+ __ push(holder);
+ __ push(FieldOperand(holder, AccessorInfo::kDataOffset));
+ __ push(name_);
+ __ push(scratch2); // restore old return address
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+ __ TailCallRuntime(ref, 5);
+
+ __ bind(&cleanup);
+ __ pop(scratch1);
+ __ pop(scratch2);
+ __ push(scratch1);
+ }
+ }
+
+
+ void CompileRegular(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register scratch,
+ JSObject* holder_obj,
+ Label* miss_label) {
+ __ pop(scratch); // save old return address
+ PushInterceptorArguments(masm, receiver, holder, name_, holder_obj);
+ __ push(scratch); // restore old return address
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+ __ TailCallRuntime(ref, 5);
+ }
+
+ private:
+ Register name_;
+};
+
+
+class CallInterceptorCompiler BASE_EMBEDDED {
+ public:
+ explicit CallInterceptorCompiler(const ParameterCount& arguments)
+ : arguments_(arguments), argc_(arguments.immediate()) {}
+
+ void CompileCacheable(MacroAssembler* masm,
+ StubCompiler* stub_compiler,
+ Register receiver,
+ Register holder,
+ Register scratch1,
+ Register scratch2,
+ JSObject* holder_obj,
+ LookupResult* lookup,
+ String* name,
+ Label* miss_label) {
+ JSFunction* function = 0;
+ bool optimize = false;
+ // So far the most popular case for failed interceptor is
+ // CONSTANT_FUNCTION sitting below.
+ if (lookup->type() == CONSTANT_FUNCTION) {
+ function = lookup->GetConstantFunction();
+ // JSArray holder is a special case for call constant function
+ // (see the corresponding code).
+ if (function->is_compiled() && !holder_obj->IsJSArray()) {
+ optimize = true;
+ }
+ }
+
+ if (!optimize) {
+ CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+ return;
+ }
+
+ __ EnterInternalFrame();
+ __ push(holder); // save the holder
+
+ CompileCallLoadPropertyWithInterceptor(
+ masm,
+ receiver,
+ holder,
+ // Under EnterInternalFrame this refers to name.
+ Operand(ebp, (argc_ + 3) * kPointerSize),
+ holder_obj);
+
+ __ pop(receiver); // restore holder
+ __ LeaveInternalFrame();
+
+ __ cmp(eax, Factory::no_interceptor_result_sentinel());
+ Label invoke;
+ __ j(not_equal, &invoke);
+
+ stub_compiler->CheckPrototypes(holder_obj, receiver,
+ lookup->holder(), scratch1,
+ scratch2,
+ name,
+ miss_label);
+ if (lookup->holder()->IsGlobalObject()) {
+ __ mov(edx, Operand(esp, (argc_ + 1) * kPointerSize));
+ __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalReceiverOffset));
+ __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edx);
+ }
+
+ ASSERT(function->is_compiled());
+ // Get the function and setup the context.
+ __ mov(edi, Immediate(Handle<JSFunction>(function)));
+ __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+ // Jump to the cached code (tail call).
+ ASSERT(function->is_compiled());
+ Handle<Code> code(function->code());
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ __ InvokeCode(code, expected, arguments_,
+ RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+
+ __ bind(&invoke);
+ }
+
+ void CompileRegular(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register scratch,
+ JSObject* holder_obj,
+ Label* miss_label) {
+ __ EnterInternalFrame();
+
+ PushInterceptorArguments(masm,
+ receiver,
+ holder,
+ Operand(ebp, (argc_ + 3) * kPointerSize),
+ holder_obj);
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForCall));
+ __ mov(eax, Immediate(5));
+ __ mov(ebx, Immediate(ref));
+
+ CEntryStub stub;
+ __ CallStub(&stub);
+
+ __ LeaveInternalFrame();
+ }
+
+ private:
+ const ParameterCount& arguments_;
+ int argc_;
+};
+
+
void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
Code* code = NULL;
@@ -447,15 +779,17 @@ void StubCompiler::GenerateLoadCallback(JSObject* object,
// Push the arguments on the JS stack of the caller.
__ pop(scratch2); // remove return address
__ push(receiver); // receiver
- __ push(Immediate(Handle<AccessorInfo>(callback))); // callback data
- __ push(name_reg); // name
__ push(reg); // holder
+ __ mov(reg, Immediate(Handle<AccessorInfo>(callback))); // callback data
+ __ push(reg);
+ __ push(FieldOperand(reg, AccessorInfo::kDataOffset));
+ __ push(name_reg); // name
__ push(scratch2); // restore return address
// Do tail-call to the runtime system.
ExternalReference load_callback_property =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
- __ TailCallRuntime(load_callback_property, 4);
+ __ TailCallRuntime(load_callback_property, 5);
}
@@ -484,36 +818,25 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
void StubCompiler::GenerateLoadInterceptor(JSObject* object,
JSObject* holder,
- Smi* lookup_hint,
+ LookupResult* lookup,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
String* name,
Label* miss) {
- // Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
-
- // Check that the maps haven't changed.
- Register reg =
- CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, name, miss);
-
- // Push the arguments on the JS stack of the caller.
- __ pop(scratch2); // remove return address
- __ push(receiver); // receiver
- __ push(reg); // holder
- __ push(name_reg); // name
- // TODO(367): Maybe don't push lookup_hint for LOOKUP_IN_HOLDER and/or
- // LOOKUP_IN_PROTOTYPE, but use a special version of lookup method?
- __ push(Immediate(lookup_hint));
- __ push(scratch2); // restore return address
-
- // Do tail-call to the runtime system.
- ExternalReference load_ic_property =
- ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
- __ TailCallRuntime(load_ic_property, 4);
+ LoadInterceptorCompiler compiler(name_reg);
+ CompileLoadInterceptor(&compiler,
+ this,
+ masm(),
+ object,
+ holder,
+ name,
+ lookup,
+ receiver,
+ scratch1,
+ scratch2,
+ miss);
}
@@ -678,13 +1001,13 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
CheckPrototypes(JSObject::cast(object), edx, holder,
ebx, ecx, name, &miss);
- // Make sure object->elements()->map() != Heap::dictionary_array_map()
+ // Make sure object->HasFastElements().
// Get the elements array of the object.
__ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(Factory::hash_table_map()));
- __ j(equal, &miss, not_taken);
+ Immediate(Factory::fixed_array_map()));
+ __ j(not_equal, &miss, not_taken);
break;
default:
@@ -726,47 +1049,32 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
// Get the number of arguments.
const int argc = arguments().immediate();
+ LookupResult lookup;
+ LookupPostInterceptor(holder, name, &lookup);
+
// Get the receiver from the stack.
__ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
- // Check that the receiver isn't a smi.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &miss, not_taken);
-
- // Check that maps have not changed and compute the holder register.
- Register reg =
- CheckPrototypes(JSObject::cast(object), edx, holder,
- ebx, ecx, name, &miss);
-
- // Enter an internal frame.
- __ EnterInternalFrame();
-
- // Push arguments on the expression stack.
- __ push(edx); // receiver
- __ push(reg); // holder
- __ push(Operand(ebp, (argc + 3) * kPointerSize)); // name
- __ push(Immediate(holder->InterceptorPropertyLookupHint(name)));
-
- // Perform call.
- ExternalReference load_interceptor =
- ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
- __ mov(eax, Immediate(4));
- __ mov(ebx, Immediate(load_interceptor));
-
- CEntryStub stub;
- __ CallStub(&stub);
-
- // Move result to edi and restore receiver.
- __ mov(edi, eax);
- __ mov(edx, Operand(ebp, (argc + 2) * kPointerSize)); // receiver
-
- // Exit frame.
- __ LeaveInternalFrame();
+ CallInterceptorCompiler compiler(arguments());
+ CompileLoadInterceptor(&compiler,
+ this,
+ masm(),
+ JSObject::cast(object),
+ holder,
+ name,
+ &lookup,
+ edx,
+ ebx,
+ ecx,
+ &miss);
+
+ // Restore receiver.
+ __ mov(edx, Operand(esp, (argc + 1) * kPointerSize));
// Check that the function really is a function.
- __ test(edi, Immediate(kSmiTagMask));
+ __ test(eax, Immediate(kSmiTagMask));
__ j(zero, &miss, not_taken);
- __ CmpObjectType(edi, JS_FUNCTION_TYPE, ebx);
+ __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
__ j(not_equal, &miss, not_taken);
// Patch the receiver on the stack with the global proxy if
@@ -777,6 +1085,7 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
}
// Invoke the function.
+ __ mov(edi, eax);
__ InvokeFunction(edi, arguments(), JUMP_FUNCTION);
// Handle load cache miss.
@@ -798,8 +1107,6 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::call_global_inline, 1);
-
// Get the number of arguments.
const int argc = arguments().immediate();
@@ -835,6 +1142,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
+ __ IncrementCounter(&Counters::call_global_inline, 1);
ASSERT(function->is_compiled());
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
@@ -843,7 +1151,6 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
- __ DecrementCounter(&Counters::call_global_inline, 1);
__ IncrementCounter(&Counters::call_global_inline_miss, 1);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ jmp(ic, RelocInfo::CODE_TARGET);
@@ -1007,10 +1314,8 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::named_store_global_inline, 1);
-
// Check that the map of the global has not changed.
- __ mov(ebx, (Operand(esp, kPointerSize)));
+ __ mov(ebx, Operand(esp, kPointerSize));
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(Handle<Map>(object->map())));
__ j(not_equal, &miss, not_taken);
@@ -1020,11 +1325,11 @@ Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
__ mov(FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset), eax);
// Return the value (register eax).
+ __ IncrementCounter(&Counters::named_store_global_inline, 1);
__ ret(0);
// Handle store cache miss.
__ bind(&miss);
- __ DecrementCounter(&Counters::named_store_global_inline, 1);
__ IncrementCounter(&Counters::named_store_global_inline_miss, 1);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
__ jmp(ic, RelocInfo::CODE_TARGET);
@@ -1089,7 +1394,7 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
// -----------------------------------
Label miss;
- __ mov(eax, (Operand(esp, kPointerSize)));
+ __ mov(eax, Operand(esp, kPointerSize));
GenerateLoadField(object, holder, eax, ebx, edx, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1110,7 +1415,7 @@ Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
// -----------------------------------
Label miss;
- __ mov(eax, (Operand(esp, kPointerSize)));
+ __ mov(eax, Operand(esp, kPointerSize));
GenerateLoadCallback(object, holder, eax, ecx, ebx, edx,
callback, name, &miss);
__ bind(&miss);
@@ -1132,7 +1437,7 @@ Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
// -----------------------------------
Label miss;
- __ mov(eax, (Operand(esp, kPointerSize)));
+ __ mov(eax, Operand(esp, kPointerSize));
GenerateLoadConstant(object, holder, eax, ebx, edx, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1152,12 +1457,15 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
// -----------------------------------
Label miss;
- __ mov(eax, (Operand(esp, kPointerSize)));
+ LookupResult lookup;
+ LookupPostInterceptor(holder, name, &lookup);
+
+ __ mov(eax, Operand(esp, kPointerSize));
// TODO(368): Compile in the whole chain: all the interceptors in
// prototypes and ultimate answer.
GenerateLoadInterceptor(receiver,
holder,
- holder->InterceptorPropertyLookupHint(name),
+ &lookup,
eax,
ecx,
edx,
@@ -1185,10 +1493,8 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::named_load_global_inline, 1);
-
// Get the receiver from the stack.
- __ mov(eax, (Operand(esp, kPointerSize)));
+ __ mov(eax, Operand(esp, kPointerSize));
// If the object is the holder then we know that it's a global
// object which can only happen for contextual loads. In this case,
@@ -1214,10 +1520,10 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
+ __ IncrementCounter(&Counters::named_load_global_inline, 1);
__ ret(0);
__ bind(&miss);
- __ DecrementCounter(&Counters::named_load_global_inline, 1);
__ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1237,8 +1543,8 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
// -----------------------------------
Label miss;
- __ mov(eax, (Operand(esp, kPointerSize)));
- __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ mov(ecx, Operand(esp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_field, 1);
// Check that the name has not changed.
@@ -1267,8 +1573,8 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
// -----------------------------------
Label miss;
- __ mov(eax, (Operand(esp, kPointerSize)));
- __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ mov(ecx, Operand(esp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_callback, 1);
// Check that the name has not changed.
@@ -1297,8 +1603,8 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
// -----------------------------------
Label miss;
- __ mov(eax, (Operand(esp, kPointerSize)));
- __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ mov(ecx, Operand(esp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_constant_function, 1);
// Check that the name has not changed.
@@ -1326,17 +1632,19 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
// -----------------------------------
Label miss;
- __ mov(eax, (Operand(esp, kPointerSize)));
- __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ mov(ecx, Operand(esp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_interceptor, 1);
// Check that the name has not changed.
__ cmp(Operand(eax), Immediate(Handle<String>(name)));
__ j(not_equal, &miss, not_taken);
+ LookupResult lookup;
+ LookupPostInterceptor(holder, name, &lookup);
GenerateLoadInterceptor(receiver,
holder,
- Smi::FromInt(JSObject::kLookupInHolder),
+ &lookup,
ecx,
eax,
edx,
@@ -1362,8 +1670,8 @@ Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
// -----------------------------------
Label miss;
- __ mov(eax, (Operand(esp, kPointerSize)));
- __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ mov(ecx, Operand(esp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_array_length, 1);
// Check that the name has not changed.
@@ -1388,8 +1696,8 @@ Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
// -----------------------------------
Label miss;
- __ mov(eax, (Operand(esp, kPointerSize)));
- __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ mov(ecx, Operand(esp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_string_length, 1);
// Check that the name has not changed.
@@ -1414,8 +1722,8 @@ Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
// -----------------------------------
Label miss;
- __ mov(eax, (Operand(esp, kPointerSize)));
- __ mov(ecx, (Operand(esp, 2 * kPointerSize)));
+ __ mov(eax, Operand(esp, kPointerSize));
+ __ mov(ecx, Operand(esp, 2 * kPointerSize));
__ IncrementCounter(&Counters::keyed_load_function_prototype, 1);
// Check that the name has not changed.