path: root/V8Binding/v8/src/x64
Diffstat (limited to 'V8Binding/v8/src/x64')
-rw-r--r--  V8Binding/v8/src/x64/assembler-x64.cc          54
-rw-r--r--  V8Binding/v8/src/x64/assembler-x64.h            28
-rw-r--r--  V8Binding/v8/src/x64/builtins-x64.cc           159
-rw-r--r--  V8Binding/v8/src/x64/cfg-x64.cc                323
-rw-r--r--  V8Binding/v8/src/x64/codegen-x64.cc           1099
-rw-r--r--  V8Binding/v8/src/x64/codegen-x64.h              68
-rw-r--r--  V8Binding/v8/src/x64/disasm-x64.cc              67
-rw-r--r--  V8Binding/v8/src/x64/ic-x64.cc                 744
-rw-r--r--  V8Binding/v8/src/x64/macro-assembler-x64.cc    186
-rw-r--r--  V8Binding/v8/src/x64/macro-assembler-x64.h       3
-rw-r--r--  V8Binding/v8/src/x64/stub-cache-x64.cc        1452
11 files changed, 3542 insertions(+), 641 deletions(-)
diff --git a/V8Binding/v8/src/x64/assembler-x64.cc b/V8Binding/v8/src/x64/assembler-x64.cc
index e9a6f7f1..b4fd678 100644
--- a/V8Binding/v8/src/x64/assembler-x64.cc
+++ b/V8Binding/v8/src/x64/assembler-x64.cc
@@ -687,6 +687,13 @@ void Assembler::call(const Operand& op) {
}
+void Assembler::cdq() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x99);
+}
+
+
void Assembler::cmovq(Condition cc, Register dst, Register src) {
// No need to check CpuInfo for CMOV support, it's a required part of the
// 64-bit architecture.
@@ -773,6 +780,15 @@ void Assembler::decq(const Operand& dst) {
}
+void Assembler::decl(Register dst) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst);
+ emit(0xFF);
+ emit_modrm(0x1, dst);
+}
+
+
void Assembler::decl(const Operand& dst) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -798,7 +814,7 @@ void Assembler::hlt() {
}
-void Assembler::idiv(Register src) {
+void Assembler::idivq(Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_rex_64(src);
@@ -807,6 +823,15 @@ void Assembler::idiv(Register src) {
}
+void Assembler::idivl(Register src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(src);
+ emit(0xF7);
+ emit_modrm(0x7, src);
+}
+
+
void Assembler::imul(Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1115,6 +1140,9 @@ void Assembler::movq(const Operand& dst, Register src) {
void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) {
+ // This method must not be used with heap object references. The stored
+ // address is not GC safe. Use the handle version instead.
+ ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_rex_64(dst);
@@ -1216,6 +1244,26 @@ void Assembler::movzxbq(Register dst, const Operand& src) {
}
+void Assembler::movzxbl(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xB6);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::movzxwl(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xB7);
+ emit_operand(dst, src);
+}
+
+
void Assembler::mul(Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1521,7 +1569,7 @@ void Assembler::store_rax(ExternalReference ref) {
void Assembler::testb(Register reg, Immediate mask) {
- ASSERT(is_int8(mask.value_));
+ ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
if (reg.is(rax)) {
@@ -1540,7 +1588,7 @@ void Assembler::testb(Register reg, Immediate mask) {
void Assembler::testb(const Operand& op, Immediate mask) {
- ASSERT(is_int8(mask.value_));
+ ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_optional_rex_32(rax, op);
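The relaxed asserts in testb() above accept byte masks in either the signed or the unsigned 8-bit range, since the immediate is emitted as a single byte either way. A minimal standalone sketch of that check (is_int8/is_uint8 here are local stand-ins for the V8 helpers):

#include <cassert>
#include <cstdint>

static bool is_int8(int64_t x)  { return -128 <= x && x <= 127; }
static bool is_uint8(int64_t x) { return 0 <= x && x <= 255; }

int main() {
  int64_t mask = 0xC0;                           // high flag bits, as in type-field tests
  assert(!is_int8(mask) && is_uint8(mask));      // rejected by the old assert, accepted now
  uint8_t emitted = static_cast<uint8_t>(mask);  // the single immediate byte actually emitted
  assert(emitted == 0xC0);
  return 0;
}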
diff --git a/V8Binding/v8/src/x64/assembler-x64.h b/V8Binding/v8/src/x64/assembler-x64.h
index 1b2a35c..015fa68 100644
--- a/V8Binding/v8/src/x64/assembler-x64.h
+++ b/V8Binding/v8/src/x64/assembler-x64.h
@@ -44,15 +44,25 @@ namespace internal {
// Test whether a 64-bit value is in a specific range.
static inline bool is_uint32(int64_t x) {
- const int64_t kUInt32Mask = V8_INT64_C(0xffffffff);
+ static const int64_t kUInt32Mask = V8_INT64_C(0xffffffff);
return x == (x & kUInt32Mask);
}
static inline bool is_int32(int64_t x) {
- const int64_t kMinIntValue = V8_INT64_C(-0x80000000);
+ static const int64_t kMinIntValue = V8_INT64_C(-0x80000000);
return is_uint32(x - kMinIntValue);
}
+static inline bool uint_is_int32(uint64_t x) {
+ static const uint64_t kMaxIntValue = V8_UINT64_C(0x80000000);
+ return x < kMaxIntValue;
+}
+
+static inline bool is_uint32(uint64_t x) {
+ static const uint64_t kMaxUIntValue = V8_UINT64_C(0x100000000);
+ return x < kMaxUIntValue;
+}
+
// CPU Registers.
//
// 1) We would prefer to use an enum, but enum values are assignment-
@@ -292,6 +302,7 @@ enum ScaleFactor {
times_4 = 2,
times_8 = 3,
times_int_size = times_4,
+ times_half_pointer_size = times_4,
times_pointer_size = times_8
};
@@ -506,6 +517,8 @@ class Assembler : public Malloced {
void movsxlq(Register dst, Register src);
void movsxlq(Register dst, const Operand& src);
void movzxbq(Register dst, const Operand& src);
+ void movzxbl(Register dst, const Operand& src);
+ void movzxwl(Register dst, const Operand& src);
// New x64 instruction to load from an immediate 64-bit pointer into RAX.
void load_rax(void* ptr, RelocInfo::Mode rmode);
@@ -627,13 +640,18 @@ class Assembler : public Malloced {
void decq(Register dst);
void decq(const Operand& dst);
+ void decl(Register dst);
void decl(const Operand& dst);
// Sign-extends rax into rdx:rax.
void cqo();
+ // Sign-extends eax into edx:eax.
+ void cdq();
// Divide rdx:rax by src. Quotient in rax, remainder in rdx.
- void idiv(Register src);
+ void idivq(Register src);
+ // Divide edx:eax by lower 32 bits of src. Quotient in eax, rem. in edx.
+ void idivl(Register src);
// Signed multiply instructions.
void imul(Register src); // rdx:rax = rax * src.
@@ -737,6 +755,10 @@ class Assembler : public Malloced {
shift_32(dst, 0x5);
}
+ void shrl(Register dst, Immediate shift_amount) {
+ shift_32(dst, shift_amount, 0x5);
+ }
+
void store_rax(void* dst, RelocInfo::Mode mode);
void store_rax(ExternalReference ref);
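A standalone sketch mirroring the new range predicates in assembler-x64.h (same logic, local constants), with a few sanity checks on the boundary values:

#include <cassert>
#include <cstdint>

static const int64_t kUInt32Mask  = 0xffffffffLL;
static const int64_t kMinIntValue = -0x80000000LL;

static bool is_uint32(int64_t x)      { return x == (x & kUInt32Mask); }
static bool is_int32(int64_t x)       { return is_uint32(x - kMinIntValue); }
static bool uint_is_int32(uint64_t x) { return x < 0x80000000ULL; }    // fits the int32 range
static bool is_uint32(uint64_t x)     { return x < 0x100000000ULL; }   // fits the uint32 range

int main() {
  assert(is_uint32(int64_t{0xffffffffLL}) && !is_uint32(int64_t{-1}));
  assert(is_int32(int64_t{-0x80000000LL}) && !is_int32(int64_t{0x80000000LL}));
  assert(uint_is_int32(uint64_t{0x7fffffffULL}) && !uint_is_int32(uint64_t{0x80000000ULL}));
  assert(is_uint32(uint64_t{0xffffffffULL}) && !is_uint32(uint64_t{0x100000000ULL}));
  return 0;
}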
diff --git a/V8Binding/v8/src/x64/builtins-x64.cc b/V8Binding/v8/src/x64/builtins-x64.cc
index 459921c..087aaff 100644
--- a/V8Binding/v8/src/x64/builtins-x64.cc
+++ b/V8Binding/v8/src/x64/builtins-x64.cc
@@ -394,9 +394,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// If given receiver is already a JavaScript object then there's no
// reason for converting it.
__ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(less, &call_to_object);
+ __ j(below, &call_to_object);
__ CmpInstanceType(rcx, LAST_JS_OBJECT_TYPE);
- __ j(less_equal, &push_receiver);
+ __ j(below_equal, &push_receiver);
// Convert the receiver to an object.
__ bind(&call_to_object);
@@ -503,13 +503,160 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Try to allocate the object without transitioning into C code. If any of the
// preconditions is not met, the code bails out to the runtime call.
Label rt_call, allocated;
-
- // TODO(x64): Implement inlined allocation.
+ if (FLAG_inline_new) {
+ Label undo_allocation;
+ // TODO(X64): Enable debugger support, using debug_step_in_fp.
+
+ // Verified that the constructor is a JSFunction.
+ // Load the initial map and verify that it is in fact a map.
+ // rdi: constructor
+ __ movq(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
+ // The smi tag check below catches both a NULL pointer and a Smi.
+ __ testl(rax, Immediate(kSmiTagMask));
+ __ j(zero, &rt_call);
+ // rdi: constructor
+ // rax: initial map (if proven valid below)
+ __ CmpObjectType(rax, MAP_TYPE, rbx);
+ __ j(not_equal, &rt_call);
+
+ // Check that the constructor is not constructing a JSFunction (see comments
+ // in Runtime_NewObject in runtime.cc); in that case the initial map's
+ // instance type would be JS_FUNCTION_TYPE.
+ // rdi: constructor
+ // rax: initial map
+ __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
+ __ j(equal, &rt_call);
+
+ // Now allocate the JSObject on the heap.
+ __ movzxbq(rdi, FieldOperand(rax, Map::kInstanceSizeOffset));
+ __ shl(rdi, Immediate(kPointerSizeLog2));
+ // rdi: size of new object
+ // Make sure that the maximum heap object size will never cause us
+ // problem here, because it is always greater than the maximum
+ // instance size that can be represented in a byte.
+ ASSERT(Heap::MaxObjectSizeInPagedSpace() >= (1 << kBitsPerByte));
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address();
+ __ movq(kScratchRegister, new_space_allocation_top);
+ __ movq(rbx, Operand(kScratchRegister, 0));
+ __ addq(rdi, rbx); // Calculate new top
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address();
+ __ movq(kScratchRegister, new_space_allocation_limit);
+ __ cmpq(rdi, Operand(kScratchRegister, 0));
+ __ j(above_equal, &rt_call);
+ // Allocated the JSObject, now initialize the fields.
+ // rax: initial map
+ // rbx: JSObject (not HeapObject tagged - the actual address).
+ // rdi: start of next object
+ __ movq(Operand(rbx, JSObject::kMapOffset), rax);
+ __ Move(rcx, Factory::empty_fixed_array());
+ __ movq(Operand(rbx, JSObject::kPropertiesOffset), rcx);
+ __ movq(Operand(rbx, JSObject::kElementsOffset), rcx);
+ // Set extra fields in the newly allocated object.
+ // rax: initial map
+ // rbx: JSObject
+ // rdi: start of next object
+ { Label loop, entry;
+ __ Move(rdx, Factory::undefined_value());
+ __ lea(rcx, Operand(rbx, JSObject::kHeaderSize));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(Operand(rcx, 0), rdx);
+ __ addq(rcx, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmpq(rcx, rdi);
+ __ j(less, &loop);
+ }
+
+ // Mostly done with the JSObject. Add the heap tag and store the new top, so
+ // that we can continue and jump into the continuation code at any time from
+ // now on. Any failures need to undo the setting of the new top, so that the
+ // heap is in a consistent state and verifiable.
+ // rax: initial map
+ // rbx: JSObject
+ // rdi: start of next object
+ __ or_(rbx, Immediate(kHeapObjectTag));
+ __ movq(kScratchRegister, new_space_allocation_top);
+ __ movq(Operand(kScratchRegister, 0), rdi);
+
+ // Check if a non-empty properties array is needed.
+ // Allocate and initialize a FixedArray if it is.
+ // rax: initial map
+ // rbx: JSObject
+ // rdi: start of next object
+ __ movzxbq(rdx, FieldOperand(rax, Map::kUnusedPropertyFieldsOffset));
+ __ movzxbq(rcx, FieldOperand(rax, Map::kInObjectPropertiesOffset));
+ // Calculate unused properties past the end of the in-object properties.
+ __ subq(rdx, rcx);
+ // Done if no extra properties are to be allocated.
+ __ j(zero, &allocated);
+
+ // Scale the number of elements by pointer size and add the header for
+ // FixedArrays to the start of the next object calculation from above.
+ // rbx: JSObject
+ // rdi: start of next object (will be start of FixedArray)
+ // rdx: number of elements in properties array
+ ASSERT(Heap::MaxObjectSizeInPagedSpace() >
+ (FixedArray::kHeaderSize + 255*kPointerSize));
+ __ lea(rax, Operand(rdi, rdx, times_pointer_size, FixedArray::kHeaderSize));
+ __ movq(kScratchRegister, new_space_allocation_limit);
+ __ cmpq(rax, Operand(kScratchRegister, 0));
+ __ j(above_equal, &undo_allocation);
+ __ store_rax(new_space_allocation_top);
+
+ // Initialize the FixedArray.
+ // rbx: JSObject
+ // rdi: FixedArray
+ // rdx: number of elements
+ // rax: start of next object
+ __ Move(rcx, Factory::fixed_array_map());
+ __ movq(Operand(rdi, JSObject::kMapOffset), rcx); // setup the map
+ __ movl(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
+
+ // Initialize the fields to undefined.
+ // rbx: JSObject
+ // rdi: FixedArray
+ // rax: start of next object
+ // rdx: number of elements
+ { Label loop, entry;
+ __ Move(rdx, Factory::undefined_value());
+ __ lea(rcx, Operand(rdi, FixedArray::kHeaderSize));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(Operand(rcx, 0), rdx);
+ __ addq(rcx, Immediate(kPointerSize));
+ __ bind(&entry);
+ __ cmpq(rcx, rax);
+ __ j(below, &loop);
+ }
+
+ // Store the initialized FixedArray into the properties field of
+ // the JSObject
+ // rbx: JSObject
+ // rdi: FixedArray
+ __ or_(rdi, Immediate(kHeapObjectTag)); // add the heap tag
+ __ movq(FieldOperand(rbx, JSObject::kPropertiesOffset), rdi);
+
+
+ // Continue with JSObject being successfully allocated
+ // rbx: JSObject
+ __ jmp(&allocated);
+
+ // Undo the setting of the new top so that the heap is verifiable. For
+ // example, the map's unused properties potentially do not match the
+ // allocated object's unused properties.
+ // rbx: JSObject (previous new top)
+ __ bind(&undo_allocation);
+ __ xor_(rbx, Immediate(kHeapObjectTag)); // clear the heap tag
+ __ movq(kScratchRegister, new_space_allocation_top);
+ __ movq(Operand(kScratchRegister, 0), rbx);
+ }
// Allocate the new receiver object using the runtime call.
// rdi: function (constructor)
__ bind(&rt_call);
- // Must restore edi (constructor) before calling runtime.
+ // Must restore rdi (constructor) before calling runtime.
__ movq(rdi, Operand(rsp, 0));
__ push(rdi);
__ CallRuntime(Runtime::kNewObject, 1);
@@ -562,7 +709,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// If the type of the result (stored in its map) is less than
// FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
__ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
- __ j(greater_equal, &exit);
+ __ j(above_equal, &exit);
// Throw away the result of the constructor invocation and use the
// on-stack receiver as the result.
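The inlined allocation in Generate_JSConstructStubGeneric above is a bump-pointer allocation against the new-space top and limit. A simplified standalone sketch of that pattern (the builtin itself delays storing the new top until the object's fields are initialized, so a later failure can still be undone):

#include <cstddef>
#include <cstdint>

static uintptr_t new_space_top;    // next free address (allocation top)
static uintptr_t new_space_limit;  // end of the current allocation area

// Returns the untagged object start, or 0 to signal "bail out to the runtime".
uintptr_t AllocateRaw(size_t size_in_bytes) {
  uintptr_t result  = new_space_top;
  uintptr_t new_top = result + size_in_bytes;
  if (new_top >= new_space_limit) return 0;  // out of space: take the slow path
  new_space_top = new_top;                   // commit the allocation
  return result;                             // caller adds kHeapObjectTag
}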
diff --git a/V8Binding/v8/src/x64/cfg-x64.cc b/V8Binding/v8/src/x64/cfg-x64.cc
new file mode 100644
index 0000000..8d01ed2
--- /dev/null
+++ b/V8Binding/v8/src/x64/cfg-x64.cc
@@ -0,0 +1,323 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "cfg.h"
+#include "codegen-inl.h"
+#include "codegen-x64.h"
+#include "debug.h"
+#include "macro-assembler-x64.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void InstructionBlock::Compile(MacroAssembler* masm) {
+ ASSERT(!is_marked());
+ is_marked_ = true;
+ {
+ Comment cmt(masm, "[ InstructionBlock");
+ for (int i = 0, len = instructions_.length(); i < len; i++) {
+ // If the location of the current instruction is a temp, then the
+ // instruction cannot be in tail position in the block. Allocate the
+ // temp based on peeking ahead to the next instruction.
+ Instruction* instr = instructions_[i];
+ Location* loc = instr->location();
+ if (loc->is_temporary()) {
+ instructions_[i+1]->FastAllocate(TempLocation::cast(loc));
+ }
+ instructions_[i]->Compile(masm);
+ }
+ }
+ successor_->Compile(masm);
+}
+
+
+void EntryNode::Compile(MacroAssembler* masm) {
+ ASSERT(!is_marked());
+ is_marked_ = true;
+ Label deferred_enter, deferred_exit;
+ {
+ Comment cmnt(masm, "[ EntryNode");
+ __ push(rbp);
+ __ movq(rbp, rsp);
+ __ push(rsi);
+ __ push(rdi);
+ int count = CfgGlobals::current()->fun()->scope()->num_stack_slots();
+ if (count > 0) {
+ __ movq(kScratchRegister, Factory::undefined_value(),
+ RelocInfo::EMBEDDED_OBJECT);
+ for (int i = 0; i < count; i++) {
+ __ push(kScratchRegister);
+ }
+ }
+ if (FLAG_trace) {
+ __ CallRuntime(Runtime::kTraceEnter, 0);
+ }
+ if (FLAG_check_stack) {
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_guard_limit();
+ __ movq(kScratchRegister, stack_limit);
+ __ cmpq(rsp, Operand(kScratchRegister, 0));
+ __ j(below, &deferred_enter);
+ __ bind(&deferred_exit);
+ }
+ }
+ successor_->Compile(masm);
+ if (FLAG_check_stack) {
+ Comment cmnt(masm, "[ Deferred Stack Check");
+ __ bind(&deferred_enter);
+ StackCheckStub stub;
+ __ CallStub(&stub);
+ __ jmp(&deferred_exit);
+ }
+}
+
+
+void ExitNode::Compile(MacroAssembler* masm) {
+ ASSERT(!is_marked());
+ is_marked_ = true;
+ Comment cmnt(masm, "[ ExitNode");
+ if (FLAG_trace) {
+ __ push(rax);
+ __ CallRuntime(Runtime::kTraceExit, 1);
+ }
+ __ RecordJSReturn();
+ __ movq(rsp, rbp);
+ __ pop(rbp);
+ int count = CfgGlobals::current()->fun()->scope()->num_parameters();
+ __ ret((count + 1) * kPointerSize);
+ // Add padding that will be overwritten by a debugger breakpoint.
+ // "movq rsp, rbp; pop rbp" has length 5. "ret k" has length 2.
+ const int kPadding = Debug::kX64JSReturnSequenceLength - 5 - 2;
+ for (int i = 0; i < kPadding; ++i) {
+ __ int3();
+ }
+}
+
+
+void PropLoadInstr::Compile(MacroAssembler* masm) {
+ // The key should not be on the stack---if it is a compiler-generated
+ // temporary it is in the accumulator.
+ ASSERT(!key()->is_on_stack());
+
+ Comment cmnt(masm, "[ Load from Property");
+ // If the key is known at compile-time we may be able to use a load IC.
+ bool is_keyed_load = true;
+ if (key()->is_constant()) {
+ // Still use the keyed load IC if the key can be parsed as an integer so
+ // we will get into the case that handles [] on string objects.
+ Handle<Object> key_val = Constant::cast(key())->handle();
+ uint32_t ignored;
+ if (key_val->IsSymbol() &&
+ !String::cast(*key_val)->AsArrayIndex(&ignored)) {
+ is_keyed_load = false;
+ }
+ }
+
+ if (!object()->is_on_stack()) object()->Push(masm);
+ // A test rax instruction after the call indicates to the IC code that it
+ // was inlined. Ensure there is not one after the call below.
+ if (is_keyed_load) {
+ key()->Push(masm);
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ __ pop(rbx); // Discard key.
+ } else {
+ key()->Get(masm, rcx);
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ }
+ __ pop(rbx); // Discard receiver.
+ location()->Set(masm, rax);
+}
+
+
+void BinaryOpInstr::Compile(MacroAssembler* masm) {
+ // The right-hand value should not be on the stack---if it is a
+ // compiler-generated temporary it is in the accumulator.
+ ASSERT(!right()->is_on_stack());
+
+ Comment cmnt(masm, "[ BinaryOpInstr");
+ // We can overwrite one of the operands if it is a temporary.
+ OverwriteMode mode = NO_OVERWRITE;
+ if (left()->is_temporary()) {
+ mode = OVERWRITE_LEFT;
+ } else if (right()->is_temporary()) {
+ mode = OVERWRITE_RIGHT;
+ }
+
+ // Push both operands and call the specialized stub.
+ if (!left()->is_on_stack()) left()->Push(masm);
+ right()->Push(masm);
+ GenericBinaryOpStub stub(op(), mode, SMI_CODE_IN_STUB);
+ __ CallStub(&stub);
+ location()->Set(masm, rax);
+}
+
+
+void ReturnInstr::Compile(MacroAssembler* masm) {
+ // The location should be 'Effect'. As a side effect, move the value to
+ // the accumulator.
+ Comment cmnt(masm, "[ ReturnInstr");
+ value()->Get(masm, rax);
+}
+
+
+void Constant::Get(MacroAssembler* masm, Register reg) {
+ __ Move(reg, handle_);
+}
+
+
+void Constant::Push(MacroAssembler* masm) {
+ __ Push(handle_);
+}
+
+
+static Operand ToOperand(SlotLocation* loc) {
+ switch (loc->type()) {
+ case Slot::PARAMETER: {
+ int count = CfgGlobals::current()->fun()->scope()->num_parameters();
+ return Operand(rbp, (1 + count - loc->index()) * kPointerSize);
+ }
+ case Slot::LOCAL: {
+ const int kOffset = JavaScriptFrameConstants::kLocal0Offset;
+ return Operand(rbp, kOffset - loc->index() * kPointerSize);
+ }
+ default:
+ UNREACHABLE();
+ return Operand(rax, 0);
+ }
+}
+
+
+void Constant::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
+ __ Move(ToOperand(loc), handle_);
+}
+
+
+void SlotLocation::Get(MacroAssembler* masm, Register reg) {
+ __ movq(reg, ToOperand(this));
+}
+
+
+void SlotLocation::Set(MacroAssembler* masm, Register reg) {
+ __ movq(ToOperand(this), reg);
+}
+
+
+void SlotLocation::Push(MacroAssembler* masm) {
+ __ push(ToOperand(this));
+}
+
+
+void SlotLocation::Move(MacroAssembler* masm, Value* value) {
+ // We dispatch to the value because in some cases (temp or constant) we
+ // can use special instruction sequences.
+ value->MoveToSlot(masm, this);
+}
+
+
+void SlotLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
+ __ movq(kScratchRegister, ToOperand(this));
+ __ movq(ToOperand(loc), kScratchRegister);
+}
+
+
+void TempLocation::Get(MacroAssembler* masm, Register reg) {
+ switch (where_) {
+ case ACCUMULATOR:
+ if (!reg.is(rax)) __ movq(reg, rax);
+ break;
+ case STACK:
+ __ pop(reg);
+ break;
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+void TempLocation::Set(MacroAssembler* masm, Register reg) {
+ switch (where_) {
+ case ACCUMULATOR:
+ if (!reg.is(rax)) __ movq(rax, reg);
+ break;
+ case STACK:
+ __ push(reg);
+ break;
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+void TempLocation::Push(MacroAssembler* masm) {
+ switch (where_) {
+ case ACCUMULATOR:
+ __ push(rax);
+ break;
+ case STACK:
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+void TempLocation::Move(MacroAssembler* masm, Value* value) {
+ switch (where_) {
+ case ACCUMULATOR:
+ value->Get(masm, rax);
+ break;
+ case STACK:
+ value->Push(masm);
+ break;
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+void TempLocation::MoveToSlot(MacroAssembler* masm, SlotLocation* loc) {
+ switch (where_) {
+ case ACCUMULATOR:
+ __ movq(ToOperand(loc), rax);
+ break;
+ case STACK:
+ __ pop(ToOperand(loc));
+ break;
+ case NOT_ALLOCATED:
+ UNREACHABLE();
+ }
+}
+
+
+#undef __
+
+} } // namespace v8::internal
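ToOperand() in cfg-x64.cc maps parameter and local slots to rbp-relative offsets. A standalone sketch of the arithmetic (kLocal0Offset is an assumption here, taken as -3 * kPointerSize because EntryNode saves the context and function below rbp before the locals):

#include <cstdio>

const int kPointerSize  = 8;
const int kLocal0Offset = -3 * kPointerSize;  // assumed layout: rsi and rdi saved below rbp

// Parameters sit above the saved rbp and the return address.
int ParameterOffset(int index, int num_parameters) {
  return (1 + num_parameters - index) * kPointerSize;
}

// Locals grow downwards from the first local slot.
int LocalOffset(int index) {
  return kLocal0Offset - index * kPointerSize;
}

int main() {
  printf("parameter 0 of 2: rbp%+d\n", ParameterOffset(0, 2));  // rbp+24
  printf("local 1:          rbp%+d\n", LocalOffset(1));         // rbp-32
  return 0;
}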
diff --git a/V8Binding/v8/src/x64/codegen-x64.cc b/V8Binding/v8/src/x64/codegen-x64.cc
index 9ed7e74..87f1040 100644
--- a/V8Binding/v8/src/x64/codegen-x64.cc
+++ b/V8Binding/v8/src/x64/codegen-x64.cc
@@ -97,6 +97,137 @@ CodeGenState::~CodeGenState() {
}
+// -------------------------------------------------------------------------
+// Deferred code objects
+//
+// These subclasses of DeferredCode add pieces of code to the end of generated
+// code. They are branched to from the generated code, and
+// keep some slower code out of the main body of the generated code.
+// Many of them call a code stub or a runtime function.
+
+class DeferredInlineSmiAdd: public DeferredCode {
+ public:
+ DeferredInlineSmiAdd(Register dst,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiAdd");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register dst_;
+ Smi* value_;
+ OverwriteMode overwrite_mode_;
+};
+
+
+class DeferredInlineSmiSub: public DeferredCode {
+ public:
+ DeferredInlineSmiSub(Register dst,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiSub");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register dst_;
+ Smi* value_;
+ OverwriteMode overwrite_mode_;
+};
+
+
+// Call the appropriate binary operation stub to compute src op value
+// and leave the result in dst.
+class DeferredInlineSmiOperation: public DeferredCode {
+ public:
+ DeferredInlineSmiOperation(Token::Value op,
+ Register dst,
+ Register src,
+ Smi* value,
+ OverwriteMode overwrite_mode)
+ : op_(op),
+ dst_(dst),
+ src_(src),
+ value_(value),
+ overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlineSmiOperation");
+ }
+
+ virtual void Generate();
+
+ private:
+ Token::Value op_;
+ Register dst_;
+ Register src_;
+ Smi* value_;
+ OverwriteMode overwrite_mode_;
+};
+
+
+class FloatingPointHelper : public AllStatic {
+ public:
+ // Code pattern for loading a floating point value. Input value must
+ // be either a smi or a heap number object (fp value). Requirements:
+ // operand on TOS+1. Returns operand as floating point number on FPU
+ // stack.
+ static void LoadFloatOperand(MacroAssembler* masm, Register scratch);
+
+ // Code pattern for loading a floating point value. Input value must
+ // be either a smi or a heap number object (fp value). Requirements:
+ // operand in src register. Returns operand as floating point number
+ // in XMM register
+ static void LoadFloatOperand(MacroAssembler* masm,
+ Register src,
+ XMMRegister dst);
+
+ // Code pattern for loading floating point values. Input values must
+ // be either smi or heap number objects (fp values). Requirements:
+ // operand_1 on TOS+1 , operand_2 on TOS+2; Returns operands as
+ // floating point numbers in XMM registers.
+ static void LoadFloatOperands(MacroAssembler* masm,
+ XMMRegister dst1,
+ XMMRegister dst2);
+
+ // Code pattern for loading floating point values onto the fp stack.
+ // Input values must be either smi or heap number objects (fp values).
+ // Requirements:
+ // Register version: operands in registers lhs and rhs.
+ // Stack version: operands on TOS+1 and TOS+2.
+ // Returns operands as floating point numbers on fp stack.
+ static void LoadFloatOperands(MacroAssembler* masm);
+ static void LoadFloatOperands(MacroAssembler* masm,
+ Register lhs,
+ Register rhs);
+
+ // Code pattern for loading a floating point value and converting it
+ // to a 32 bit integer. Input value must be either a smi or a heap number
+ // object.
+ // Returns operands as 32-bit sign extended integers in a general purpose
+ // registers.
+ static void LoadInt32Operand(MacroAssembler* masm,
+ const Operand& src,
+ Register dst);
+
+ // Test if operands are smi or number objects (fp). Requirements:
+ // operand_1 in rax, operand_2 in rdx; falls through on float or smi
+ // operands, jumps to the non_float label otherwise.
+ static void CheckFloatOperands(MacroAssembler* masm,
+ Label* non_float);
+
+ // Allocate a heap number in new space with undefined value.
+ // Returns tagged pointer in result, or jumps to need_gc if new space is full.
+ static void AllocateHeapNumber(MacroAssembler* masm,
+ Label* need_gc,
+ Register scratch,
+ Register result);
+};
+
+
// -----------------------------------------------------------------------------
// CodeGenerator implementation.
@@ -389,6 +520,112 @@ bool CodeGenerator::HasValidEntryRegisters() {
#endif
+class DeferredReferenceGetKeyedValue: public DeferredCode {
+ public:
+ explicit DeferredReferenceGetKeyedValue(Register dst,
+ Register receiver,
+ Register key,
+ bool is_global)
+ : dst_(dst), receiver_(receiver), key_(key), is_global_(is_global) {
+ set_comment("[ DeferredReferenceGetKeyedValue");
+ }
+
+ virtual void Generate();
+
+ Label* patch_site() { return &patch_site_; }
+
+ private:
+ Label patch_site_;
+ Register dst_;
+ Register receiver_;
+ Register key_;
+ bool is_global_;
+};
+
+
+void DeferredReferenceGetKeyedValue::Generate() {
+ __ push(receiver_); // First IC argument.
+ __ push(key_); // Second IC argument.
+
+ // Calculate the delta from the IC call instruction to the map check
+ // movq instruction in the inlined version. This delta is stored in
+ // a test(rax, delta) instruction after the call so that we can find
+ // it in the IC initialization code and patch the movq instruction.
+ // This means that we cannot allow test instructions after calls to
+ // KeyedLoadIC stubs in other places.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ RelocInfo::Mode mode = is_global_
+ ? RelocInfo::CODE_TARGET_CONTEXT
+ : RelocInfo::CODE_TARGET;
+ __ Call(ic, mode);
+ // The delta from the start of the map-compare instruction to the
+ // test instruction. We use masm_-> directly here instead of the __
+ // macro because the macro sometimes uses macro expansion to turn
+ // into something that can't return a value. This is encountered
+ // when doing generated code coverage tests.
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+ // Here we use masm_-> instead of the __ macro because this is the
+ // instruction that gets patched and coverage code gets in the way.
+ // TODO(X64): Consider whether it's worth switching the test to a
+ // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
+ // be generated normally.
+ masm_->testl(rax, Immediate(-delta_to_patch_site));
+ __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
+
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+ __ pop(key_);
+ __ pop(receiver_);
+}
+
+
+class DeferredReferenceSetKeyedValue: public DeferredCode {
+ public:
+ DeferredReferenceSetKeyedValue(Register value,
+ Register key,
+ Register receiver)
+ : value_(value), key_(key), receiver_(receiver) {
+ set_comment("[ DeferredReferenceSetKeyedValue");
+ }
+
+ virtual void Generate();
+
+ Label* patch_site() { return &patch_site_; }
+
+ private:
+ Register value_;
+ Register key_;
+ Register receiver_;
+ Label patch_site_;
+};
+
+
+void DeferredReferenceSetKeyedValue::Generate() {
+ __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
+ // Push receiver and key arguments on the stack.
+ __ push(receiver_);
+ __ push(key_);
+ // Move the value argument to rax, as expected by the IC stub.
+ if (!value_.is(rax)) __ movq(rax, value_);
+ // Call the IC stub.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // The delta from the start of the map-compare instructions (initial movq)
+ // to the test instruction. We use masm_-> directly here instead of the
+ // __ macro because the macro sometimes uses macro expansion to turn
+ // into something that can't return a value. This is encountered
+ // when doing generated code coverage tests.
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
+ // Here we use masm_-> instead of the __ macro because this is the
+ // instruction that gets patched and coverage code gets in the way.
+ masm_->testl(rax, Immediate(-delta_to_patch_site));
+ // Restore value (returned from store IC), key and receiver
+ // registers.
+ if (!value_.is(rax)) __ movq(value_, rax);
+ __ pop(key_);
+ __ pop(receiver_);
+}
+
+
class DeferredStackCheck: public DeferredCode {
public:
DeferredStackCheck() {
@@ -2193,9 +2430,8 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
// The receiver is the argument to the runtime call. It is the
// first value pushed when the reference was loaded to the
// frame.
- // TODO(X64): Enable this and the switch back to fast, once they work.
- // frame_->PushElementAt(target.size() - 1);
- // Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
+ frame_->PushElementAt(target.size() - 1);
+ Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
}
if (node->op() == Token::ASSIGN ||
node->op() == Token::INIT_VAR ||
@@ -2203,20 +2439,18 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
Load(node->value());
} else {
- // Literal* literal = node->value()->AsLiteral();
+ Literal* literal = node->value()->AsLiteral();
bool overwrite_value =
(node->value()->AsBinaryOperation() != NULL &&
node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
- // Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
+ Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
// There are two cases where the target is not read in the right hand
// side, that are easy to test for: the right hand side is a literal,
// or the right hand side is a different variable. TakeValue invalidates
// the target, with an implicit promise that it will be written to again
// before it is read.
- // TODO(X64): Implement TakeValue optimization. Check issue 150016.
- if (false) {
- // if (literal != NULL || (right_var != NULL && right_var != var)) {
- // target.TakeValue(NOT_INSIDE_TYPEOF);
+ if (literal != NULL || (right_var != NULL && right_var != var)) {
+ target.TakeValue(NOT_INSIDE_TYPEOF);
} else {
target.GetValue(NOT_INSIDE_TYPEOF);
}
@@ -2247,9 +2481,8 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
// argument to the runtime call is the receiver, which is the
// first value pushed as part of the reference, which is below
// the lhs value.
- // TODO(X64): Enable this once ToFastProperties works.
- // frame_->PushElementAt(target.size());
- // Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
+ frame_->PushElementAt(target.size());
+ Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
}
}
}
@@ -3249,10 +3482,161 @@ void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
}
-void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* a) {
- // TODO(X64): Implement this function.
- // Ignore arguments and return undefined, to signal failure.
- frame_->Push(Factory::undefined_value());
+void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
+ Comment(masm_, "[ GenerateFastCharCodeAt");
+ ASSERT(args->length() == 2);
+
+ Label slow_case;
+ Label end;
+ Label not_a_flat_string;
+ Label a_cons_string;
+ Label try_again_with_new_string;
+ Label ascii_string;
+ Label got_char_code;
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Result index = frame_->Pop();
+ Result object = frame_->Pop();
+
+ // Get register rcx to use as shift amount later.
+ Result shift_amount;
+ if (object.is_register() && object.reg().is(rcx)) {
+ Result fresh = allocator_->Allocate();
+ shift_amount = object;
+ object = fresh;
+ __ movq(object.reg(), rcx);
+ }
+ if (index.is_register() && index.reg().is(rcx)) {
+ Result fresh = allocator_->Allocate();
+ shift_amount = index;
+ index = fresh;
+ __ movq(index.reg(), rcx);
+ }
+ // There could be references to rcx in the frame. Allocating will
+ // spill them; otherwise spill rcx explicitly.
+ if (shift_amount.is_valid()) {
+ frame_->Spill(rcx);
+ } else {
+ shift_amount = allocator()->Allocate(rcx);
+ }
+ ASSERT(shift_amount.is_register());
+ ASSERT(shift_amount.reg().is(rcx));
+ ASSERT(allocator_->count(rcx) == 1);
+
+ // We will mutate the index register and possibly the object register.
+ // The case where they are somehow the same register is handled
+ // because we only mutate them in the case where the receiver is a
+ // heap object and the index is not.
+ object.ToRegister();
+ index.ToRegister();
+ frame_->Spill(object.reg());
+ frame_->Spill(index.reg());
+
+ // We need a single extra temporary register.
+ Result temp = allocator()->Allocate();
+ ASSERT(temp.is_valid());
+
+ // There is no virtual frame effect from here up to the final result
+ // push.
+
+ // If the receiver is a smi trigger the slow case.
+ ASSERT(kSmiTag == 0);
+ __ testl(object.reg(), Immediate(kSmiTagMask));
+ __ j(zero, &slow_case);
+
+ // If the index is negative or non-smi trigger the slow case.
+ ASSERT(kSmiTag == 0);
+ __ testl(index.reg(),
+ Immediate(static_cast<int32_t>(kSmiTagMask | 0x80000000U)));
+ __ j(not_zero, &slow_case);
+ // Untag the index.
+ __ sarl(index.reg(), Immediate(kSmiTagSize));
+
+ __ bind(&try_again_with_new_string);
+ // Fetch the instance type of the receiver into rcx.
+ __ movq(rcx, FieldOperand(object.reg(), HeapObject::kMapOffset));
+ __ movzxbl(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
+ // If the receiver is not a string trigger the slow case.
+ __ testb(rcx, Immediate(kIsNotStringMask));
+ __ j(not_zero, &slow_case);
+
+ // Here we make assumptions about the tag values and the shifts needed.
+ // See the comment in objects.h.
+ ASSERT(kLongStringTag == 0);
+ ASSERT(kMediumStringTag + String::kLongLengthShift ==
+ String::kMediumLengthShift);
+ ASSERT(kShortStringTag + String::kLongLengthShift ==
+ String::kShortLengthShift);
+ __ and_(rcx, Immediate(kStringSizeMask));
+ __ addq(rcx, Immediate(String::kLongLengthShift));
+ // Fetch the length field into the temporary register.
+ __ movl(temp.reg(), FieldOperand(object.reg(), String::kLengthOffset));
+ __ shrl(temp.reg()); // The shift amount in rcx is an implicit operand.
+ // Check for index out of range.
+ __ cmpl(index.reg(), temp.reg());
+ __ j(greater_equal, &slow_case);
+ // Reload the instance type (into the temp register this time).
+ __ movq(temp.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
+ __ movzxbl(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
+
+ // We need special handling for non-flat strings.
+ ASSERT(kSeqStringTag == 0);
+ __ testb(temp.reg(), Immediate(kStringRepresentationMask));
+ __ j(not_zero, &not_a_flat_string);
+ // Check for 1-byte or 2-byte string.
+ __ testb(temp.reg(), Immediate(kStringEncodingMask));
+ __ j(not_zero, &ascii_string);
+
+ // 2-byte string.
+ // Load the 2-byte character code into the temp register.
+ __ movzxwl(temp.reg(), FieldOperand(object.reg(),
+ index.reg(),
+ times_2,
+ SeqTwoByteString::kHeaderSize));
+ __ jmp(&got_char_code);
+
+ // ASCII string.
+ __ bind(&ascii_string);
+ // Load the byte into the temp register.
+ __ movzxbl(temp.reg(), FieldOperand(object.reg(),
+ index.reg(),
+ times_1,
+ SeqAsciiString::kHeaderSize));
+ __ bind(&got_char_code);
+ ASSERT(kSmiTag == 0);
+ __ shl(temp.reg(), Immediate(kSmiTagSize));
+ __ jmp(&end);
+
+ // Handle non-flat strings.
+ __ bind(&not_a_flat_string);
+ __ and_(temp.reg(), Immediate(kStringRepresentationMask));
+ __ cmpb(temp.reg(), Immediate(kConsStringTag));
+ __ j(equal, &a_cons_string);
+ __ cmpb(temp.reg(), Immediate(kSlicedStringTag));
+ __ j(not_equal, &slow_case);
+
+ // SlicedString.
+ // Add the offset to the index and trigger the slow case on overflow.
+ __ addl(index.reg(), FieldOperand(object.reg(), SlicedString::kStartOffset));
+ __ j(overflow, &slow_case);
+ // Getting the underlying string is done by running the cons string code.
+
+ // ConsString.
+ __ bind(&a_cons_string);
+ // Get the first of the two strings. Both sliced and cons strings
+ // store their source string at the same offset.
+ ASSERT(SlicedString::kBufferOffset == ConsString::kFirstOffset);
+ __ movq(object.reg(), FieldOperand(object.reg(), ConsString::kFirstOffset));
+ __ jmp(&try_again_with_new_string);
+
+ __ bind(&slow_case);
+ // Move the undefined value into the result register, which will
+ // trigger the slow case.
+ __ Move(temp.reg(), Factory::undefined_value());
+
+ __ bind(&end);
+ frame_->Push(&temp);
}
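The single testl against kSmiTagMask | 0x80000000 in GenerateFastCharCodeAt rejects a non-smi index and a negative index with one branch. A standalone sketch of that predicate, using the 31-bit smi representation this code assumes (tag value 0 in the low bit):

#include <cassert>
#include <cstdint>

const uint32_t kSmiTagMask = 1;  // kSmiTag == 0, one tag bit

bool IsNonNegativeSmi(uint32_t raw) {
  // Low tag bit clear (it is a smi) and sign bit clear (it is non-negative).
  return (raw & (kSmiTagMask | 0x80000000u)) == 0;
}

int32_t TagSmi(int32_t value) {
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
}

int main() {
  assert(IsNonNegativeSmi(static_cast<uint32_t>(TagSmi(42))));
  assert(!IsNonNegativeSmi(static_cast<uint32_t>(TagSmi(-1))));  // negative smi
  assert(!IsNonNegativeSmi(0x12345679u));                        // heap pointer: tag bit set
  return 0;
}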
@@ -3319,9 +3703,20 @@ void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
}
+void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 0);
+ ASSERT(kSmiTag == 0); // RBP value is aligned, so it should look like a Smi.
+ Result rbp_as_smi = allocator_->Allocate();
+ ASSERT(rbp_as_smi.is_valid());
+ __ movq(rbp_as_smi.reg(), rbp);
+ frame_->Push(&rbp_as_smi);
+}
+
+
void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
frame_->SpillAll();
+ __ push(rsi);
// Make sure the frame is aligned like the OS expects.
static const int kFrameAlignment = OS::ActivationFrameAlignment();
@@ -3334,22 +3729,70 @@ void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
// Call V8::RandomPositiveSmi().
__ Call(FUNCTION_ADDR(V8::RandomPositiveSmi), RelocInfo::RUNTIME_ENTRY);
- // Restore stack pointer from callee-saved register edi.
+ // Restore stack pointer from callee-saved register.
if (kFrameAlignment > 0) {
__ movq(rsp, rbx);
}
+ __ pop(rsi);
Result result = allocator_->Allocate(rax);
frame_->Push(&result);
}
void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
- // TODO(X64): Use inline floating point in the fast case.
+ JumpTarget done;
+ JumpTarget call_runtime;
ASSERT(args->length() == 1);
- // Load number.
+ // Load number and duplicate it.
Load(args->at(0));
+ frame_->Dup();
+
+ // Get the number into an unaliased register and load it onto the
+ // floating point stack still leaving one copy on the frame.
+ Result number = frame_->Pop();
+ number.ToRegister();
+ frame_->Spill(number.reg());
+ FloatingPointHelper::LoadFloatOperand(masm_, number.reg());
+ number.Unuse();
+
+ // Perform the operation on the number.
+ switch (op) {
+ case SIN:
+ __ fsin();
+ break;
+ case COS:
+ __ fcos();
+ break;
+ }
+
+ // Go slow case if argument to operation is out of range.
+ Result eax_reg = allocator()->Allocate(rax);
+ ASSERT(eax_reg.is_valid());
+ __ fnstsw_ax();
+ __ testl(rax, Immediate(0x0400)); // Bit 10 is condition flag C2.
+ eax_reg.Unuse();
+ call_runtime.Branch(not_zero);
+
+ // Allocate heap number for result if possible.
+ Result scratch = allocator()->Allocate();
+ Result heap_number = allocator()->Allocate();
+ FloatingPointHelper::AllocateHeapNumber(masm_,
+ call_runtime.entry_label(),
+ scratch.reg(),
+ heap_number.reg());
+ scratch.Unuse();
+
+ // Store the result in the allocated heap number.
+ __ fstp_d(FieldOperand(heap_number.reg(), HeapNumber::kValueOffset));
+ // Replace the extra copy of the argument with the result.
+ frame_->SetElementAt(0, &heap_number);
+ done.Jump();
+
+ call_runtime.Bind();
+ // Free ST(0) which was not popped before calling into the runtime.
+ __ ffree(0);
Result answer;
switch (op) {
case SIN:
@@ -3360,6 +3803,7 @@ void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
break;
}
frame_->Push(&answer);
+ done.Bind();
}
@@ -3379,7 +3823,7 @@ void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
// functions to make sure they have 'Function' as their class.
__ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
- null.Branch(less);
+ null.Branch(below);
// As long as JS_FUNCTION_TYPE is the last instance type and it is
// right after LAST_JS_OBJECT_TYPE, we can avoid checking for
@@ -3645,7 +4089,7 @@ void CodeGenerator::ToBoolean(ControlDestination* dest) {
// Smi => false iff zero.
ASSERT(kSmiTag == 0);
- __ testq(value.reg(), value.reg());
+ __ testl(value.reg(), value.reg());
dest->false_target()->Branch(zero);
__ testl(value.reg(), Immediate(kSmiTagMask));
dest->true_target()->Branch(zero);
@@ -3728,7 +4172,7 @@ void CodeGenerator::LoadReference(Reference* ref) {
} else {
// Anything else is a runtime error.
Load(e);
- // frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
+ frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
}
in_spilled_code_ = was_in_spilled_code;
@@ -3966,8 +4410,6 @@ void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
- // TODO(X64): Enable more types of slot.
-
if (slot->type() == Slot::LOOKUP) {
ASSERT(slot->var()->is_dynamic());
@@ -4130,7 +4572,7 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
// A test rax instruction following the call signals that the inobject
// property case was inlined. Ensure that there is not a test eax
// instruction here.
- __ nop();
+ masm_->nop();
// Discard the global object. The result is in answer.
frame_->Drop();
return answer;
@@ -4420,108 +4862,6 @@ void CodeGenerator::Comparison(Condition cc,
}
-// Flag that indicates whether or not the code that handles smi arguments
-// should be placed in the stub, inlined, or omitted entirely.
-enum GenericBinaryFlags {
- SMI_CODE_IN_STUB,
- SMI_CODE_INLINED
-};
-
-
-class FloatingPointHelper : public AllStatic {
- public:
- // Code pattern for loading a floating point value. Input value must
- // be either a smi or a heap number object (fp value). Requirements:
- // operand in src register. Returns operand as floating point number
- // in XMM register
- static void LoadFloatOperand(MacroAssembler* masm,
- Register src,
- XMMRegister dst);
- // Code pattern for loading floating point values. Input values must
- // be either smi or heap number objects (fp values). Requirements:
- // operand_1 on TOS+1 , operand_2 on TOS+2; Returns operands as
- // floating point numbers in XMM registers.
- static void LoadFloatOperands(MacroAssembler* masm,
- XMMRegister dst1,
- XMMRegister dst2);
-
- // Code pattern for loading floating point values onto the fp stack.
- // Input values must be either smi or heap number objects (fp values).
- // Requirements:
- // Register version: operands in registers lhs and rhs.
- // Stack version: operands on TOS+1 and TOS+2.
- // Returns operands as floating point numbers on fp stack.
- static void LoadFloatOperands(MacroAssembler* masm);
- static void LoadFloatOperands(MacroAssembler* masm,
- Register lhs,
- Register rhs);
-
- // Code pattern for loading a floating point value and converting it
- // to a 32 bit integer. Input value must be either a smi or a heap number
- // object.
- // Returns operands as 32-bit sign extended integers in a general purpose
- // registers.
- static void LoadInt32Operand(MacroAssembler* masm,
- const Operand& src,
- Register dst);
-
- // Test if operands are smi or number objects (fp). Requirements:
- // operand_1 in rax, operand_2 in rdx; falls through on float
- // operands, jumps to the non_float label otherwise.
- static void CheckFloatOperands(MacroAssembler* masm,
- Label* non_float);
- // Allocate a heap number in new space with undefined value.
- // Returns tagged pointer in result, or jumps to need_gc if new space is full.
- static void AllocateHeapNumber(MacroAssembler* masm,
- Label* need_gc,
- Register scratch,
- Register result);
-};
-
-
-class GenericBinaryOpStub: public CodeStub {
- public:
- GenericBinaryOpStub(Token::Value op,
- OverwriteMode mode,
- GenericBinaryFlags flags)
- : op_(op), mode_(mode), flags_(flags) {
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- void GenerateSmiCode(MacroAssembler* masm, Label* slow);
-
- private:
- Token::Value op_;
- OverwriteMode mode_;
- GenericBinaryFlags flags_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
- Token::String(op_),
- static_cast<int>(mode_),
- static_cast<int>(flags_));
- }
-#endif
-
- // Minor key encoding in 16 bits FOOOOOOOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 13> {};
- class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
-
- Major MajorKey() { return GenericBinaryOp; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | FlagBits::encode(flags_);
- }
- void Generate(MacroAssembler* masm);
-};
-
-
class DeferredInlineBinaryOperation: public DeferredCode {
public:
DeferredInlineBinaryOperation(Token::Value op,
@@ -4700,7 +5040,7 @@ void DeferredReferenceGetNamedValue::Generate() {
int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
// Here we use masm_-> instead of the __ macro because this is the
// instruction that gets patched and coverage code gets in the way.
- masm_->testq(rax, Immediate(-delta_to_patch_site));
+ masm_->testl(rax, Immediate(-delta_to_patch_site));
__ IncrementCounter(&Counters::named_load_inline_miss, 1);
if (!dst_.is(rax)) __ movq(dst_, rax);
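The testl(rax, Immediate(-delta_to_patch_site)) emitted after these IC calls is never executed for its flags; its 32-bit immediate records how far back the inlined map-check movq sits, so the IC initialization code can find and patch that instruction. A simplified standalone sketch of the round trip (the opcode layout and addresses are illustrative assumptions, not the real patching code):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  uint8_t code[64] = {};
  uint8_t* patch_site = code + 10;  // address of the inlined map-check movq
  uint8_t* after_call = code + 30;  // return address of the IC call

  // Code generator: emit "test eax, imm32" (opcode 0xA9) with imm = -delta.
  int32_t delta_to_patch_site = static_cast<int32_t>(after_call - patch_site);
  after_call[0] = 0xA9;
  int32_t imm = -delta_to_patch_site;
  memcpy(after_call + 1, &imm, sizeof(imm));

  // IC initialization: from the call's return address, read the immediate
  // back and recover the address of the movq to patch.
  assert(after_call[0] == 0xA9);  // the marker test instruction
  int32_t stored;
  memcpy(&stored, after_call + 1, sizeof(stored));
  uint8_t* recovered = after_call + stored;  // stored == -delta
  assert(recovered == patch_site);
  return 0;
}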
@@ -4708,29 +5048,6 @@ void DeferredReferenceGetNamedValue::Generate() {
}
-
-
-// The result of src + value is in dst. It either overflowed or was not
-// smi tagged. Undo the speculative addition and call the appropriate
-// specialized stub for add. The result is left in dst.
-class DeferredInlineSmiAdd: public DeferredCode {
- public:
- DeferredInlineSmiAdd(Register dst,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiAdd");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
void DeferredInlineSmiAdd::Generate() {
__ push(dst_);
__ push(Immediate(value_));
@@ -4762,7 +5079,7 @@ class DeferredInlineSmiAddReversed: public DeferredCode {
void DeferredInlineSmiAddReversed::Generate() {
- __ push(Immediate(value_));
+ __ push(Immediate(value_)); // Note: sign extended.
__ push(dst_);
GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, SMI_CODE_INLINED);
__ CallStub(&igostub);
@@ -4770,37 +5087,28 @@ void DeferredInlineSmiAddReversed::Generate() {
}
-// The result of src - value is in dst. It either overflowed or was not
-// smi tagged. Undo the speculative subtraction and call the
-// appropriate specialized stub for subtract. The result is left in
-// dst.
-class DeferredInlineSmiSub: public DeferredCode {
- public:
- DeferredInlineSmiSub(Register dst,
- Smi* value,
- OverwriteMode overwrite_mode)
- : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
- set_comment("[ DeferredInlineSmiSub");
- }
-
- virtual void Generate();
-
- private:
- Register dst_;
- Smi* value_;
- OverwriteMode overwrite_mode_;
-};
-
-
void DeferredInlineSmiSub::Generate() {
__ push(dst_);
- __ push(Immediate(value_));
+ __ push(Immediate(value_)); // Note: sign extended.
GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, SMI_CODE_INLINED);
__ CallStub(&igostub);
if (!dst_.is(rax)) __ movq(dst_, rax);
}
+void DeferredInlineSmiOperation::Generate() {
+ __ push(src_);
+ __ push(Immediate(value_)); // Note: sign extended.
+ // For mod we don't generate all the Smi code inline.
+ GenericBinaryOpStub stub(
+ op_,
+ overwrite_mode_,
+ (op_ == Token::MOD) ? SMI_CODE_IN_STUB : SMI_CODE_INLINED);
+ __ CallStub(&stub);
+ if (!dst_.is(rax)) __ movq(dst_, rax);
+}
+
+
void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
Result* operand,
Handle<Object> value,
@@ -4829,6 +5137,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
// Get the literal value.
Smi* smi_value = Smi::cast(*value);
+ int int_value = smi_value->value();
switch (op) {
case Token::ADD: {
@@ -4851,15 +5160,43 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
Label add_success;
__ j(no_overflow, &add_success);
__ subl(operand->reg(), Immediate(smi_value));
- __ movsxlq(operand->reg(), operand->reg());
deferred->Jump();
__ bind(&add_success);
- __ movsxlq(operand->reg(), operand->reg());
deferred->BindExit();
frame_->Push(operand);
break;
}
// TODO(X64): Move other implementations from ia32 to here.
+
+ // Generate inline code for mod of powers of 2 and negative powers of 2.
+ case Token::MOD:
+ if (!reversed &&
+ int_value != 0 &&
+ (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
+ operand->ToRegister();
+ frame_->Spill(operand->reg());
+ DeferredCode* deferred = new DeferredInlineSmiOperation(op,
+ operand->reg(),
+ operand->reg(),
+ smi_value,
+ overwrite_mode);
+ // Check for negative or non-Smi left hand side.
+ __ testl(operand->reg(),
+ Immediate(static_cast<int32_t>(kSmiTagMask | 0x80000000)));
+ deferred->Branch(not_zero);
+ if (int_value < 0) int_value = -int_value;
+ if (int_value == 1) {
+ __ movl(operand->reg(), Immediate(Smi::FromInt(0)));
+ } else {
+ __ and_(operand->reg(), Immediate((int_value << kSmiTagSize) - 1));
+ }
+ deferred->BindExit();
+ frame_->Push(operand);
+ break; // This break only applies if we generated code for MOD.
+ }
+ // Fall through if we did not find a power of 2 on the right hand side!
+ // The next case must be the default.
+
default: {
Result constant_operand(value);
if (reversed) {
@@ -4965,35 +5302,36 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
}
deferred->Branch(not_zero);
- if (!left_is_in_rax) __ movq(rax, left->reg());
- // Sign extend rax into rdx:rax.
- __ cqo();
+ // All operations on the smi values are on 32-bit registers, which are
+ // zero-extended into 64-bits by all 32-bit operations.
+ if (!left_is_in_rax) __ movl(rax, left->reg());
+ // Sign extend eax into edx:eax.
+ __ cdq();
// Check for 0 divisor.
- __ testq(right->reg(), right->reg());
+ __ testl(right->reg(), right->reg());
deferred->Branch(zero);
// Divide rdx:rax by the right operand.
- __ idiv(right->reg());
+ __ idivl(right->reg());
// Complete the operation.
if (op == Token::DIV) {
- // Check for negative zero result. If result is zero, and divisor
- // is negative, return a floating point negative zero. The
- // virtual frame is unchanged in this block, so local control flow
- // can use a Label rather than a JumpTarget.
+ // Check for negative zero result. If the result is zero, and the
+ // divisor is negative, return a floating point negative zero.
Label non_zero_result;
- __ testq(left->reg(), left->reg());
+ __ testl(left->reg(), left->reg());
__ j(not_zero, &non_zero_result);
- __ testq(right->reg(), right->reg());
+ __ testl(right->reg(), right->reg());
deferred->Branch(negative);
+ // The frame is identical on all paths reaching this label.
__ bind(&non_zero_result);
// Check for the corner case of dividing the most negative smi by
// -1. We cannot use the overflow flag, since it is not set by
// idiv instruction.
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ cmpq(rax, Immediate(0x40000000));
+ __ cmpl(rax, Immediate(0x40000000));
deferred->Branch(equal);
// Check that the remainder is zero.
- __ testq(rdx, rdx);
+ __ testl(rdx, rdx);
deferred->Branch(not_zero);
// Tag the result and store it in the quotient register.
ASSERT(kSmiTagSize == times_2); // adjust code if not the case
@@ -5004,15 +5342,14 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
frame_->Push(&quotient);
} else {
ASSERT(op == Token::MOD);
- // Check for a negative zero result. If the result is zero, and
- // the dividend is negative, return a floating point negative
- // zero. The frame is unchanged in this block, so local control
- // flow can use a Label rather than a JumpTarget.
+ // Check for a negative zero result. If the result is zero, and the
+ // dividend is negative, return a floating point negative zero.
Label non_zero_result;
- __ testq(rdx, rdx);
+ __ testl(rdx, rdx);
__ j(not_zero, &non_zero_result);
- __ testq(left->reg(), left->reg());
+ __ testl(left->reg(), left->reg());
deferred->Branch(negative);
+ // The frame is identical on all paths reaching this label.
__ bind(&non_zero_result);
deferred->BindExit();
left->Unuse();
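The inline smi division above has three bail-outs beyond the usual smi checks: a zero divisor, a result that would be negative zero, and a quotient that no longer fits in a smi (only kSmiMin / -1, which the cmpl against 0x40000000 catches; a non-zero remainder also forces the slow path). A standalone sketch of the same corner cases on untagged 31-bit smi values:

#include <cassert>
#include <cstdint>

const int32_t kSmiMin = -(1 << 30);      // 31-bit smi value range
const int32_t kSmiMax = (1 << 30) - 1;

// Mirrors the inline Token::DIV bail-outs; returns false for the slow path.
bool SmiDiv(int32_t left, int32_t right, int32_t* result) {
  if (right == 0) return false;                // division by zero
  if (left == 0 && right < 0) return false;    // result would be -0.0
  if (left % right != 0) return false;         // non-integer quotient
  int32_t quotient = left / right;
  if (quotient < kSmiMin || quotient > kSmiMax) return false;  // kSmiMin / -1
  *result = quotient;
  return true;
}

int main() {
  int32_t q;
  assert(SmiDiv(6, 3, &q) && q == 2);
  assert(!SmiDiv(kSmiMin, -1, &q));  // 2^30 does not fit in a smi
  assert(!SmiDiv(0, -5, &q));        // negative zero must be a heap number
  return 0;
}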
@@ -5056,9 +5393,9 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
deferred->Branch(not_zero);
// Untag both operands.
- __ movq(answer.reg(), left->reg());
- __ sar(answer.reg(), Immediate(kSmiTagSize));
- __ sar(rcx, Immediate(kSmiTagSize));
+ __ movl(answer.reg(), left->reg());
+ __ sarl(answer.reg(), Immediate(kSmiTagSize));
+ __ sarl(rcx, Immediate(kSmiTagSize));
// Perform the operation.
switch (op) {
case Token::SAR:
@@ -5154,7 +5491,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
ASSERT(kSmiTag == 0); // Adjust code below if not the case.
// Remove smi tag from the left operand (but keep sign).
// Left-hand operand has been copied into answer.
- __ sar(answer.reg(), Immediate(kSmiTagSize));
+ __ sarl(answer.reg(), Immediate(kSmiTagSize));
// Do multiplication of smis, leaving result in answer.
__ imull(answer.reg(), right->reg());
// Go slow on overflows.
@@ -5164,7 +5501,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
// in this block, so local control flow can use a Label rather
// than a JumpTarget.
Label non_zero_result;
- __ testq(answer.reg(), answer.reg());
+ __ testl(answer.reg(), answer.reg());
__ j(not_zero, &non_zero_result);
__ movq(answer.reg(), left->reg());
__ or_(answer.reg(), right->reg());
@@ -5183,6 +5520,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
break;
case Token::BIT_XOR:
+ ASSERT(kSmiTag == 0); // Adjust code below if not the case.
__ xor_(answer.reg(), right->reg());
break;
@@ -5267,9 +5605,20 @@ void Reference::GetValue(TypeofState typeof_state) {
Comment cmnt(masm, "[ Inlined named property load");
Result receiver = cgen_->frame()->Pop();
receiver.ToRegister();
-
Result value = cgen_->allocator()->Allocate();
ASSERT(value.is_valid());
+ // Cannot use r12 for receiver, because that changes
+ // the distance between a call and a fixup location,
+ // due to a special encoding of r12 as r/m in a ModR/M byte.
+ if (receiver.reg().is(r12)) {
+ // Swap receiver and value.
+ __ movq(value.reg(), receiver.reg());
+ Result temp = receiver;
+ receiver = value;
+ value = temp;
+ cgen_->frame()->Spill(value.reg()); // r12 may have been shared.
+ }
+
DeferredReferenceGetNamedValue* deferred =
new DeferredReferenceGetNamedValue(value.reg(),
receiver.reg(),
@@ -5288,7 +5637,8 @@ void Reference::GetValue(TypeofState typeof_state) {
kScratchRegister);
// This branch is always a forwards branch so it's always a fixed
// size which allows the assert below to succeed and patching to work.
- deferred->Branch(not_equal);
+ // Don't use deferred->Branch(...), since that might add coverage code.
+ masm->j(not_equal, deferred->entry_label());
// The delta from the patch label to the load offset must be
// statically known.
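
The two notes above (avoiding r12 as the receiver, and branching via masm->j rather than deferred->Branch) both exist to keep the patched sequence a fixed size. A sketch of the encoding detail behind the r12 restriction, assuming standard x64 ModR/M rules: an r/m field of 0b100 (rsp/r12) forces an extra SIB byte, so a memory operand based on r12 is one byte longer than the same operand based on most other registers.

#include <cstdio>

// Bytes following the opcode for a [base + disp8] operand, given the low
// three bits of the base register's encoding.
static int MemOperandBytes(int base_low3) {
  int bytes = 1 /* ModR/M */ + 1 /* disp8 */;
  if (base_low3 == 4) bytes += 1;  // rsp/r12: SIB byte required
  return bytes;
}

int main() {
  printf("[rbx + 8]: %d bytes, [r12 + 8]: %d bytes\n",
         MemOperandBytes(3 /* rbx */), MemOperandBytes(4 /* r12 */));
  return 0;
}
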
@@ -5315,26 +5665,118 @@ void Reference::GetValue(TypeofState typeof_state) {
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
+
// Inline array load code if inside of a loop. We do not know
// the receiver map yet, so we initially generate the code with
// a check against an invalid map. In the inline cache code, we
// patch the map check if appropriate.
+ if (cgen_->loop_nesting() > 0) {
+ Comment cmnt(masm, "[ Inlined load from keyed Property");
- // TODO(x64): Implement inlined loads for keyed properties.
- // Make sure to load length field as a 32-bit quantity.
- // Comment cmnt(masm, "[ Load from keyed Property");
-
- RelocInfo::Mode mode = is_global
- ? RelocInfo::CODE_TARGET_CONTEXT
- : RelocInfo::CODE_TARGET;
- Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed load. The explicit nop instruction is here because
- // the push that follows might be peep-hole optimized away.
- __ nop();
- cgen_->frame()->Push(&answer);
+ Result key = cgen_->frame()->Pop();
+ Result receiver = cgen_->frame()->Pop();
+ key.ToRegister();
+ receiver.ToRegister();
+
+ // Use a fresh temporary to load the elements without destroying
+ // the receiver which is needed for the deferred slow case.
+ Result elements = cgen_->allocator()->Allocate();
+ ASSERT(elements.is_valid());
+
+ // Use a fresh temporary for the index and later the loaded
+ // value.
+ Result index = cgen_->allocator()->Allocate();
+ ASSERT(index.is_valid());
+
+ DeferredReferenceGetKeyedValue* deferred =
+ new DeferredReferenceGetKeyedValue(index.reg(),
+ receiver.reg(),
+ key.reg(),
+ is_global);
+
+ // Check that the receiver is not a smi (only needed if this
+ // is not a load from the global context) and that it has the
+ // expected map.
+ if (!is_global) {
+ __ testl(receiver.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(zero);
+ }
+
+ // Initially, use an invalid map. The map is patched in the IC
+ // initialization code.
+ __ bind(deferred->patch_site());
+ // Use masm-> here instead of the double underscore macro since extra
+ // coverage code can interfere with the patching.
+ masm->movq(kScratchRegister, Factory::null_value(),
+ RelocInfo::EMBEDDED_OBJECT);
+ masm->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+ kScratchRegister);
+ deferred->Branch(not_equal);
+
+ // Check that the key is a non-negative smi.
+ __ testl(key.reg(),
+ Immediate(static_cast<int32_t>(kSmiTagMask | 0x80000000u)));
+ deferred->Branch(not_zero);
+
+ // Get the elements array from the receiver and check that it
+ // is not a dictionary.
+ __ movq(elements.reg(),
+ FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+ __ Cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
+ Factory::fixed_array_map());
+ deferred->Branch(not_equal);
+
+ // Shift the key to get the actual index value and check that
+ // it is within bounds.
+ __ movl(index.reg(), key.reg());
+ __ shrl(index.reg(), Immediate(kSmiTagSize));
+ __ cmpl(index.reg(),
+ FieldOperand(elements.reg(), FixedArray::kLengthOffset));
+ deferred->Branch(above_equal);
+
+ // The index register holds the un-smi-tagged key. It has been
+ // zero-extended to 64-bits, so it can be used directly as index in the
+ // operand below.
+ // Load and check that the result is not the hole. We could
+ // reuse the index or elements register for the value.
+ //
+ // TODO(206): Consider whether it makes sense to try some
+ // heuristic about which register to reuse. For example, if
+    // one is rax, then we can reuse that one because the value
+ // coming from the deferred code will be in rax.
+ Result value = index;
+ __ movq(value.reg(),
+ Operand(elements.reg(),
+ index.reg(),
+ times_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ elements.Unuse();
+ index.Unuse();
+ __ Cmp(value.reg(), Factory::the_hole_value());
+ deferred->Branch(equal);
+ __ IncrementCounter(&Counters::keyed_load_inline, 1);
+
+ deferred->BindExit();
+ // Restore the receiver and key to the frame and push the
+ // result on top of it.
+ cgen_->frame()->Push(&receiver);
+ cgen_->frame()->Push(&key);
+ cgen_->frame()->Push(&value);
+
+ } else {
+ Comment cmnt(masm, "[ Load from keyed Property");
+ RelocInfo::Mode mode = is_global
+ ? RelocInfo::CODE_TARGET_CONTEXT
+ : RelocInfo::CODE_TARGET;
+ Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
+ // Make sure that we do not have a test instruction after the
+ // call. A test instruction after the call is used to
+ // indicate that we have generated an inline version of the
+ // keyed load. The explicit nop instruction is here because
+ // the push that follows might be peep-hole optimized away.
+ __ nop();
+ cgen_->frame()->Push(&answer);
+ }
break;
}
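
A sketch (plain C++) of the combined key check used in the inlined load above, assuming kSmiTag == 0 and kSmiTagMask == 1: testing the key against (kSmiTagMask | 0x80000000) rejects both non-smis and negative smis with a single branch.

#include <cstdint>
#include <cstdio>

static bool IsNonNegativeSmi(uint32_t key) {
  const uint32_t kSmiTagMask = 1;
  return (key & (kSmiTagMask | 0x80000000u)) == 0;
}

int main() {
  printf("smi 3: %d, non-smi: %d, smi -4: %d\n",
         IsNonNegativeSmi(3u << 1),        // tagged smi 3: passes
         IsNonNegativeSmi(7u),             // low bit set: not a smi
         IsNonNegativeSmi(0xFFFFFFF8u));   // tagged smi -4: negative
  return 0;
}
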
@@ -5361,13 +5803,16 @@ void Reference::TakeValue(TypeofState typeof_state) {
ASSERT(slot != NULL);
if (slot->type() == Slot::LOOKUP ||
slot->type() == Slot::CONTEXT ||
- slot->var()->mode() == Variable::CONST) {
+ slot->var()->mode() == Variable::CONST ||
+ slot->is_arguments()) {
GetValue(typeof_state);
return;
}
// Only non-constant, frame-allocated parameters and locals can reach
- // here.
+ // here. Be careful not to use the optimizations for arguments
+ // object access since it may not have been initialized yet.
+ ASSERT(!slot->is_arguments());
if (slot->type() == Slot::PARAMETER) {
cgen_->frame()->TakeParameterAt(slot->index());
} else {
@@ -5401,15 +5846,105 @@ void Reference::SetValue(InitState init_state) {
case KEYED: {
Comment cmnt(masm, "[ Store to keyed Property");
- // TODO(x64): Implement inlined version of keyed stores.
+ // Generate inlined version of the keyed store if the code is in
+ // a loop and the key is likely to be a smi.
+ Property* property = expression()->AsProperty();
+ ASSERT(property != NULL);
+ SmiAnalysis* key_smi_analysis = property->key()->type();
- Result answer = cgen_->frame()->CallKeyedStoreIC();
- // Make sure that we do not have a test instruction after the
- // call. A test instruction after the call is used to
- // indicate that we have generated an inline version of the
- // keyed store.
- __ nop();
- cgen_->frame()->Push(&answer);
+ if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
+ Comment cmnt(masm, "[ Inlined store to keyed Property");
+
+ // Get the receiver, key and value into registers.
+ Result value = cgen_->frame()->Pop();
+ Result key = cgen_->frame()->Pop();
+ Result receiver = cgen_->frame()->Pop();
+
+ Result tmp = cgen_->allocator_->Allocate();
+ ASSERT(tmp.is_valid());
+
+ // Determine whether the value is a constant before putting it
+ // in a register.
+ bool value_is_constant = value.is_constant();
+
+ // Make sure that value, key and receiver are in registers.
+ value.ToRegister();
+ key.ToRegister();
+ receiver.ToRegister();
+
+ DeferredReferenceSetKeyedValue* deferred =
+ new DeferredReferenceSetKeyedValue(value.reg(),
+ key.reg(),
+ receiver.reg());
+
+ // Check that the value is a smi if it is not a constant.
+ // We can skip the write barrier for smis and constants.
+ if (!value_is_constant) {
+ __ testl(value.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(not_zero);
+ }
+
+ // Check that the key is a non-negative smi.
+ __ testl(key.reg(),
+ Immediate(static_cast<uint32_t>(kSmiTagMask | 0x80000000U)));
+ deferred->Branch(not_zero);
+
+ // Check that the receiver is not a smi.
+ __ testl(receiver.reg(), Immediate(kSmiTagMask));
+ deferred->Branch(zero);
+
+ // Check that the receiver is a JSArray.
+ __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
+ deferred->Branch(not_equal);
+
+ // Check that the key is within bounds. Both the key and the
+ // length of the JSArray are smis, so compare only low 32 bits.
+ __ cmpl(key.reg(),
+ FieldOperand(receiver.reg(), JSArray::kLengthOffset));
+ deferred->Branch(greater_equal);
+
+ // Get the elements array from the receiver and check that it
+ // is a flat array (not a dictionary).
+ __ movq(tmp.reg(),
+ FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+ // Bind the deferred code patch site to be able to locate the
+ // fixed array map comparison. When debugging, we patch this
+ // comparison to always fail so that we will hit the IC call
+ // in the deferred code which will allow the debugger to
+ // break for fast case stores.
+ __ bind(deferred->patch_site());
+ // Avoid using __ to ensure the distance from patch_site
+ // to the map address is always the same.
+ masm->movq(kScratchRegister, Factory::fixed_array_map(),
+ RelocInfo::EMBEDDED_OBJECT);
+ __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
+ kScratchRegister);
+ deferred->Branch(not_equal);
+
+ // Store the value.
+ ASSERT_EQ(1, kSmiTagSize);
+ ASSERT_EQ(0, kSmiTag);
+ __ movq(Operand(tmp.reg(),
+ key.reg(),
+ times_half_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag),
+ value.reg());
+ __ IncrementCounter(&Counters::keyed_store_inline, 1);
+
+ deferred->BindExit();
+
+ cgen_->frame()->Push(&receiver);
+ cgen_->frame()->Push(&key);
+ cgen_->frame()->Push(&value);
+ } else {
+ Result answer = cgen_->frame()->CallKeyedStoreIC();
+ // Make sure that we do not have a test instruction after the
+ // call. A test instruction after the call is used to
+ // indicate that we have generated an inline version of the
+ // keyed store.
+ masm->nop();
+ cgen_->frame()->Push(&answer);
+ }
break;
}
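
A sketch of the addressing trick in the inlined store above: a smi key is the index shifted left by one (kSmiTag == 0, kSmiTagSize == 1), so scaling the tagged key by times_half_pointer_size (4 on x64) produces index * kPointerSize, the element's byte offset in the FixedArray.

#include <cstdio>

int main() {
  const int kPointerSize = 8;
  for (int index = 0; index < 4; index++) {
    int tagged_key = index << 1;                        // smi encoding
    int byte_offset = tagged_key * (kPointerSize / 2);  // times_half_pointer_size
    printf("index %d -> offset %d (expected %d)\n",
           index, byte_offset, index * kPointerSize);
  }
  return 0;
}
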
@@ -5447,7 +5982,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
__ and_(rcx, Immediate(kStringSizeMask));
__ cmpq(rcx, Immediate(kShortStringTag));
__ j(not_equal, &true_result); // Empty string is always short.
- __ movq(rdx, FieldOperand(rax, String::kLengthOffset));
+ __ movl(rdx, FieldOperand(rax, String::kLengthOffset));
__ shr(rdx, Immediate(String::kShortLengthShift));
__ j(zero, &false_result);
__ jmp(&true_result);
@@ -6135,22 +6670,23 @@ void CEntryStub::GenerateThrowOutOfMemory(MacroAssembler* masm) {
// Fetch top stack handler.
ExternalReference handler_address(Top::k_handler_address);
__ movq(kScratchRegister, handler_address);
- __ movq(rdx, Operand(kScratchRegister, 0));
+ __ movq(rsp, Operand(kScratchRegister, 0));
// Unwind the handlers until the ENTRY handler is found.
Label loop, done;
__ bind(&loop);
// Load the type of the current stack handler.
- __ cmpq(Operand(rdx, StackHandlerConstants::kStateOffset),
+ __ cmpq(Operand(rsp, StackHandlerConstants::kStateOffset),
Immediate(StackHandler::ENTRY));
__ j(equal, &done);
// Fetch the next handler in the list.
- __ movq(rdx, Operand(rdx, StackHandlerConstants::kNextOffset));
+ ASSERT(StackHandlerConstants::kNextOffset == 0);
+ __ pop(rsp);
__ jmp(&loop);
__ bind(&done);
// Set the top handler address to next handler past the current ENTRY handler.
- __ movq(rax, Operand(rdx, StackHandlerConstants::kNextOffset));
+ __ pop(rax);
__ store_rax(handler_address);
// Set external caught exception to false.
@@ -6163,14 +6699,12 @@ void CEntryStub::GenerateThrowOutOfMemory(MacroAssembler* masm) {
ExternalReference pending_exception(Top::k_pending_exception_address);
__ store_rax(pending_exception);
- // Restore the stack to the address of the ENTRY handler
- __ movq(rsp, rdx);
-
// Clear the context pointer;
__ xor_(rsi, rsi);
// Restore registers from handler.
-
+ ASSERT_EQ(StackHandlerConstants::kNextOffset + kPointerSize,
+ StackHandlerConstants::kFPOffset);
__ pop(rbp); // FP
ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
StackHandlerConstants::kStateOffset);
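
A sketch (stand-in types, not the real frame layout) of the unwinding loop above: because StackHandlerConstants::kNextOffset is 0, once rsp points at a handler, a single pop both reads the handler's "next" link and moves rsp to it.

#include <cstdio>

struct StackHandler {
  StackHandler* next;   // kNextOffset == 0: first field in the handler
  int state;            // e.g. StackHandler::TRY_CATCH or StackHandler::ENTRY
};

int main() {
  const int kEntry = 1, kTryCatch = 0;
  StackHandler entry  = {nullptr, kEntry};
  StackHandler nested = {&entry, kTryCatch};
  StackHandler* sp = &nested;        // rsp after loading the top handler
  while (sp->state != kEntry) {
    sp = sp->next;                   // the generated "pop rsp"
  }
  printf("ENTRY handler found at %p\n", static_cast<void*>(sp));
  return 0;
}
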
@@ -6286,6 +6820,9 @@ void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Label invoke, exit;
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ Label not_outermost_js, not_outermost_js_2;
+#endif
// Setup frame.
__ push(rbp);
@@ -6311,6 +6848,17 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ load_rax(c_entry_fp);
__ push(rax);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ // If this is the outermost JS call, set js_entry_sp value.
+ ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
+ __ load_rax(js_entry_sp);
+ __ testq(rax, rax);
+ __ j(not_zero, &not_outermost_js);
+ __ movq(rax, rbp);
+ __ store_rax(js_entry_sp);
+ __ bind(&not_outermost_js);
+#endif
+
// Call a faked try-block that does the invoke.
__ call(&invoke);
@@ -6353,6 +6901,16 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Pop next_sp.
__ addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
+#ifdef ENABLE_LOGGING_AND_PROFILING
+  // If the current RBP value is the same as the js_entry_sp value, it means that
+ // the current function is the outermost.
+ __ movq(kScratchRegister, js_entry_sp);
+ __ cmpq(rbp, Operand(kScratchRegister, 0));
+ __ j(not_equal, &not_outermost_js_2);
+ __ movq(Operand(kScratchRegister, 0), Immediate(0));
+ __ bind(&not_outermost_js_2);
+#endif
+
// Restore the top frame descriptor from the stack.
__ bind(&exit);
__ movq(kScratchRegister, ExternalReference(Top::k_c_entry_fp_address));
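
A sketch of the ENABLE_LOGGING_AND_PROFILING bookkeeping added in the two blocks above: js_entry_sp remembers the frame pointer of the outermost JS entry, nested entries leave it untouched, and only the frame that set it clears it on the way out.

#include <cstdio>

static void* js_entry_sp = nullptr;   // stand-in for Top::k_js_entry_sp_address

static void EnterJS(int nesting) {
  char frame_marker;                                          // this entry frame's rbp
  if (js_entry_sp == nullptr) js_entry_sp = &frame_marker;    // outermost entry
  printf("depth %d, outermost: %d\n", nesting, js_entry_sp == &frame_marker);
  if (nesting > 0) EnterJS(nesting - 1);                      // re-entering JS
  if (js_entry_sp == &frame_marker) js_entry_sp = nullptr;    // outermost exit
}

int main() {
  EnterJS(2);
  return 0;
}
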
@@ -6418,6 +6976,24 @@ void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
}
+void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
+ Register number) {
+ Label load_smi, done;
+
+ __ testl(number, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi);
+ __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi);
+ __ sarl(number, Immediate(kSmiTagSize));
+ __ push(number);
+ __ fild_s(Operand(rsp, 0));
+ __ pop(number);
+
+ __ bind(&done);
+}
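
A sketch of the two load paths in the helper above: a smi operand is untagged and converted to a double (the fild_s path), while a heap number already stores an IEEE double at HeapNumber::kValueOffset (the fld_d path).

#include <cstdint>
#include <cstdio>

// 'tagged' stands in for the value in 'number'; 'heap_number_value' stands in
// for the double stored at HeapNumber::kValueOffset when the value is boxed.
static double LoadFloatOperand(int32_t tagged, const double* heap_number_value) {
  if ((tagged & 1) == 0) {          // smi: untag and convert (fild_s path)
    return static_cast<double>(tagged >> 1);
  }
  return *heap_number_value;        // heap number: read the boxed double (fld_d path)
}

int main() {
  double boxed = 2.5;
  printf("%g %g\n", LoadFloatOperand(7 << 1, nullptr),             // smi 7
         LoadFloatOperand(1 /* tagged pointer stand-in */, &boxed));
  return 0;
}
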
+
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register src,
@@ -6430,7 +7006,7 @@ void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
__ jmp(&done);
__ bind(&load_smi);
- __ sar(src, Immediate(kSmiTagSize));
+ __ sarl(src, Immediate(kSmiTagSize));
__ cvtlsi2sd(dst, src);
__ bind(&done);
@@ -6486,6 +7062,7 @@ void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm) {
__ bind(&done);
}
+
void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
Register lhs,
Register rhs) {
@@ -6520,6 +7097,7 @@ void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
__ bind(&done);
}
+
void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
Label* non_float) {
Label test_other, done;
@@ -6557,13 +7135,14 @@ const char* GenericBinaryOpStub::GetName() {
}
}
+
void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// Perform fast-case smi code for the operation (rax <op> rbx) and
// leave result in register rax.
// Smi check both operands.
__ movq(rcx, rbx);
- __ or_(rcx, rax);
+ __ or_(rcx, rax); // The value in ecx is used for negative zero test later.
__ testl(rcx, Immediate(kSmiTagMask));
__ j(not_zero, slow);
@@ -6571,14 +7150,12 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
case Token::ADD: {
__ addl(rax, rbx);
__ j(overflow, slow); // The slow case rereads operands from the stack.
- __ movsxlq(rax, rax); // Sign extend eax into rax.
break;
}
case Token::SUB: {
__ subl(rax, rbx);
__ j(overflow, slow); // The slow case rereads operands from the stack.
- __ movsxlq(rax, rax); // Sign extend eax into rax.
break;
}
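
A sketch of the ADD/SUB fast path above: with kSmiTag == 0 the tagged operands can be added directly, and a 32-bit signed-overflow check is exactly the "result no longer fits in a smi" check, since a smi uses 31 of the 32 bits.

#include <cstdint>
#include <cstdio>

// Returns false when the tagged sum overflows 32 bits, i.e. when the untagged
// result would fall outside the 31-bit smi range.
static bool AddSmis(int32_t a_tagged, int32_t b_tagged, int32_t* result) {
  int64_t sum = static_cast<int64_t>(a_tagged) + b_tagged;
  if (sum != static_cast<int32_t>(sum)) return false;   // j(overflow, slow)
  *result = static_cast<int32_t>(sum);
  return true;
}

int main() {
  int32_t r = 0;
  bool ok = AddSmis(2 << 1, 3 << 1, &r);
  printf("ok=%d untagged result=%d\n", ok, r >> 1);      // ok=1, result 5
  return 0;
}
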
@@ -6586,27 +7163,25 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// If the smi tag is 0 we can just leave the tag on one operand.
ASSERT(kSmiTag == 0); // adjust code below if not the case
// Remove tag from one of the operands (but keep sign).
- __ sar(rax, Immediate(kSmiTagSize));
+ __ sarl(rax, Immediate(kSmiTagSize));
// Do multiplication.
__ imull(rax, rbx); // multiplication of smis; result in eax
// Go slow on overflows.
__ j(overflow, slow);
// Check for negative zero result.
- __ movsxlq(rax, rax); // Sign extend eax into rax.
- __ NegativeZeroTest(rax, rcx, slow); // use rcx = x | y
+ __ NegativeZeroTest(rax, rcx, slow); // ecx (not rcx) holds x | y.
break;
case Token::DIV:
- // Sign extend rax into rdx:rax
- // (also sign extends eax into edx if eax is Smi).
- __ cqo();
+ // Sign extend eax into edx:eax.
+ __ cdq();
// Check for 0 divisor.
- __ testq(rbx, rbx);
+ __ testl(rbx, rbx);
__ j(zero, slow);
- // Divide rdx:rax by rbx (where rdx:rax is equivalent to the smi in eax).
- __ idiv(rbx);
+ // Divide edx:eax by ebx (where edx:eax is equivalent to the smi in eax).
+ __ idivl(rbx);
// Check that the remainder is zero.
- __ testq(rdx, rdx);
+ __ testl(rdx, rdx);
__ j(not_zero, slow);
// Check for the corner case of dividing the most negative smi
// by -1. We cannot use the overflow flag, since it is not set
@@ -6614,28 +7189,27 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
// TODO(X64): TODO(Smi): Smi implementation dependent constant.
// Value is Smi::fromInt(-(1<<31)) / Smi::fromInt(-1)
- __ cmpq(rax, Immediate(0x40000000));
+ __ cmpl(rax, Immediate(0x40000000));
__ j(equal, slow);
// Check for negative zero result.
- __ NegativeZeroTest(rax, rcx, slow); // use ecx = x | y
+ __ NegativeZeroTest(rax, rcx, slow); // ecx (not rcx) holds x | y.
// Tag the result and store it in register rax.
ASSERT(kSmiTagSize == times_2); // adjust code if not the case
__ lea(rax, Operand(rax, rax, times_1, kSmiTag));
break;
case Token::MOD:
- // Sign extend rax into rdx:rax
- // (also sign extends eax into edx if eax is Smi).
- __ cqo();
+ // Sign extend eax into edx:eax
+ __ cdq();
// Check for 0 divisor.
- __ testq(rbx, rbx);
+ __ testl(rbx, rbx);
__ j(zero, slow);
- // Divide rdx:rax by rbx.
- __ idiv(rbx);
+ // Divide edx:eax by ebx.
+ __ idivl(rbx);
// Check for negative zero result.
- __ NegativeZeroTest(rdx, rcx, slow); // use ecx = x | y
+ __ NegativeZeroTest(rdx, rcx, slow); // ecx (not rcx) holds x | y.
// Move remainder to register rax.
- __ movq(rax, rdx);
+ __ movl(rax, rdx);
break;
case Token::BIT_OR:
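
A sketch of what the cdq/idivl sequence above computes on the smi fast path: both operands keep their tag (a factor of two), so the tags cancel in the quotient, while the remainder comes out still tagged. The DIV case additionally requires a zero remainder before re-tagging the quotient; the MOD case can move the tagged remainder to rax directly.

#include <cstdint>
#include <cstdio>

int main() {
  int32_t a = 14, b = 4;                       // untagged values
  int32_t a_tagged = a << 1, b_tagged = b << 1;
  // cdq sign-extends eax into edx:eax; idivl divides that by the 32-bit divisor.
  int64_t edx_eax = static_cast<int64_t>(a_tagged);
  int32_t quotient  = static_cast<int32_t>(edx_eax / b_tagged);  // a / b, untagged
  int32_t remainder = static_cast<int32_t>(edx_eax % b_tagged);  // a % b, still tagged
  printf("quotient=%d, remainder=%d (tagged smi for %d)\n",
         quotient, remainder, remainder >> 1);
  return 0;
}
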
@@ -6655,7 +7229,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
case Token::SHR:
case Token::SAR:
// Move the second operand into register ecx.
- __ movq(rcx, rbx);
+ __ movl(rcx, rbx);
// Remove tags from operands (but keep sign).
__ sarl(rax, Immediate(kSmiTagSize));
__ sarl(rcx, Immediate(kSmiTagSize));
@@ -6702,7 +7276,6 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label call_runtime;
-
if (flags_ == SMI_CODE_IN_STUB) {
// The fast case smi code wasn't inlined in the stub caller
// code. Generate it here to speed up common operations.
diff --git a/V8Binding/v8/src/x64/codegen-x64.h b/V8Binding/v8/src/x64/codegen-x64.h
index bb4b538..b1c61d8 100644
--- a/V8Binding/v8/src/x64/codegen-x64.h
+++ b/V8Binding/v8/src/x64/codegen-x64.h
@@ -361,7 +361,7 @@ class CodeGenerator: public AstVisitor {
#define DEF_VISIT(type) \
void Visit##type(type* node);
- NODE_LIST(DEF_VISIT)
+ AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
// Visit a statement and then spill the virtual frame if control flow can
@@ -534,6 +534,8 @@ class CodeGenerator: public AstVisitor {
void GenerateLog(ZoneList<Expression*>* args);
+ void GenerateGetFramePointer(ZoneList<Expression*>* args);
+
// Fast support for Math.random().
void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
@@ -548,7 +550,7 @@ class CodeGenerator: public AstVisitor {
// information.
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
- void CodeForStatementPosition(Node* node);
+ void CodeForStatementPosition(AstNode* node);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
@@ -593,10 +595,72 @@ class CodeGenerator: public AstVisitor {
friend class Reference;
friend class Result;
+ friend class CodeGeneratorPatcher; // Used in test-log-stack-tracer.cc
+
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
+// -------------------------------------------------------------------------
+// Code stubs
+//
+// These independent code objects are created once, and used multiple
+// times by generated code to perform common tasks, often the slow
+// case of a JavaScript operation. They are all subclasses of CodeStub,
+// which is declared in code-stubs.h.
+
+
+// Flag that indicates whether or not the code that handles smi arguments
+// should be placed in the stub, inlined, or omitted entirely.
+enum GenericBinaryFlags {
+ SMI_CODE_IN_STUB,
+ SMI_CODE_INLINED
+};
+
+
+class GenericBinaryOpStub: public CodeStub {
+ public:
+ GenericBinaryOpStub(Token::Value op,
+ OverwriteMode mode,
+ GenericBinaryFlags flags)
+ : op_(op), mode_(mode), flags_(flags) {
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+ }
+
+ void GenerateSmiCode(MacroAssembler* masm, Label* slow);
+
+ private:
+ Token::Value op_;
+ OverwriteMode mode_;
+ GenericBinaryFlags flags_;
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("GenericBinaryOpStub (op %s), (mode %d, flags %d)\n",
+ Token::String(op_),
+ static_cast<int>(mode_),
+ static_cast<int>(flags_));
+ }
+#endif
+
+ // Minor key encoding in 16 bits FOOOOOOOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 13> {};
+ class FlagBits: public BitField<GenericBinaryFlags, 15, 1> {};
+
+ Major MajorKey() { return GenericBinaryOp; }
+ int MinorKey() {
+ // Encode the parameters in a unique 16 bit value.
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | FlagBits::encode(flags_);
+ }
+ void Generate(MacroAssembler* masm);
+};
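
A sketch of the MinorKey packing declared above ("FOOOOOOOOOOOOOMM"): the mode occupies bits 0-1, the token bits 2-14, and the flag bit 15, so every (op, mode, flags) triple maps to a distinct 16-bit key. The operand values below are illustrative only; the real arguments are Token::Value and OverwriteMode enumerators.

#include <cstdio>

static int EncodeMinorKey(int op, int mode, int flags) {
  return (mode & 0x3)              // ModeBits: 2 bits at offset 0
       | ((op & 0x1FFF) << 2)      // OpBits: 13 bits at offset 2
       | ((flags & 0x1) << 15);    // FlagBits: 1 bit at offset 15
}

int main() {
  printf("minor key = 0x%04x\n", EncodeMinorKey(5, 2, 1));  // 0x8016
  return 0;
}
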
+
+
} } // namespace v8::internal
#endif // V8_X64_CODEGEN_X64_H_
diff --git a/V8Binding/v8/src/x64/disasm-x64.cc b/V8Binding/v8/src/x64/disasm-x64.cc
index 83fa9cd..cc8365c 100644
--- a/V8Binding/v8/src/x64/disasm-x64.cc
+++ b/V8Binding/v8/src/x64/disasm-x64.cc
@@ -88,7 +88,7 @@ static ByteMnemonic two_operands_instr[] = {
{ 0x39, OPER_REG_OP_ORDER, "cmp" },
{ 0x3A, BYTE_REG_OPER_OP_ORDER, "cmp" },
{ 0x3B, REG_OPER_OP_ORDER, "cmp" },
- { 0x8D, REG_OPER_OP_ORDER, "lea" },
+ { 0x63, REG_OPER_OP_ORDER, "movsxlq" },
{ 0x84, BYTE_REG_OPER_OP_ORDER, "test" },
{ 0x85, REG_OPER_OP_ORDER, "test" },
{ 0x86, BYTE_REG_OPER_OP_ORDER, "xchg" },
@@ -97,6 +97,7 @@ static ByteMnemonic two_operands_instr[] = {
{ 0x89, OPER_REG_OP_ORDER, "mov" },
{ 0x8A, BYTE_REG_OPER_OP_ORDER, "mov" },
{ 0x8B, REG_OPER_OP_ORDER, "mov" },
+ { 0x8D, REG_OPER_OP_ORDER, "lea" },
{ -1, UNSET_OP_ORDER, "" }
};
@@ -139,7 +140,7 @@ static ByteMnemonic short_immediate_instr[] = {
static const char* conditional_code_suffix[] = {
- "o", "no", "c", "nc", "z", "nz", "a", "na",
+ "o", "no", "c", "nc", "z", "nz", "na", "a",
"s", "ns", "pe", "po", "l", "ge", "le", "g"
};
@@ -252,6 +253,24 @@ void InstructionTable::AddJumpConditionalShort() {
static InstructionTable instruction_table;
+static InstructionDesc cmov_instructions[16] = {
+ {"cmovo", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovno", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovc", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovnc", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovz", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovnz", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovna", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmova", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovs", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovns", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovpe", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovpo", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovl", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovge", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovle", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
+ {"cmovg", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false}
+};
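
A sketch of how the table above is consumed: for the two-byte opcodes 0x0F 0x40 through 0x0F 0x4F, the low nibble of the second opcode byte is the x86 condition code, and entries 6 and 7 are "na" (below-or-equal) and "a" (above), matching the corrected suffix table earlier in this file.

#include <cstdio>

static const char* kCmovMnemonics[16] = {
  "cmovo", "cmovno", "cmovc",  "cmovnc", "cmovz",  "cmovnz", "cmovna", "cmova",
  "cmovs", "cmovns", "cmovpe", "cmovpo", "cmovl",  "cmovge", "cmovle", "cmovg"};

int main() {
  unsigned char second_opcode_byte = 0x44;   // 0x0F 0x44 reg, r/m
  printf("%s\n", kCmovMnemonics[second_opcode_byte & 0x0F]);  // cmovz
  return 0;
}
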
//------------------------------------------------------------------------------
// DisassemblerX64 implementation.
@@ -533,7 +552,7 @@ int DisassemblerX64::PrintImmediate(byte* data, OperandSize size) {
value = 0; // Initialize variables on all paths to satisfy the compiler.
count = 0;
}
- AppendToBuffer(V8_PTR_PREFIX"x", value);
+ AppendToBuffer("%" V8_PTR_PREFIX "x", value);
return count;
}
@@ -687,7 +706,7 @@ int DisassemblerX64::ShiftInstruction(byte* data) {
byte modrm = *(data + 1);
int mod, regop, rm;
get_modrm(modrm, &mod, &regop, &rm);
- ASSERT(regop < 8);
+ regop &= 0x7; // The REX.R bit does not affect the operation.
int imm8 = -1;
int num_bytes = 2;
if (mod != 3) {
@@ -966,6 +985,13 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
// RDTSC or CPUID
AppendToBuffer("%s", mnemonic);
+ } else if ((opcode & 0xF0) == 0x40) {
+ // CMOVcc: conditional move.
+ int condition = opcode & 0x0F;
+ const InstructionDesc& idesc = cmov_instructions[condition];
+ byte_size_operand_ = idesc.byte_size_operation;
+ current += PrintOperands(idesc.mnem, idesc.op_order_, current);
+
} else if ((opcode & 0xF0) == 0x80) {
// Jcc: Conditional jump (branch).
current = data + JumpConditional(data);
@@ -1343,6 +1369,39 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += 2;
break;
+ case 0xA1: // Fall through.
+ case 0xA3:
+ switch (operand_size()) {
+ case DOUBLEWORD_SIZE: {
+ const char* memory_location = NameOfAddress(
+ reinterpret_cast<byte*>(
+ *reinterpret_cast<int32_t*>(data + 1)));
+ if (*data == 0xA1) { // Opcode 0xA1
+ AppendToBuffer("movzxlq rax,(%s)", memory_location);
+ } else { // Opcode 0xA3
+ AppendToBuffer("movzxlq (%s),rax", memory_location);
+ }
+ data += 5;
+ break;
+ }
+ case QUADWORD_SIZE: {
+ // New x64 instruction mov rax,(imm_64).
+ const char* memory_location = NameOfAddress(
+ *reinterpret_cast<byte**>(data + 1));
+ if (*data == 0xA1) { // Opcode 0xA1
+ AppendToBuffer("movq rax,(%s)", memory_location);
+ } else { // Opcode 0xA3
+ AppendToBuffer("movq (%s),rax", memory_location);
+ }
+ data += 9;
+ break;
+ }
+ default:
+ UnimplementedInstruction();
+ data += 2;
+ }
+ break;
+
case 0xA8:
AppendToBuffer("test al,0x%x", *reinterpret_cast<uint8_t*>(data + 1));
data += 2;
diff --git a/V8Binding/v8/src/x64/ic-x64.cc b/V8Binding/v8/src/x64/ic-x64.cc
index db74baf..86008eb 100644
--- a/V8Binding/v8/src/x64/ic-x64.cc
+++ b/V8Binding/v8/src/x64/ic-x64.cc
@@ -42,16 +42,181 @@ namespace internal {
#define __ ACCESS_MASM(masm)
+// Helper function used to load a property from a dictionary backing storage.
+// This function may return false negatives, so miss_label
+// must always call a backup property load that is complete.
+// This function is safe to call even if the receiver has fast properties,
+// or if name is not a symbol; in those cases it jumps to the miss_label.
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
+ Register r0, Register r1, Register r2,
+ Register name) {
+ // Register use:
+ //
+ // r0 - used to hold the property dictionary.
+ //
+ // r1 - initially the receiver
+ // - used for the index into the property dictionary
+ // - holds the result on exit.
+ //
+ // r2 - used to hold the capacity of the property dictionary.
+ //
+ // name - holds the name of the property and is unchanged.
+
+ Label done;
+
+ // Check for the absence of an interceptor.
+ // Load the map into r0.
+ __ movq(r0, FieldOperand(r1, JSObject::kMapOffset));
+ // Test the has_named_interceptor bit in the map.
+ __ testl(FieldOperand(r0, Map::kInstanceAttributesOffset),
+ Immediate(1 << (Map::kHasNamedInterceptor + (3 * 8))));
+
+ // Jump to miss if the interceptor bit is set.
+ __ j(not_zero, miss_label);
+
+ // Bail out if we have a JS global proxy object.
+ __ movzxbq(r0, FieldOperand(r0, Map::kInstanceTypeOffset));
+ __ cmpb(r0, Immediate(JS_GLOBAL_PROXY_TYPE));
+ __ j(equal, miss_label);
+
+ // Possible work-around for http://crbug.com/16276.
+ __ cmpb(r0, Immediate(JS_GLOBAL_OBJECT_TYPE));
+ __ j(equal, miss_label);
+ __ cmpb(r0, Immediate(JS_BUILTINS_OBJECT_TYPE));
+ __ j(equal, miss_label);
+
+ // Check that the properties array is a dictionary.
+ __ movq(r0, FieldOperand(r1, JSObject::kPropertiesOffset));
+ __ Cmp(FieldOperand(r0, HeapObject::kMapOffset), Factory::hash_table_map());
+ __ j(not_equal, miss_label);
+
+ // Compute the capacity mask.
+ const int kCapacityOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kCapacityIndex * kPointerSize;
+ __ movq(r2, FieldOperand(r0, kCapacityOffset));
+ __ shrl(r2, Immediate(kSmiTagSize)); // convert smi to int
+ __ decl(r2);
+
+ // Generate an unrolled loop that performs a few probes before
+ // giving up. Measurements done on Gmail indicate that 2 probes
+ // cover ~93% of loads from dictionaries.
+ static const int kProbes = 4;
+ const int kElementsStartOffset =
+ StringDictionary::kHeaderSize +
+ StringDictionary::kElementsStartIndex * kPointerSize;
+ for (int i = 0; i < kProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ movl(r1, FieldOperand(name, String::kLengthOffset));
+ __ shrl(r1, Immediate(String::kHashShift));
+ if (i > 0) {
+ __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
+ }
+ __ and_(r1, r2);
+
+ // Scale the index by multiplying by the entry size.
+ ASSERT(StringDictionary::kEntrySize == 3);
+ __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
+
+ // Check if the key is identical to the name.
+ __ cmpq(name, Operand(r0, r1, times_pointer_size,
+ kElementsStartOffset - kHeapObjectTag));
+ if (i != kProbes - 1) {
+ __ j(equal, &done);
+ } else {
+ __ j(not_equal, miss_label);
+ }
+ }
+
+ // Check that the value is a normal property.
+ __ bind(&done);
+ const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ __ testl(Operand(r0, r1, times_pointer_size, kDetailsOffset - kHeapObjectTag),
+ Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
+ __ j(not_zero, miss_label);
+
+ // Get the value at the masked, scaled index.
+ const int kValueOffset = kElementsStartOffset + kPointerSize;
+ __ movq(r1,
+ Operand(r0, r1, times_pointer_size, kValueOffset - kHeapObjectTag));
+}
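
A sketch (not the real hash table code) of the shape of the unrolled probe loop above: each StringDictionary entry spans three pointers (key, value, details), and probe i inspects (hash + GetProbeOffset(i)) & mask. The quadratic offset below is only an assumed illustration; the real offsets come from StringDictionary::GetProbeOffset.

#include <cstdio>

static unsigned GetProbeOffset(unsigned i) { return i + i * i; }  // assumed shape
static unsigned ProbeIndex(unsigned hash, unsigned mask, unsigned i) {
  return (hash + GetProbeOffset(i)) & mask;
}

int main() {
  const unsigned kEntrySize = 3;            // StringDictionary::kEntrySize
  unsigned hash = 0x2A, mask = 64 - 1;      // capacity mask, as computed above
  for (unsigned i = 0; i < 4; i++) {        // kProbes == 4
    unsigned index = ProbeIndex(hash, mask, i);
    printf("probe %u -> entry %u, pointer index %u\n",
           i, index, index * kEntrySize);
  }
  return 0;
}
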
+
+
+// Helper function used to check that a value is either not an object
+// or is loaded if it is an object.
+static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
+ Register value) {
+ Label done;
+ // Check if the value is a Smi.
+ __ testl(value, Immediate(kSmiTagMask));
+ __ j(zero, &done);
+ // Check if the object has been loaded.
+ __ movq(kScratchRegister, FieldOperand(value, JSFunction::kMapOffset));
+ __ testb(FieldOperand(kScratchRegister, Map::kBitField2Offset),
+ Immediate(1 << Map::kNeedsLoading));
+ __ j(not_zero, miss);
+ __ bind(&done);
+}
+
+
+// One byte opcode for test eax,0xXXXXXXXX.
+static const byte kTestEaxByte = 0xA9;
+
+
+static bool PatchInlinedMapCheck(Address address, Object* map) {
+ // Arguments are address of start of call sequence that called
+ // the IC,
+ Address test_instruction_address =
+ address + Assembler::kTargetAddrToReturnAddrDist;
+ // The keyed load has a fast inlined case if the IC call instruction
+ // is immediately followed by a test instruction.
+ if (*test_instruction_address != kTestEaxByte) return false;
+
+ // Fetch the offset from the test instruction to the map compare
+ // instructions (starting with the 64-bit immediate mov of the map
+ // address). This offset is stored in the last 4 bytes of the 5
+ // byte test instruction.
+ Address delta_address = test_instruction_address + 1;
+ int delta = *reinterpret_cast<int*>(delta_address);
+ // Compute the map address. The map address is in the last 8 bytes
+ // of the 10-byte immediate mov instruction (incl. REX prefix), so we add 2
+ // to the offset to get the map address.
+ Address map_address = test_instruction_address + delta + 2;
+ // Patch the map check.
+ *(reinterpret_cast<Object**>(map_address)) = map;
+ return true;
+}
+
+
+bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
+ return PatchInlinedMapCheck(address, map);
+}
+
+
+bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
+ return PatchInlinedMapCheck(address, map);
+}
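
A byte-level sketch of the patching protocol PatchInlinedMapCheck relies on. The layout offsets below are made up; only the 0xA9 marker byte, the 32-bit delta stored in its immediate, and the "+ 2" step past the REX prefix and opcode mirror the code above. The test-eax instruction after the IC call carries the distance back to the movq that loads the map, and the patcher overwrites that movq's 64-bit immediate.

#include <cstdint>
#include <cstring>
#include <cstdio>

int main() {
  const unsigned char kTestEaxByte = 0xA9;
  unsigned char code[32] = {0};
  // Illustrative layout: a 10-byte "movq scratch, imm64" at offset 0 and the
  // 5-byte marker "test eax, imm32" at offset 20.
  code[0] = 0x49; code[1] = 0xBB;                 // REX.W mov reg, imm64 (illustrative)
  code[20] = kTestEaxByte;
  int32_t delta = 0 - 20;                         // movq starts 20 bytes before the test
  std::memcpy(&code[21], &delta, sizeof(delta));

  // The patcher: bail out if the marker is missing, otherwise follow the delta
  // and skip REX prefix + opcode to reach the 8-byte map immediate.
  if (code[20] != kTestEaxByte) return 1;
  int32_t d;
  std::memcpy(&d, &code[21], sizeof(d));
  uint64_t new_map = 0x0000123450000001ULL;       // stand-in for a Map pointer
  std::memcpy(&code[20 + d + 2], &new_map, sizeof(new_map));
  printf("immediate now starts with 0x%02x\n", code[2]);  // 0x01 (little-endian)
  return 0;
}
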
+
+
void KeyedLoadIC::ClearInlinedVersion(Address address) {
- // TODO(X64): Implement this when LoadIC is enabled.
+ // Insert null as the map to check for to make sure the map check fails
+ // sending control flow to the IC instead of the inlined version.
+ PatchInlinedLoad(address, Heap::null_value());
}
+
void KeyedStoreIC::ClearInlinedVersion(Address address) {
- // TODO(X64): Implement this when LoadIC is enabled.
+ // Insert null as the elements map to check for. This will make
+ // sure that the elements fast-case map check fails so that control
+ // flows to the IC instead of the inlined version.
+ PatchInlinedStore(address, Heap::null_value());
}
+
void KeyedStoreIC::RestoreInlinedVersion(Address address) {
- UNIMPLEMENTED();
+ // Restore the fast-case elements map check so that the inlined
+ // version can be used again.
+ PatchInlinedStore(address, Heap::fixed_array_map());
}
@@ -65,127 +230,288 @@ void KeyedLoadIC::Generate(MacroAssembler* masm,
__ movq(rax, Operand(rsp, kPointerSize));
__ movq(rcx, Operand(rsp, 2 * kPointerSize));
-
- // Move the return address below the arguments.
__ pop(rbx);
- __ push(rcx);
- __ push(rax);
- __ push(rbx);
+ __ push(rcx); // receiver
+ __ push(rax); // name
+ __ push(rbx); // return address
// Perform tail call to the entry.
__ TailCallRuntime(f, 2);
}
+#ifdef DEBUG
+// For use in assert below.
+static int TenToThe(int exponent) {
+ ASSERT(exponent <= 9);
+ ASSERT(exponent >= 1);
+ int answer = 10;
+ for (int i = 1; i < exponent; i++) answer *= 10;
+ return answer;
+}
+#endif
+
+
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
+ Label slow, fast, check_string, index_int, index_string;
- Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+ // Load name and receiver.
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+
+ // Check that the object isn't a smi.
+ __ testl(rcx, Immediate(kSmiTagMask));
+ __ j(zero, &slow);
+
+ // Check that the object is some kind of JS object EXCEPT JS Value type.
+ // In the case that the object is a value-wrapper object,
+ // we enter the runtime system to make sure that indexing
+  // into string objects works as intended.
+ ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+ __ CmpObjectType(rcx, JS_OBJECT_TYPE, rdx);
+ __ j(below, &slow);
+ // Check that the receiver does not require access checks. We need
+ // to check this explicitly since this generic stub does not perform
+ // map checks. The map is already in rdx.
+ __ testb(FieldOperand(rdx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ j(not_zero, &slow);
+
+ // Check that the key is a smi.
+ __ testl(rax, Immediate(kSmiTagMask));
+ __ j(not_zero, &check_string);
+ __ sarl(rax, Immediate(kSmiTagSize));
+ // Get the elements array of the object.
+ __ bind(&index_int);
+ __ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
+ // Check that the object is in fast mode (not dictionary).
+ __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
+ __ j(not_equal, &slow);
+ // Check that the key (index) is within bounds.
+ __ cmpl(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ j(below, &fast); // Unsigned comparison rejects negative indices.
+ // Slow case: Load name and receiver from stack and jump to runtime.
+ __ bind(&slow);
+ __ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
+ KeyedLoadIC::Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+ __ bind(&check_string);
+ // The key is not a smi.
+ // Is it a string?
+ __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx);
+ __ j(above_equal, &slow);
+ // Is the string an array index, with cached numeric value?
+ __ movl(rbx, FieldOperand(rax, String::kLengthOffset));
+ __ testl(rbx, Immediate(String::kIsArrayIndexMask));
+
+ // If the string is a symbol, do a quick inline probe of the receiver's
+ // dictionary, if it exists.
+ __ j(not_zero, &index_string); // The value in rbx is used at jump target.
+ __ testb(FieldOperand(rdx, Map::kInstanceTypeOffset),
+ Immediate(kIsSymbolMask));
+ __ j(zero, &slow);
+  // Probe the dictionary, leaving the result in rcx.
+ GenerateDictionaryLoad(masm, &slow, rbx, rcx, rdx, rax);
+ GenerateCheckNonObjectOrLoaded(masm, &slow, rcx);
+ __ movq(rax, rcx);
+ __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
+ __ ret(0);
+  // Array index string: If short enough, use the cache in the length/hash field (rbx).
+ // We assert that there are enough bits in an int32_t after the hash shift
+ // bits have been subtracted to allow space for the length and the cached
+ // array index.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << (String::kShortLengthShift - String::kHashShift)));
+ __ bind(&index_string);
+ const int kLengthFieldLimit =
+ (String::kMaxCachedArrayIndexLength + 1) << String::kShortLengthShift;
+ __ cmpl(rbx, Immediate(kLengthFieldLimit));
+ __ j(above_equal, &slow);
+ __ movl(rax, rbx);
+ __ and_(rax, Immediate((1 << String::kShortLengthShift) - 1));
+ __ shrl(rax, Immediate(String::kLongLengthShift));
+ __ jmp(&index_int);
+ // Fast case: Do the load.
+ __ bind(&fast);
+ __ movq(rax, Operand(rcx, rax, times_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Cmp(rax, Factory::the_hole_value());
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ j(equal, &slow);
+ __ IncrementCounter(&Counters::keyed_load_generic_smi, 1);
+ __ ret(0);
}
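
A sketch of the generic fast path above, with stand-in types: require a smi key, untag it, require fast (FixedArray) elements, bounds-check against the untagged length, and treat the hole as a miss so the prototype chain is still consulted.

#include <cstdio>
#include <vector>

static const void* kHole = nullptr;   // stand-in for the_hole_value

static bool FastKeyedLoad(const std::vector<const void*>& elements,
                          int tagged_key, const void** result) {
  if (tagged_key & 1) return false;                               // not a smi
  int index = tagged_key >> 1;                                    // untag
  if (index < 0 || index >= static_cast<int>(elements.size())) return false;
  if (elements[index] == kHole) return false;                     // go to GetProperty
  *result = elements[index];
  return true;
}

int main() {
  int x = 7;
  std::vector<const void*> elements = {&x, kHole};
  const void* v = nullptr;
  printf("%d %d\n", FastKeyedLoad(elements, 0 << 1, &v),
                    FastKeyedLoad(elements, 1 << 1, &v));          // 1 0
  return 0;
}
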
+
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
-
- Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
+ Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
}
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
- // Never patch the map in the map check, so the check always fails.
- return false;
-}
-bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
- // Never patch the map in the map check, so the check always fails.
- return false;
-}
+void KeyedStoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rsp[0] : return address
+ // -- rsp[8] : key
+ // -- rsp[16] : receiver
+ // -----------------------------------
-Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
- UNIMPLEMENTED();
- return NULL;
-}
+ __ pop(rcx);
+ __ push(Operand(rsp, 1 * kPointerSize)); // receiver
+ __ push(Operand(rsp, 1 * kPointerSize)); // key
+ __ push(rax); // value
+ __ push(rcx); // return address
-Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
- JSObject* object,
- JSObject* holder,
- AccessorInfo* callback) {
- UNIMPLEMENTED();
- return NULL;
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(f, 3);
}
-Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
- JSObject* object,
- JSObject* holder,
- Object* callback) {
- UNIMPLEMENTED();
- return NULL;
-}
-Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
- JSObject* object,
- JSObject* holder,
- int index) {
- UNIMPLEMENTED();
- return NULL;
-}
+void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : transition map
+ // -- rsp[0] : return address
+ // -- rsp[8] : key
+ // -- rsp[16] : receiver
+ // -----------------------------------
-Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
- UNIMPLEMENTED();
- return NULL;
-}
+ __ pop(rbx);
+ __ push(Operand(rsp, 1 * kPointerSize)); // receiver
+ __ push(rcx); // transition map
+ __ push(rax); // value
+ __ push(rbx); // return address
-Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
- UNIMPLEMENTED();
- return NULL;
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(
+ ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3);
}
-Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
- UNIMPLEMENTED();
- return NULL;
-}
-void KeyedStoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : value
- // -- rsp[0] : return address
- // -- rsp[8] : key
+ // -- rax : value
+ // -- rsp[0] : return address
+ // -- rsp[8] : key
// -- rsp[16] : receiver
// -----------------------------------
+ Label slow, fast, array, extra;
- // Move the return address below the arguments.
+ // Get the receiver from the stack.
+ __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // 2 ~ return address, key
+ // Check that the object isn't a smi.
+ __ testl(rdx, Immediate(kSmiTagMask));
+ __ j(zero, &slow);
+ // Get the map from the receiver.
+ __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks. We need
+ // to do this because this generic stub does not perform map checks.
+ __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ j(not_zero, &slow);
+ // Get the key from the stack.
+ __ movq(rbx, Operand(rsp, 1 * kPointerSize)); // 1 ~ return address
+ // Check that the key is a smi.
+ __ testl(rbx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow);
+ // If it is a smi, make sure it is zero-extended, so it can be
+ // used as an index in a memory operand.
+ __ movl(rbx, rbx); // Clear the high bits of rbx.
+
+ __ CmpInstanceType(rcx, JS_ARRAY_TYPE);
+ __ j(equal, &array);
+ // Check that the object is some kind of JS object.
+ __ CmpInstanceType(rcx, FIRST_JS_OBJECT_TYPE);
+ __ j(below, &slow);
+
+ // Object case: Check key against length in the elements array.
+ // rax: value
+ // rdx: JSObject
+ // rbx: index (as a smi), zero-extended.
+ __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+ // Check that the object is in fast mode (not dictionary).
+ __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
+ __ j(not_equal, &slow);
+ // Untag the key (for checking against untagged length in the fixed array).
+ __ movl(rdx, rbx);
+ __ sarl(rdx, Immediate(kSmiTagSize));
+ __ cmpl(rdx, FieldOperand(rcx, Array::kLengthOffset));
+ // rax: value
+ // rcx: FixedArray
+ // rbx: index (as a smi)
+ __ j(below, &fast);
+
+
+ // Slow case: Push extra copies of the arguments (3).
+ __ bind(&slow);
__ pop(rcx);
__ push(Operand(rsp, 1 * kPointerSize));
__ push(Operand(rsp, 1 * kPointerSize));
__ push(rax);
__ push(rcx);
-
// Do tail-call to runtime routine.
- __ TailCallRuntime(f, 3);
-}
-
-void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kKeyedStoreIC_Miss)));
-}
-
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kKeyedStoreIC_Miss)));
-}
-
-Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
- int index,
- Map* transition,
- String* name) {
- UNIMPLEMENTED();
- return NULL;
+ __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3);
+
+
+ // Extra capacity case: Check if there is extra capacity to
+ // perform the store and update the length. Used for adding one
+ // element to the array by writing to array[array.length].
+ __ bind(&extra);
+ // rax: value
+ // rdx: JSArray
+ // rcx: FixedArray
+ // rbx: index (as a smi)
+ // flags: compare (rbx, rdx.length())
+ __ j(not_equal, &slow); // do not leave holes in the array
+ __ sarl(rbx, Immediate(kSmiTagSize)); // untag
+ __ cmpl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ j(above_equal, &slow);
+ // Restore tag and increment.
+ __ lea(rbx, Operand(rbx, rbx, times_1, 1 << kSmiTagSize));
+ __ movq(FieldOperand(rdx, JSArray::kLengthOffset), rbx);
+ __ subl(rbx, Immediate(1 << kSmiTagSize)); // decrement rbx again
+ __ jmp(&fast);
+
+
+ // Array case: Get the length and the elements array from the JS
+  // array. Check that the array is in fast mode; if it is, the
+ // length is always a smi.
+ __ bind(&array);
+ // rax: value
+ // rdx: JSArray
+ // rbx: index (as a smi)
+ __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
+ __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), Factory::fixed_array_map());
+ __ j(not_equal, &slow);
+
+ // Check the key against the length in the array, compute the
+ // address to store into and fall through to fast case.
+ __ cmpl(rbx, FieldOperand(rdx, JSArray::kLengthOffset));
+ __ j(above_equal, &extra);
+
+
+ // Fast case: Do the store.
+ __ bind(&fast);
+ // rax: value
+ // rcx: FixedArray
+ // rbx: index (as a smi)
+ __ movq(Operand(rcx, rbx, times_half_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag),
+ rax);
+ // Update write barrier for the elements array address.
+ __ movq(rdx, rax);
+ __ RecordWrite(rcx, 0, rdx, rbx);
+ __ ret(0);
}
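
A sketch of the three store paths above with stand-in data structures: an in-bounds index writes directly, a write at exactly array.length grows the length by one when the FixedArray backing store has spare capacity, and everything else falls through to the runtime.

#include <cstdio>
#include <vector>

struct JSArray {
  int length;                   // JSArray length (kept as a plain int here)
  std::vector<int> elements;    // FixedArray backing store (its size is the capacity)
};

static bool FastKeyedStore(JSArray* a, int index, int value) {
  if (index < 0) return false;                                    // slow case
  if (index < a->length) { a->elements[index] = value; return true; }
  if (index == a->length &&
      index < static_cast<int>(a->elements.size())) {             // extra capacity
    a->length++;
    a->elements[index] = value;
    return true;
  }
  return false;                                                   // runtime call
}

int main() {
  JSArray a{2, std::vector<int>(4, 0)};
  printf("%d %d %d, length=%d\n", FastKeyedStore(&a, 1, 10),
         FastKeyedStore(&a, 2, 20), FastKeyedStore(&a, 5, 50), a.length);
  return 0;
}
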
@@ -236,13 +562,175 @@ void CallIC::Generate(MacroAssembler* masm,
__ InvokeFunction(rdi, actual, JUMP_FUNCTION);
}
+
+// Defined in ic.cc.
+Object* CallIC_Miss(Arguments args);
+
void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // rsp[0] return address
+ // rsp[8] argument argc
+ // rsp[16] argument argc - 1
+ // ...
+ // rsp[argc * 8] argument 1
+  // rsp[(argc + 1) * 8] argument 0 = receiver
+ // rsp[(argc + 2) * 8] function name
+ // -----------------------------------
+ Label number, non_number, non_string, boolean, probe, miss;
+
+ // Get the receiver of the function from the stack; 1 ~ return address.
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ // Get the name of the function from the stack; 2 ~ return address, receiver
+ __ movq(rcx, Operand(rsp, (argc + 2) * kPointerSize));
+
+ // Probe the stub cache.
+ Code::Flags flags =
+ Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
+ StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, rax);
+
+ // If the stub cache probing failed, the receiver might be a value.
+ // For value objects, we use the map of the prototype objects for
+ // the corresponding JSValue for the cache and that is what we need
+ // to probe.
+ //
+ // Check for number.
+ __ testl(rdx, Immediate(kSmiTagMask));
+ __ j(zero, &number);
+ __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rbx);
+ __ j(not_equal, &non_number);
+ __ bind(&number);
+ StubCompiler::GenerateLoadGlobalFunctionPrototype(
+ masm, Context::NUMBER_FUNCTION_INDEX, rdx);
+ __ jmp(&probe);
+
+ // Check for string.
+ __ bind(&non_number);
+ __ CmpInstanceType(rbx, FIRST_NONSTRING_TYPE);
+ __ j(above_equal, &non_string);
+ StubCompiler::GenerateLoadGlobalFunctionPrototype(
+ masm, Context::STRING_FUNCTION_INDEX, rdx);
+ __ jmp(&probe);
+
+ // Check for boolean.
+ __ bind(&non_string);
+ __ Cmp(rdx, Factory::true_value());
+ __ j(equal, &boolean);
+ __ Cmp(rdx, Factory::false_value());
+ __ j(not_equal, &miss);
+ __ bind(&boolean);
+ StubCompiler::GenerateLoadGlobalFunctionPrototype(
+ masm, Context::BOOLEAN_FUNCTION_INDEX, rdx);
+
+ // Probe the stub cache for the value object.
+ __ bind(&probe);
+ StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg);
+
// Cache miss: Jump to runtime.
+ __ bind(&miss);
Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
}
+
+static void GenerateNormalHelper(MacroAssembler* masm,
+ int argc,
+ bool is_global_object,
+ Label* miss) {
+  // Search the dictionary, placing the result in register rdx.
+ GenerateDictionaryLoad(masm, miss, rax, rdx, rbx, rcx);
+
+ // Move the result to register rdi and check that it isn't a smi.
+ __ movq(rdi, rdx);
+ __ testl(rdx, Immediate(kSmiTagMask));
+ __ j(zero, miss);
+
+ // Check that the value is a JavaScript function.
+ __ CmpObjectType(rdx, JS_FUNCTION_TYPE, rdx);
+ __ j(not_equal, miss);
+ // Check that the function has been loaded.
+ __ testb(FieldOperand(rdx, Map::kBitField2Offset),
+ Immediate(1 << Map::kNeedsLoading));
+ __ j(not_zero, miss);
+
+ // Patch the receiver with the global proxy if necessary.
+ if (is_global_object) {
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+ __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ }
+
+ // Invoke the function.
+ ParameterCount actual(argc);
+ __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
+}
+
+
void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // rsp[0] return address
+ // rsp[8] argument argc
+ // rsp[16] argument argc - 1
+ // ...
+ // rsp[argc * 8] argument 1
+  // rsp[(argc + 1) * 8] argument 0 = receiver
+ // rsp[(argc + 2) * 8] function name
+ // -----------------------------------
+
+ Label miss, global_object, non_global_object;
+
+ // Get the receiver of the function from the stack.
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+ // Get the name of the function from the stack.
+ __ movq(rcx, Operand(rsp, (argc + 2) * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ testl(rdx, Immediate(kSmiTagMask));
+ __ j(zero, &miss);
+
+ // Check that the receiver is a valid JS object.
+ // Because there are so many map checks and type checks, do not
+ // use CmpObjectType, but load map and type into registers.
+ __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ movb(rax, FieldOperand(rbx, Map::kInstanceTypeOffset));
+ __ cmpb(rax, Immediate(FIRST_JS_OBJECT_TYPE));
+ __ j(below, &miss);
+
+ // If this assert fails, we have to check upper bound too.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+ // Check for access to global object.
+ __ cmpb(rax, Immediate(JS_GLOBAL_OBJECT_TYPE));
+ __ j(equal, &global_object);
+ __ cmpb(rax, Immediate(JS_BUILTINS_OBJECT_TYPE));
+ __ j(not_equal, &non_global_object);
+
+ // Accessing global object: Load and invoke.
+ __ bind(&global_object);
+ // Check that the global object does not require access checks.
+ __ movb(rbx, FieldOperand(rbx, Map::kBitFieldOffset));
+ __ testb(rbx, Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ j(not_equal, &miss);
+ GenerateNormalHelper(masm, argc, true, &miss);
+
+ // Accessing non-global object: Check for access to global proxy.
+ Label global_proxy, invoke;
+ __ bind(&non_global_object);
+ __ cmpb(rax, Immediate(JS_GLOBAL_PROXY_TYPE));
+ __ j(equal, &global_proxy);
+ // Check that the non-global, non-global-proxy object does not
+ // require access checks.
+ __ movb(rbx, FieldOperand(rbx, Map::kBitFieldOffset));
+ __ testb(rbx, Immediate(1 << Map::kIsAccessCheckNeeded));
+ __ j(not_equal, &miss);
+ __ bind(&invoke);
+ GenerateNormalHelper(masm, argc, false, &miss);
+
+ // Global object proxy access: Check access rights.
+ __ bind(&global_proxy);
+ __ CheckAccessGlobalProxy(rdx, rax, &miss);
+ __ jmp(&invoke);
+
// Cache miss: Jump to runtime.
+ __ bind(&miss);
Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
}
@@ -253,7 +741,10 @@ const int LoadIC::kOffsetToLoadInstruction = 20;
void LoadIC::ClearInlinedVersion(Address address) {
- // TODO(X64): Implement this when LoadIC is enabled.
+ // Reset the map check of the inlined inobject property load (if
+ // present) to guarantee failure by holding an invalid map (the null
+ // value). The offset can be patched to anything.
+ PatchInlinedLoad(address, Heap::null_value(), kMaxInt);
}
@@ -266,11 +757,10 @@ void LoadIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
__ movq(rax, Operand(rsp, kPointerSize));
- // Move the return address below the arguments.
__ pop(rbx);
- __ push(rax);
- __ push(rcx);
- __ push(rbx);
+ __ push(rax); // receiver
+ __ push(rcx); // name
+ __ push(rbx); // return address
// Perform tail call to the entry.
__ TailCallRuntime(f, 2);
@@ -278,9 +768,22 @@ void LoadIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+ // ----------- S t a t e -------------
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+
+ Label miss;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+
+ StubCompiler::GenerateLoadArrayLength(masm, rax, rdx, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
+
void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
}
@@ -320,13 +823,50 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
}
+
void LoadIC::GenerateStringLength(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+ // ----------- S t a t e -------------
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+
+ Label miss;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+
+ StubCompiler::GenerateLoadStringLength(masm, rax, rdx, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
}
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int index) {
- // TODO(X64): Implement this function. Until then, the code is not patched.
- return false;
+
+
+bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+ // The address of the instruction following the call.
+ Address test_instruction_address =
+ address + Assembler::kTargetAddrToReturnAddrDist;
+ // If the instruction following the call is not a test eax, nothing
+ // was inlined.
+ if (*test_instruction_address != kTestEaxByte) return false;
+
+ Address delta_address = test_instruction_address + 1;
+ // The delta to the start of the map check instruction.
+ int delta = *reinterpret_cast<int*>(delta_address);
+
+ // The map address is the last 8 bytes of the 10-byte
+ // immediate move instruction, so we add 2 to get the
+ // offset to the last 8 bytes.
+ Address map_address = test_instruction_address + delta + 2;
+ *(reinterpret_cast<Object**>(map_address)) = map;
+
+ // The offset is in the 32-bit displacement of a seven byte
+ // memory-to-register move instruction (REX.W 0x88 ModR/M disp32),
+ // so we add 3 to get the offset of the displacement.
+ Address offset_address =
+ test_instruction_address + delta + kOffsetToLoadInstruction + 3;
+ *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
+ return true;
}
void StoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
@@ -336,19 +876,33 @@ void StoreIC::Generate(MacroAssembler* masm, ExternalReference const& f) {
// -- rsp[0] : return address
// -- rsp[8] : receiver
// -----------------------------------
- // Move the return address below the arguments.
__ pop(rbx);
- __ push(Operand(rsp, 0));
- __ push(rcx);
- __ push(rax);
- __ push(rbx);
+ __ push(Operand(rsp, 0)); // receiver
+ __ push(rcx); // name
+ __ push(rax); // value
+ __ push(rbx); // return address
// Perform tail call to the entry.
__ TailCallRuntime(f, 3);
}
void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
- Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : Map (target of map transition)
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+
+ __ pop(rbx);
+ __ push(Operand(rsp, 0)); // receiver
+ __ push(rcx); // transition map
+ __ push(rax); // value
+ __ push(rbx); // return address
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(
+ ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3);
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
diff --git a/V8Binding/v8/src/x64/macro-assembler-x64.cc b/V8Binding/v8/src/x64/macro-assembler-x64.cc
index 457011b..2219a5a 100644
--- a/V8Binding/v8/src/x64/macro-assembler-x64.cc
+++ b/V8Binding/v8/src/x64/macro-assembler-x64.cc
@@ -31,6 +31,7 @@
#include "codegen-inl.h"
#include "assembler-x64.h"
#include "macro-assembler-x64.h"
+#include "serialize.h"
#include "debug.h"
namespace v8 {
@@ -45,11 +46,163 @@ MacroAssembler::MacroAssembler(void* buffer, int size)
}
-// TODO(x64): For now, the write barrier is disabled on x64 and we
-// therefore generate no code. This should be fixed when the write
-// barrier is enabled.
-void MacroAssembler::RecordWrite(Register object, int offset,
- Register value, Register scratch) {
+
+static void RecordWriteHelper(MacroAssembler* masm,
+ Register object,
+ Register addr,
+ Register scratch) {
+ Label fast;
+
+ // Compute the page start address from the heap object pointer, and reuse
+ // the 'object' register for it.
+ ASSERT(is_int32(~Page::kPageAlignmentMask));
+ masm->and_(object,
+ Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask)));
+ Register page_start = object;
+
+ // Compute the bit address in the remembered set, i.e. the index of the
+ // pointer within the page. Reuse 'addr' as pointer_offset.
+ masm->subq(addr, page_start);
+ masm->shr(addr, Immediate(kPointerSizeLog2));
+ Register pointer_offset = addr;
+
+ // If the bit offset lies beyond the normal remembered set range, it is in
+ // the extra remembered set area of a large object.
+ masm->cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize));
+ masm->j(less, &fast);
+
+ // Adjust 'page_start' so that addressing using 'pointer_offset' hits the
+ // extra remembered set after the large object.
+
+ // Load the array length into 'scratch'.
+ masm->movl(scratch,
+ Operand(page_start,
+ Page::kObjectStartOffset + FixedArray::kLengthOffset));
+ Register array_length = scratch;
+
+ // Extra remembered set starts right after the large object (a FixedArray), at
+ // page_start + kObjectStartOffset + objectSize
+ // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
+ // Add the delta between the end of the normal RSet and the start of the
+ // extra RSet to 'page_start', so that addressing the bit using
+ // 'pointer_offset' hits the extra RSet words.
+ masm->lea(page_start,
+ Operand(page_start, array_length, times_pointer_size,
+ Page::kObjectStartOffset + FixedArray::kHeaderSize
+ - Page::kRSetEndOffset));
+
+ // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
+ // to limit code size. We should probably evaluate this decision by
+ // measuring the performance of an equivalent implementation using
+ // "simpler" instructions
+ masm->bind(&fast);
+ masm->bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
+}
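
Stripped of the assembler plumbing, the helper maps a slot address to a page start and a bit index in that page's remembered set. A short C++ sketch of the arithmetic; the page size is an assumed value for illustration.

#include <cstdint>

constexpr uintptr_t kAssumedPageSize = 8 * 1024;  // assumption for illustration
constexpr uintptr_t kAssumedPageAlignmentMask = kAssumedPageSize - 1;

// Mirrors the and_/subq/shr sequence above. A bit index at or beyond
// kAssumedPageSize / 8 belongs to a large object, whose bit lives in the
// extra remembered set placed after the object itself.
inline void RememberedSetPosition(uintptr_t slot_address,
                                  uintptr_t* page_start,
                                  uintptr_t* bit_index) {
  *page_start = slot_address & ~kAssumedPageAlignmentMask;
  // One bit per pointer-sized slot (>> 3 on a 64-bit heap).
  *bit_index = (slot_address - *page_start) >> 3;
}
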
+
+
+class RecordWriteStub : public CodeStub {
+ public:
+ RecordWriteStub(Register object, Register addr, Register scratch)
+ : object_(object), addr_(addr), scratch_(scratch) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Register object_;
+ Register addr_;
+ Register scratch_;
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
+ object_.code(), addr_.code(), scratch_.code());
+ }
+#endif
+
+ // Minor key encoding in 12 bits as three 4-bit register codes
+ // (object, address and scratch): OOOOAAAASSSS.
+ class ScratchBits: public BitField<uint32_t, 0, 4> {};
+ class AddressBits: public BitField<uint32_t, 4, 4> {};
+ class ObjectBits: public BitField<uint32_t, 8, 4> {};
+
+ Major MajorKey() { return RecordWrite; }
+
+ int MinorKey() {
+ // Encode the registers.
+ return ObjectBits::encode(object_.code()) |
+ AddressBits::encode(addr_.code()) |
+ ScratchBits::encode(scratch_.code());
+ }
+};
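
The minor key packs the three 4-bit register codes into twelve bits. The same packing written with explicit shifts and masks, as a sketch of the BitField layout declared above:

#include <cstdint>

// Layout from low to high bits: SSSS (scratch), AAAA (address), OOOO (object).
inline uint32_t EncodeRecordWriteMinorKey(int object, int addr, int scratch) {
  return uint32_t(scratch & 0xF) |
         (uint32_t(addr & 0xF) << 4) |
         (uint32_t(object & 0xF) << 8);
}

inline void DecodeRecordWriteMinorKey(uint32_t key,
                                      int* object, int* addr, int* scratch) {
  *scratch = key & 0xF;
  *addr = (key >> 4) & 0xF;
  *object = (key >> 8) & 0xF;
}
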
+
+
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+ RecordWriteHelper(masm, object_, addr_, scratch_);
+ masm->ret(0);
+}
+
+
+// Set the remembered set bit for [object+offset].
+// object is the object being stored into, value is the object being stored.
+// If offset is zero, then the scratch register contains the array index into
+// the elements array represented as a Smi.
+// All registers are clobbered by the operation.
+void MacroAssembler::RecordWrite(Register object,
+ int offset,
+ Register value,
+ Register scratch) {
+ // First, check if a remembered set write is even needed. The tests below
+ // catch stores of smis and stores into the young generation (which does
+ // not have space for the remembered set bits).
+ Label done;
+
+ // Test that the object address is not in the new space. We cannot
+ // set remembered set bits in the new space.
+ movq(value, object);
+ ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
+ and_(value, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
+ movq(kScratchRegister, ExternalReference::new_space_start());
+ cmpq(value, kScratchRegister);
+ j(equal, &done);
+
+ if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
+ // Compute the bit offset in the remembered set, leave it in 'value'.
+ lea(value, Operand(object, offset));
+ ASSERT(is_int32(Page::kPageAlignmentMask));
+ and_(value, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
+ shr(value, Immediate(kObjectAlignmentBits));
+
+ // Compute the page address from the heap object pointer, leave it in
+ // 'object' (immediate value is sign extended).
+ and_(object, Immediate(~Page::kPageAlignmentMask));
+
+ // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
+ // to limit code size. We should probably evaluate this decision by
+ // measuring the performance of an equivalent implementation using
+ // "simpler" instructions
+ bts(Operand(object, Page::kRSetOffset), value);
+ } else {
+ Register dst = scratch;
+ if (offset != 0) {
+ lea(dst, Operand(object, offset));
+ } else {
+ // Array access: calculate the destination address in the same manner as
+ // KeyedStoreIC::GenerateGeneric. Multiplying the smi index (the value
+ // shifted left by one) by 4 gives the byte offset into an array of pointers.
+ lea(dst, Operand(object, dst, times_half_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ }
+ // If we are already generating a shared stub, deferring to a separate
+ // record write stub is not going to save us any memory, so emit the
+ // record write code inline.
+ if (generating_stub()) {
+ RecordWriteHelper(this, object, dst, value);
+ } else {
+ RecordWriteStub stub(object, dst, value);
+ CallStub(&stub);
+ }
+ }
+
+ bind(&done);
}
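
In the slow path above a smi element index arrives in the scratch register and is turned into the slot address by a single lea. A sketch of that computation, assuming the one-bit smi tag this code relies on and an illustrative FixedArray header size:

#include <cstdint>

constexpr int kPointerSize = 8;
constexpr int kHeapObjectTag = 1;
constexpr int kAssumedFixedArrayHeaderSize = 16;  // assumption for illustration

// A smi is the integer index shifted left by one, so scaling the tagged value
// by half a pointer (times_4) yields index * kPointerSize.
inline uintptr_t ElementSlotAddress(uintptr_t tagged_array, intptr_t smi_index) {
  return tagged_array +
         static_cast<uintptr_t>(smi_index) * (kPointerSize / 2) +
         kAssumedFixedArrayHeaderSize - kHeapObjectTag;
}
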
@@ -71,9 +224,9 @@ void MacroAssembler::NegativeZeroTest(Register result,
Register op,
Label* then_label) {
Label ok;
- testq(result, result);
+ testl(result, result);
j(not_zero, &ok);
- testq(op, op);
+ testl(op, op);
j(sign, then_label);
bind(&ok);
}
@@ -151,6 +304,13 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
void MacroAssembler::TailCallRuntime(ExternalReference const& ext,
int num_arguments) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : argument num_arguments - 1
+ // ...
+ // -- rsp[8 * num_arguments] : argument 0 (receiver)
+ // -----------------------------------
+
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
@@ -311,6 +471,17 @@ void MacroAssembler::Push(Handle<Object> source) {
}
+void MacroAssembler::Push(Smi* source) {
+ if (IsUnsafeSmi(source)) {
+ LoadUnsafeSmi(kScratchRegister, source);
+ push(kScratchRegister);
+ } else {
+ int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(source));
+ push(Immediate(smi));
+ }
+}
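
The fast path above works because 'push imm32' sign-extends its operand to 64 bits: a tagged smi whose value survives truncation to int32_t is pushed unchanged, and the IsUnsafeSmi check is assumed to send every other value through the scratch register. A sketch of the lossless-truncation condition:

#include <cstdint>

// True when truncating the tagged smi to 32 bits and letting push imm32
// sign-extend it back reproduces the original 64-bit value.
inline bool SurvivesPushImm32(intptr_t tagged_smi) {
  return tagged_smi >= INT32_MIN && tagged_smi <= INT32_MAX;
}
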
+
+
void MacroAssembler::Jump(ExternalReference ext) {
movq(kScratchRegister, ext);
jmp(kScratchRegister);
@@ -356,6 +527,7 @@ void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
ASSERT(RelocInfo::IsCodeTarget(rmode));
movq(kScratchRegister, code_object, rmode);
#ifdef DEBUG
+ // Patch target is kPointerSize bytes *before* the target label.
Label target;
bind(&target);
#endif
diff --git a/V8Binding/v8/src/x64/macro-assembler-x64.h b/V8Binding/v8/src/x64/macro-assembler-x64.h
index 2ee6eea..cba55eb 100644
--- a/V8Binding/v8/src/x64/macro-assembler-x64.h
+++ b/V8Binding/v8/src/x64/macro-assembler-x64.h
@@ -164,6 +164,7 @@ class MacroAssembler: public Assembler {
void Cmp(Register dst, Handle<Object> source);
void Cmp(const Operand& dst, Handle<Object> source);
void Push(Handle<Object> source);
+ void Push(Smi* smi);
// Control Flow
void Jump(Address destination, RelocInfo::Mode rmode);
@@ -175,11 +176,13 @@ class MacroAssembler: public Assembler {
void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
// Compare object type for heap object.
+ // Always use unsigned comparisons: above and below, not less and greater.
// Incoming register is heap_object and outgoing register is map.
// They may be the same register, and may be kScratchRegister.
void CmpObjectType(Register heap_object, InstanceType type, Register map);
// Compare instance type for map.
+ // Always use unsigned comparisons: above and below, not less and greater.
void CmpInstanceType(Register map, InstanceType type);
// FCmp is similar to integer cmp, but requires unsigned
diff --git a/V8Binding/v8/src/x64/stub-cache-x64.cc b/V8Binding/v8/src/x64/stub-cache-x64.cc
index ce7886b..091c826 100644
--- a/V8Binding/v8/src/x64/stub-cache-x64.cc
+++ b/V8Binding/v8/src/x64/stub-cache-x64.cc
@@ -36,6 +36,645 @@
namespace v8 {
namespace internal {
+//-----------------------------------------------------------------------------
+// StubCompiler static helper functions
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(MacroAssembler* masm,
+ Code::Flags flags,
+ StubCache::Table table,
+ Register name,
+ Register offset) {
+ ExternalReference key_offset(SCTableReference::keyReference(table));
+ Label miss;
+
+ __ movq(kScratchRegister, key_offset);
+ // Check that the key in the entry matches the name.
+ __ cmpl(name, Operand(kScratchRegister, offset, times_4, 0));
+ __ j(not_equal, &miss);
+ // Get the code entry from the cache.
+ // Use key_offset + kPointerSize, rather than loading value_offset.
+ __ movq(kScratchRegister,
+ Operand(kScratchRegister, offset, times_4, kPointerSize));
+ // Check that the flags match what we're looking for.
+ __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
+ __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
+ __ cmpl(offset, Immediate(flags));
+ __ j(not_equal, &miss);
+
+ // Jump to the first instruction in the code stub.
+ __ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ jmp(kScratchRegister);
+
+ __ bind(&miss);
+}
+
+
+void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
+ ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
+ Code* code = NULL;
+ if (kind == Code::LOAD_IC) {
+ code = Builtins::builtin(Builtins::LoadIC_Miss);
+ } else {
+ code = Builtins::builtin(Builtins::KeyedLoadIC_Miss);
+ }
+
+ Handle<Code> ic(code);
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+}
+
+
+void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
+ int index,
+ Register prototype) {
+ // Load the global or builtins object from the current context.
+ __ movq(prototype,
+ Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ // Load the global context from the global or builtins object.
+ __ movq(prototype,
+ FieldOperand(prototype, GlobalObject::kGlobalContextOffset));
+ // Load the function from the global context.
+ __ movq(prototype, Operand(prototype, Context::SlotOffset(index)));
+ // Load the initial map. The global functions all have initial maps.
+ __ movq(prototype,
+ FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+ // Load the prototype from the initial map.
+ __ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+// Load a fast property out of a holder object (src). In-object properties
+// are loaded directly; otherwise the property is loaded from the holder's
+// properties fixed array.
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst, Register src,
+ JSObject* holder, int index) {
+ // Adjust for the number of properties stored in the holder.
+ index -= holder->map()->inobject_properties();
+ if (index < 0) {
+ // Get the property straight out of the holder.
+ int offset = holder->map()->instance_size() + (index * kPointerSize);
+ __ movq(dst, FieldOperand(src, offset));
+ } else {
+ // Calculate the offset into the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ movq(dst, FieldOperand(src, JSObject::kPropertiesOffset));
+ __ movq(dst, FieldOperand(dst, offset));
+ }
+}
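
The index-to-offset rule above also reads naturally outside the assembler. A standalone sketch of the same computation; the FixedArray header size is an assumed value for illustration.

constexpr int kPointerSize = 8;
constexpr int kAssumedFixedArrayHeaderSize = 16;  // assumption for illustration

struct FastPropertyLocation {
  bool in_object;   // true: the field lives inside the object itself
  int byte_offset;  // untagged byte offset of the field
};

// After subtracting the in-object count, negative indices address in-object
// slots counted back from the end of the instance; non-negative indices
// address slots in the properties FixedArray.
inline FastPropertyLocation LocateFastProperty(int index,
                                               int inobject_properties,
                                               int instance_size) {
  index -= inobject_properties;
  if (index < 0) {
    return {true, instance_size + index * kPointerSize};
  }
  return {false, index * kPointerSize + kAssumedFixedArrayHeaderSize};
}
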
+
+
+template <typename Pushable>
+static void PushInterceptorArguments(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Pushable name,
+ JSObject* holder_obj) {
+ __ push(receiver);
+ __ push(holder);
+ __ push(name);
+ InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
+ __ movq(kScratchRegister, Handle<Object>(interceptor),
+ RelocInfo::EMBEDDED_OBJECT);
+ __ push(kScratchRegister);
+ __ push(FieldOperand(kScratchRegister, InterceptorInfo::kDataOffset));
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm,
+ Code::Flags flags,
+ Register receiver,
+ Register name,
+ Register scratch,
+ Register extra) {
+ Label miss;
+ USE(extra); // The register extra is not used on the X64 platform.
+ // Make sure that code is valid. The shifting code relies on the
+ // entry size being 16.
+ ASSERT(sizeof(Entry) == 16);
+
+ // Make sure the flags do not name a specific type.
+ ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Make sure that there are no register conflicts.
+ ASSERT(!scratch.is(receiver));
+ ASSERT(!scratch.is(name));
+
+ // Check that the receiver isn't a smi.
+ __ testl(receiver, Immediate(kSmiTagMask));
+ __ j(zero, &miss);
+
+ // Get the map of the receiver and compute the hash.
+ __ movl(scratch, FieldOperand(name, String::kLengthOffset));
+ // Use only the low 32 bits of the map pointer.
+ __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ xor_(scratch, Immediate(flags));
+ __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+
+ // Probe the primary table.
+ ProbeTable(masm, flags, kPrimary, name, scratch);
+
+ // Primary miss: Compute hash for secondary probe.
+ __ movl(scratch, FieldOperand(name, String::kLengthOffset));
+ __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ xor_(scratch, Immediate(flags));
+ __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+ __ subl(scratch, name);
+ __ addl(scratch, Immediate(flags));
+ __ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+
+ // Probe the secondary table.
+ ProbeTable(masm, flags, kSecondary, name, scratch);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ bind(&miss);
+}
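
The probe code above hashes the name's length field and the low 32 bits of the map into offsets that are pre-scaled to multiples of four, so the times_4 addressing in ProbeTable lands on 16-byte cache entries. The same hashes in 32-bit C++ arithmetic; the table sizes are assumptions for illustration.

#include <cstdint>

constexpr int kHeapObjectTagSize = 2;
constexpr uint32_t kAssumedPrimaryTableSize = 2048;   // assumption
constexpr uint32_t kAssumedSecondaryTableSize = 512;  // assumption

inline uint32_t PrimaryOffset(uint32_t name_length_field, uint32_t map_low32,
                              uint32_t flags) {
  uint32_t hash = (name_length_field + map_low32) ^ flags;
  return hash & ((kAssumedPrimaryTableSize - 1) << kHeapObjectTagSize);
}

inline uint32_t SecondaryOffset(uint32_t primary_offset, uint32_t name_low32,
                                uint32_t flags) {
  uint32_t hash = primary_offset - name_low32 + flags;
  return hash & ((kAssumedSecondaryTableSize - 1) << kHeapObjectTagSize);
}
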
+
+
+void StubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Builtins::Name storage_extend,
+ JSObject* object,
+ int index,
+ Map* transition,
+ Register receiver_reg,
+ Register name_reg,
+ Register scratch,
+ Label* miss_label) {
+ // Check that the object isn't a smi.
+ __ testl(receiver_reg, Immediate(kSmiTagMask));
+ __ j(zero, miss_label);
+
+ // Check that the map of the object hasn't changed.
+ __ Cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
+ Handle<Map>(object->map()));
+ __ j(not_equal, miss_label);
+
+ // Perform global security token check if needed.
+ if (object->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
+ }
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ // Perform map transition for the receiver if necessary.
+ if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+ // The properties must be extended before we can store the value.
+ // We jump to a runtime call that extends the properties array.
+ __ Move(rcx, Handle<Map>(transition));
+ Handle<Code> ic(Builtins::builtin(storage_extend));
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+ return;
+ }
+
+ if (transition != NULL) {
+ // Update the map of the object; no write barrier updating is
+ // needed because the map is never in new space.
+ __ Move(FieldOperand(receiver_reg, HeapObject::kMapOffset),
+ Handle<Map>(transition));
+ }
+
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties is not going to change.
+ index -= object->map()->inobject_properties();
+
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ movq(FieldOperand(receiver_reg, offset), rax);
+
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ movq(name_reg, rax);
+ __ RecordWrite(receiver_reg, offset, name_reg, scratch);
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ // Get the properties array (optimistically).
+ __ movq(scratch, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ movq(FieldOperand(scratch, offset), rax);
+
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ movq(name_reg, rax);
+ __ RecordWrite(scratch, offset, name_reg, receiver_reg);
+ }
+
+ // Return the value (register rax).
+ __ ret(0);
+}
+
+
+void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch,
+ Label* miss_label) {
+ // Check that the receiver isn't a smi.
+ __ testl(receiver, Immediate(kSmiTagMask));
+ __ j(zero, miss_label);
+
+ // Check that the object is a JS array.
+ __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
+ __ j(not_equal, miss_label);
+
+ // Load length directly from the JS array.
+ __ movq(rax, FieldOperand(receiver, JSArray::kLengthOffset));
+ __ ret(0);
+}
+
+
+// Generate code to check if an object is a string. If the object is
+// a string, the map's instance type is left in the scratch register.
+static void GenerateStringCheck(MacroAssembler* masm,
+ Register receiver,
+ Register scratch,
+ Label* smi,
+ Label* non_string_object) {
+ // Check that the object isn't a smi.
+ __ testl(receiver, Immediate(kSmiTagMask));
+ __ j(zero, smi);
+
+ // Check that the object is a string.
+ __ movq(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+ __ movzxbq(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ ASSERT(kNotStringTag != 0);
+ __ testl(scratch, Immediate(kNotStringTag));
+ __ j(not_zero, non_string_object);
+}
+
+
+void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch,
+ Label* miss) {
+ Label load_length, check_wrapper;
+
+ // Check if the object is a string, leaving the instance type in the
+ // scratch register.
+ GenerateStringCheck(masm, receiver, scratch, miss, &check_wrapper);
+
+ // Load length directly from the string.
+ __ bind(&load_length);
+ __ and_(scratch, Immediate(kStringSizeMask));
+ __ movl(rax, FieldOperand(receiver, String::kLengthOffset));
+ // rcx is also the receiver.
+ __ lea(rcx, Operand(scratch, String::kLongLengthShift));
+ __ shr(rax); // rcx is implicit shift register.
+ __ shl(rax, Immediate(kSmiTagSize));
+ __ ret(0);
+
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ cmpl(scratch, Immediate(JS_VALUE_TYPE));
+ __ j(not_equal, miss);
+
+ // Check if the wrapped value is a string and load the length
+ // directly if it is.
+ __ movq(receiver, FieldOperand(receiver, JSValue::kValueOffset));
+ GenerateStringCheck(masm, receiver, scratch, miss, miss);
+ __ jmp(&load_length);
+}
+
+
+template <class Pushable>
+static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Pushable name,
+ JSObject* holder_obj) {
+ PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly));
+ __ movq(rax, Immediate(5));
+ __ movq(rbx, ref);
+
+ CEntryStub stub;
+ __ CallStub(&stub);
+}
+
+
+
+void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
+ Register receiver,
+ Register result,
+ Register scratch,
+ Label* miss_label) {
+ __ TryGetFunctionPrototype(receiver, result, miss_label);
+ if (!result.is(rax)) __ movq(rax, result);
+ __ ret(0);
+}
+
+
+static void LookupPostInterceptor(JSObject* holder,
+ String* name,
+ LookupResult* lookup) {
+ holder->LocalLookupRealNamedProperty(name, lookup);
+ if (lookup->IsNotFound()) {
+ Object* proto = holder->GetPrototype();
+ if (proto != Heap::null_value()) {
+ proto->Lookup(name, lookup);
+ }
+ }
+}
+
+
+class LoadInterceptorCompiler BASE_EMBEDDED {
+ public:
+ explicit LoadInterceptorCompiler(Register name) : name_(name) {}
+
+ void CompileCacheable(MacroAssembler* masm,
+ StubCompiler* stub_compiler,
+ Register receiver,
+ Register holder,
+ Register scratch1,
+ Register scratch2,
+ JSObject* holder_obj,
+ LookupResult* lookup,
+ String* name,
+ Label* miss_label) {
+ AccessorInfo* callback = 0;
+ bool optimize = false;
+ // So far the most popular follow-ups for interceptor loads are FIELD
+ // and CALLBACKS, so inline only those; other cases may be added
+ // later.
+ if (lookup->type() == FIELD) {
+ optimize = true;
+ } else if (lookup->type() == CALLBACKS) {
+ Object* callback_object = lookup->GetCallbackObject();
+ if (callback_object->IsAccessorInfo()) {
+ callback = AccessorInfo::cast(callback_object);
+ optimize = callback->getter() != NULL;
+ }
+ }
+
+ if (!optimize) {
+ CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+ return;
+ }
+
+ // Note: starting a frame here makes GC aware of pointers pushed below.
+ __ EnterInternalFrame();
+
+ if (lookup->type() == CALLBACKS) {
+ __ push(receiver);
+ }
+ __ push(holder);
+ __ push(name_);
+
+ CompileCallLoadPropertyWithInterceptor(masm,
+ receiver,
+ holder,
+ name_,
+ holder_obj);
+
+ Label interceptor_failed;
+ __ Cmp(rax, Factory::no_interceptor_result_sentinel());
+ __ j(equal, &interceptor_failed);
+ __ LeaveInternalFrame();
+ __ ret(0);
+
+ __ bind(&interceptor_failed);
+ __ pop(name_);
+ __ pop(holder);
+ if (lookup->type() == CALLBACKS) {
+ __ pop(receiver);
+ }
+
+ __ LeaveInternalFrame();
+
+ if (lookup->type() == FIELD) {
+ holder = stub_compiler->CheckPrototypes(holder_obj,
+ holder,
+ lookup->holder(),
+ scratch1,
+ scratch2,
+ name,
+ miss_label);
+ stub_compiler->GenerateFastPropertyLoad(masm,
+ rax,
+ holder,
+ lookup->holder(),
+ lookup->GetFieldIndex());
+ __ ret(0);
+ } else {
+ ASSERT(lookup->type() == CALLBACKS);
+ ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+ ASSERT(callback != NULL);
+ ASSERT(callback->getter() != NULL);
+
+ Label cleanup;
+ __ pop(scratch2);
+ __ push(receiver);
+ __ push(scratch2);
+
+ holder = stub_compiler->CheckPrototypes(holder_obj, holder,
+ lookup->holder(), scratch1,
+ scratch2,
+ name,
+ &cleanup);
+
+ __ pop(scratch2); // save old return address
+ __ push(holder);
+ __ Move(holder, Handle<AccessorInfo>(callback));
+ __ push(holder);
+ __ push(FieldOperand(holder, AccessorInfo::kDataOffset));
+ __ push(name_);
+ __ push(scratch2); // restore old return address
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+ __ TailCallRuntime(ref, 5);
+
+ __ bind(&cleanup);
+ __ pop(scratch1);
+ __ pop(scratch2);
+ __ push(scratch1);
+ }
+ }
+
+
+ void CompileRegular(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register scratch,
+ JSObject* holder_obj,
+ Label* miss_label) {
+ __ pop(scratch); // save old return address
+ PushInterceptorArguments(masm, receiver, holder, name_, holder_obj);
+ __ push(scratch); // restore old return address
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+ __ TailCallRuntime(ref, 5);
+ }
+
+ private:
+ Register name_;
+};
+
+
+template <class Compiler>
+static void CompileLoadInterceptor(Compiler* compiler,
+ StubCompiler* stub_compiler,
+ MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss) {
+ ASSERT(holder->HasNamedInterceptor());
+ ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Check that the receiver isn't a smi.
+ __ testl(receiver, Immediate(kSmiTagMask));
+ __ j(zero, miss);
+
+ // Check that the maps haven't changed.
+ Register reg =
+ stub_compiler->CheckPrototypes(object, receiver, holder,
+ scratch1, scratch2, name, miss);
+
+ if (lookup->IsValid() && lookup->IsCacheable()) {
+ compiler->CompileCacheable(masm,
+ stub_compiler,
+ receiver,
+ reg,
+ scratch1,
+ scratch2,
+ holder,
+ lookup,
+ name,
+ miss);
+ } else {
+ compiler->CompileRegular(masm,
+ receiver,
+ reg,
+ scratch2,
+ holder,
+ miss);
+ }
+}
+
+
+class CallInterceptorCompiler BASE_EMBEDDED {
+ public:
+ explicit CallInterceptorCompiler(const ParameterCount& arguments)
+ : arguments_(arguments), argc_(arguments.immediate()) {}
+
+ void CompileCacheable(MacroAssembler* masm,
+ StubCompiler* stub_compiler,
+ Register receiver,
+ Register holder,
+ Register scratch1,
+ Register scratch2,
+ JSObject* holder_obj,
+ LookupResult* lookup,
+ String* name,
+ Label* miss_label) {
+ JSFunction* function = 0;
+ bool optimize = false;
+ // So far the most popular case for a failed interceptor lookup is a
+ // CONSTANT_FUNCTION sitting below it.
+ if (lookup->type() == CONSTANT_FUNCTION) {
+ function = lookup->GetConstantFunction();
+ // A JSArray holder is a special case for calls to constant functions
+ // (see the corresponding code).
+ if (function->is_compiled() && !holder_obj->IsJSArray()) {
+ optimize = true;
+ }
+ }
+
+ if (!optimize) {
+ CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
+ return;
+ }
+
+ __ EnterInternalFrame();
+ __ push(holder); // save the holder
+
+ CompileCallLoadPropertyWithInterceptor(
+ masm,
+ receiver,
+ holder,
+ // Inside the frame set up by EnterInternalFrame, this operand
+ // refers to the name argument.
+ Operand(rbp, (argc_ + 3) * kPointerSize),
+ holder_obj);
+
+ __ pop(receiver); // restore holder
+ __ LeaveInternalFrame();
+
+ __ Cmp(rax, Factory::no_interceptor_result_sentinel());
+ Label invoke;
+ __ j(not_equal, &invoke);
+
+ stub_compiler->CheckPrototypes(holder_obj, receiver,
+ lookup->holder(), scratch1,
+ scratch2,
+ name,
+ miss_label);
+ if (lookup->holder()->IsGlobalObject()) {
+ __ movq(rdx, Operand(rsp, (argc_ + 1) * kPointerSize));
+ __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+ __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdx);
+ }
+
+ ASSERT(function->is_compiled());
+ // Get the function and set up the context.
+ __ Move(rdi, Handle<JSFunction>(function));
+ __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+ // Jump to the cached code (tail call).
+ ASSERT(function->is_compiled());
+ Handle<Code> code(function->code());
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ __ InvokeCode(code, expected, arguments_,
+ RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+
+ __ bind(&invoke);
+ }
+
+ void CompileRegular(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ Register scratch,
+ JSObject* holder_obj,
+ Label* miss_label) {
+ __ EnterInternalFrame();
+
+ PushInterceptorArguments(masm,
+ receiver,
+ holder,
+ Operand(rbp, (argc_ + 3) * kPointerSize),
+ holder_obj);
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForCall));
+ __ movq(rax, Immediate(5));
+ __ movq(rbx, ref);
+
+ CEntryStub stub;
+ __ CallStub(&stub);
+
+ __ LeaveInternalFrame();
+ }
+
+ private:
+ const ParameterCount& arguments_;
+ int argc_;
+};
+
+
+#undef __
+
#define __ ACCESS_MASM((masm()))
@@ -133,13 +772,13 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
CheckPrototypes(JSObject::cast(object), rdx, holder,
rbx, rcx, name, &miss);
- // Make sure object->elements()->map() != Heap::dictionary_array_map()
+ // Make sure object->HasFastElements().
// Get the elements array of the object.
__ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- Factory::hash_table_map());
- __ j(equal, &miss);
+ Factory::fixed_array_map());
+ __ j(not_equal, &miss);
break;
default:
@@ -227,11 +866,62 @@ Object* CallStubCompiler::CompileCallField(Object* object,
}
-Object* CallStubCompiler::CompileCallInterceptor(Object* a,
- JSObject* b,
- String* c) {
- // TODO(X64): Implement a real stub.
- return Failure::InternalError();
+Object* CallStubCompiler::CompileCallInterceptor(Object* object,
+ JSObject* holder,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -----------------------------------
+ Label miss;
+
+ // Get the number of arguments.
+ const int argc = arguments().immediate();
+
+ LookupResult lookup;
+ LookupPostInterceptor(holder, name, &lookup);
+
+ // Get the receiver from the stack.
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+ CallInterceptorCompiler compiler(arguments());
+ CompileLoadInterceptor(&compiler,
+ this,
+ masm(),
+ JSObject::cast(object),
+ holder,
+ name,
+ &lookup,
+ rdx,
+ rbx,
+ rcx,
+ &miss);
+
+ // Restore receiver.
+ __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
+
+ // Check that the function really is a function.
+ __ testl(rax, Immediate(kSmiTagMask));
+ __ j(zero, &miss);
+ __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
+ __ j(not_equal, &miss);
+
+ // Patch the receiver on the stack with the global proxy if
+ // necessary.
+ if (object->IsGlobalObject()) {
+ __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalReceiverOffset));
+ __ movq(Operand(rsp, (argc + 1) * kPointerSize), rdx);
+ }
+
+ // Invoke the function.
+ __ movq(rdi, rax);
+ __ InvokeFunction(rdi, arguments(), JUMP_FUNCTION);
+
+ // Handle load cache miss.
+ __ bind(&miss);
+ Handle<Code> ic = ComputeCallMiss(argc);
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(INTERCEPTOR, name);
}
@@ -252,8 +942,6 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// rsp[(argc + 2) * 8] function name
Label miss;
- __ IncrementCounter(&Counters::call_global_inline, 1);
-
// Get the number of arguments.
const int argc = arguments().immediate();
@@ -289,6 +977,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
+ __ IncrementCounter(&Counters::call_global_inline, 1);
ASSERT(function->is_compiled());
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
@@ -297,7 +986,6 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
// Handle call cache miss.
__ bind(&miss);
- __ DecrementCounter(&Counters::call_global_inline, 1);
__ IncrementCounter(&Counters::call_global_inline_miss, 1);
Handle<Code> ic = ComputeCallMiss(arguments().immediate());
__ Jump(ic, RelocInfo::CODE_TARGET);
@@ -307,12 +995,25 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
}
-Object* LoadStubCompiler::CompileLoadCallback(JSObject* a,
- JSObject* b,
- AccessorInfo* c,
- String* d) {
- // TODO(X64): Implement a real stub.
- return Failure::InternalError();
+Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
+ JSObject* holder,
+ AccessorInfo* callback,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+ GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx,
+ callback, name, &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
}
@@ -327,7 +1028,7 @@ Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
// -----------------------------------
Label miss;
- __ movq(rax, (Operand(rsp, kPointerSize)));
+ __ movq(rax, Operand(rsp, kPointerSize));
GenerateLoadConstant(object, holder, rax, rbx, rdx, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -348,7 +1049,7 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
// -----------------------------------
Label miss;
- __ movq(rax, (Operand(rsp, kPointerSize)));
+ __ movq(rax, Operand(rsp, kPointerSize));
GenerateLoadField(object, holder, rax, rbx, rdx, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -358,11 +1059,37 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
}
-Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* a,
- JSObject* b,
- String* c) {
- // TODO(X64): Implement a real stub.
- return Failure::InternalError();
+Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+ JSObject* holder,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+ Label miss;
+
+ LookupResult lookup;
+ LookupPostInterceptor(holder, name, &lookup);
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+ // TODO(368): Compile in the whole chain: all the interceptors in the
+ // prototype chain and the ultimate answer.
+ GenerateLoadInterceptor(receiver,
+ holder,
+ &lookup,
+ rax,
+ rcx,
+ rdx,
+ rbx,
+ name,
+ &miss);
+
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(INTERCEPTOR, name);
}
@@ -378,10 +1105,8 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
// -----------------------------------
Label miss;
- __ IncrementCounter(&Counters::named_load_global_inline, 1);
-
// Get the receiver from the stack.
- __ movq(rax, (Operand(rsp, kPointerSize)));
+ __ movq(rax, Operand(rsp, kPointerSize));
// If the object is the holder then we know that it's a global
// object which can only happen for contextual loads. In this case,
@@ -407,10 +1132,10 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
+ __ IncrementCounter(&Counters::named_load_global_inline, 1);
__ ret(0);
__ bind(&miss);
- __ DecrementCounter(&Counters::named_load_global_inline, 1);
__ IncrementCounter(&Counters::named_load_global_inline_miss, 1);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -419,11 +1144,234 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
}
-Object* StoreStubCompiler::CompileStoreCallback(JSObject* a,
- AccessorInfo* b,
- String* c) {
- UNIMPLEMENTED();
- return NULL;
+Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ AccessorInfo* callback) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : name
+ // -- rsp[16] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+ __ IncrementCounter(&Counters::keyed_load_callback, 1);
+
+ // Check that the name has not changed.
+ __ Cmp(rax, Handle<String>(name));
+ __ j(not_equal, &miss);
+
+ GenerateLoadCallback(receiver, holder, rcx, rax, rbx, rdx,
+ callback, name, &miss);
+ __ bind(&miss);
+ __ DecrementCounter(&Counters::keyed_load_callback, 1);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : name
+ // -- rsp[16] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+ __ IncrementCounter(&Counters::keyed_load_array_length, 1);
+
+ // Check that the name has not changed.
+ __ Cmp(rax, Handle<String>(name));
+ __ j(not_equal, &miss);
+
+ GenerateLoadArrayLength(masm(), rcx, rdx, &miss);
+ __ bind(&miss);
+ __ DecrementCounter(&Counters::keyed_load_array_length, 1);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ Object* value) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : name
+ // -- rsp[16] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+ __ IncrementCounter(&Counters::keyed_load_constant_function, 1);
+
+ // Check that the name has not changed.
+ __ Cmp(rax, Handle<String>(name));
+ __ j(not_equal, &miss);
+
+ GenerateLoadConstant(receiver, holder, rcx, rbx, rdx,
+ value, name, &miss);
+ __ bind(&miss);
+ __ DecrementCounter(&Counters::keyed_load_constant_function, 1);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CONSTANT_FUNCTION, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : name
+ // -- rsp[16] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+ __ IncrementCounter(&Counters::keyed_load_function_prototype, 1);
+
+ // Check that the name has not changed.
+ __ Cmp(rax, Handle<String>(name));
+ __ j(not_equal, &miss);
+
+ GenerateLoadFunctionPrototype(masm(), rcx, rdx, rbx, &miss);
+ __ bind(&miss);
+ __ DecrementCounter(&Counters::keyed_load_function_prototype, 1);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+ JSObject* holder,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : name
+ // -- rsp[16] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+ __ IncrementCounter(&Counters::keyed_load_interceptor, 1);
+
+ // Check that the name has not changed.
+ __ Cmp(rax, Handle<String>(name));
+ __ j(not_equal, &miss);
+
+ LookupResult lookup;
+ LookupPostInterceptor(holder, name, &lookup);
+ GenerateLoadInterceptor(receiver,
+ holder,
+ &lookup,
+ rcx,
+ rax,
+ rdx,
+ rbx,
+ name,
+ &miss);
+ __ bind(&miss);
+ __ DecrementCounter(&Counters::keyed_load_interceptor, 1);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(INTERCEPTOR, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : name
+ // -- rsp[16] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+ __ IncrementCounter(&Counters::keyed_load_string_length, 1);
+
+ // Check that the name has not changed.
+ __ Cmp(rax, Handle<String>(name));
+ __ j(not_equal, &miss);
+
+ GenerateLoadStringLength(masm(), rcx, rdx, &miss);
+ __ bind(&miss);
+ __ DecrementCounter(&Counters::keyed_load_string_length, 1);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
+}
+
+
+Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
+ AccessorInfo* callback,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Get the object from the stack.
+ __ movq(rbx, Operand(rsp, 1 * kPointerSize));
+
+ // Check that the object isn't a smi.
+ __ testl(rbx, Immediate(kSmiTagMask));
+ __ j(zero, &miss);
+
+ // Check that the map of the object hasn't changed.
+ __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+ Handle<Map>(object->map()));
+ __ j(not_equal, &miss);
+
+ // Perform global security token check if needed.
+ if (object->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(rbx, rdx, &miss);
+ }
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ __ pop(rbx); // remove the return address
+ __ push(Operand(rsp, 0)); // receiver
+ __ Push(Handle<AccessorInfo>(callback)); // callback info
+ __ push(rcx); // name
+ __ push(rax); // value
+ __ push(rbx); // restore return address
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_callback_property =
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty));
+ __ TailCallRuntime(store_callback_property, 4);
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ __ Move(rcx, Handle<String>(name)); // restore name
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
}
@@ -462,17 +1410,165 @@ Object* StoreStubCompiler::CompileStoreField(JSObject* object,
}
-Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* a, String* b) {
- UNIMPLEMENTED();
- return NULL;
+Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Get the object from the stack.
+ __ movq(rbx, Operand(rsp, 1 * kPointerSize));
+
+ // Check that the object isn't a smi.
+ __ testl(rbx, Immediate(kSmiTagMask));
+ __ j(zero, &miss);
+
+ // Check that the map of the object hasn't changed.
+ __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+ Handle<Map>(receiver->map()));
+ __ j(not_equal, &miss);
+
+ // Perform global security token check if needed.
+ if (receiver->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(rbx, rdx, &miss);
+ }
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
+
+ __ pop(rbx); // remove the return address
+ __ push(Operand(rsp, 0)); // receiver
+ __ push(rcx); // name
+ __ push(rax); // value
+ __ push(rbx); // restore return address
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_ic_property =
+ ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
+ __ TailCallRuntime(store_ic_property, 3);
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ __ Move(rcx, Handle<String>(name)); // restore name
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(INTERCEPTOR, name);
}
Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
JSGlobalPropertyCell* cell,
String* name) {
- UNIMPLEMENTED();
- return NULL;
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rcx : name
+ // -- rsp[0] : return address
+ // -- rsp[8] : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Check that the map of the global has not changed.
+ __ movq(rbx, Operand(rsp, kPointerSize));
+ __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
+ Handle<Map>(object->map()));
+ __ j(not_equal, &miss);
+
+ // Store the value in the cell.
+ __ Move(rcx, Handle<JSGlobalPropertyCell>(cell));
+ __ movq(FieldOperand(rcx, JSGlobalPropertyCell::kValueOffset), rax);
+
+ // Return the value (register rax).
+ __ IncrementCounter(&Counters::named_store_global_inline, 1);
+ __ ret(0);
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ __ IncrementCounter(&Counters::named_store_global_inline_miss, 1);
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(NORMAL, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ int index) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : name
+ // -- rsp[16] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ movq(rax, Operand(rsp, kPointerSize));
+ __ movq(rcx, Operand(rsp, 2 * kPointerSize));
+ __ IncrementCounter(&Counters::keyed_load_field, 1);
+
+ // Check that the name has not changed.
+ __ Cmp(rax, Handle<String>(name));
+ __ j(not_equal, &miss);
+
+ GenerateLoadField(receiver, holder, rcx, rbx, rdx, index, name, &miss);
+
+ __ bind(&miss);
+ __ DecrementCounter(&Counters::keyed_load_field, 1);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(FIELD, name);
+}
+
+
+Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+ int index,
+ Map* transition,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- rax : value
+ // -- rsp[0] : return address
+ // -- rsp[8] : key
+ // -- rsp[16] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ IncrementCounter(&Counters::keyed_store_field, 1);
+
+ // Get the name from the stack.
+ __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+ // Check that the name has not changed.
+ __ Cmp(rcx, Handle<String>(name));
+ __ j(not_equal, &miss);
+
+ // Get the object from the stack.
+ __ movq(rbx, Operand(rsp, 2 * kPointerSize));
+
+ // Generate store field code. Trashes the name register.
+ GenerateStoreField(masm(),
+ Builtins::KeyedStoreIC_ExtendStorage,
+ object,
+ index,
+ transition,
+ rbx, rcx, rdx,
+ &miss);
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ __ DecrementCounter(&Counters::keyed_store_field, 1);
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
}
@@ -500,6 +1596,66 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
}
+
+void StubCompiler::GenerateLoadInterceptor(JSObject* object,
+ JSObject* holder,
+ LookupResult* lookup,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ String* name,
+ Label* miss) {
+ LoadInterceptorCompiler compiler(name_reg);
+ CompileLoadInterceptor(&compiler,
+ this,
+ masm(),
+ object,
+ holder,
+ name,
+ lookup,
+ receiver,
+ scratch1,
+ scratch2,
+ miss);
+}
+
+
+void StubCompiler::GenerateLoadCallback(JSObject* object,
+ JSObject* holder,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ AccessorInfo* callback,
+ String* name,
+ Label* miss) {
+ // Check that the receiver isn't a smi.
+ __ testl(receiver, Immediate(kSmiTagMask));
+ __ j(zero, miss);
+
+ // Check that the maps haven't changed.
+ Register reg =
+ CheckPrototypes(object, receiver, holder,
+ scratch1, scratch2, name, miss);
+
+ // Push the arguments on the JS stack of the caller.
+ __ pop(scratch2); // remove return address
+ __ push(receiver); // receiver
+ __ push(reg); // holder
+ __ Move(reg, Handle<AccessorInfo>(callback)); // callback data
+ __ push(reg);
+ __ push(FieldOperand(reg, AccessorInfo::kDataOffset));
+ __ push(name_reg); // name
+ __ push(scratch2); // restore return address
+
+ // Do tail-call to the runtime system.
+ ExternalReference load_callback_property =
+ ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+ __ TailCallRuntime(load_callback_property, 5);
+}
+
+
Register StubCompiler::CheckPrototypes(JSObject* object,
Register object_reg,
JSObject* holder,
@@ -584,224 +1740,4 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
#undef __
-//-----------------------------------------------------------------------------
-// StubCompiler static helper functions
-
-#define __ ACCESS_MASM(masm)
-
-
-static void ProbeTable(MacroAssembler* masm,
- Code::Flags flags,
- StubCache::Table table,
- Register name,
- Register offset) {
- ExternalReference key_offset(SCTableReference::keyReference(table));
- Label miss;
-
- __ movq(kScratchRegister, key_offset);
- // Check that the key in the entry matches the name.
- __ cmpl(name, Operand(kScratchRegister, offset, times_4, 0));
- __ j(not_equal, &miss);
- // Get the code entry from the cache.
- // Use key_offset + kPointerSize, rather than loading value_offset.
- __ movq(kScratchRegister,
- Operand(kScratchRegister, offset, times_4, kPointerSize));
- // Check that the flags match what we're looking for.
- __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
- __ and_(offset, Immediate(~Code::kFlagsNotUsedInLookup));
- __ cmpl(offset, Immediate(flags));
- __ j(not_equal, &miss);
-
- // Jump to the first instruction in the code stub.
- __ addq(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ jmp(kScratchRegister);
-
- __ bind(&miss);
-}
-
-
-void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
- ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
- Code* code = NULL;
- if (kind == Code::LOAD_IC) {
- code = Builtins::builtin(Builtins::LoadIC_Miss);
- } else {
- code = Builtins::builtin(Builtins::KeyedLoadIC_Miss);
- }
-
- Handle<Code> ic(code);
- __ Jump(ic, RelocInfo::CODE_TARGET);
-}
-
-
-void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
- int index,
- Register prototype) {
- // Load the global or builtins object from the current context.
- __ movq(prototype,
- Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
- // Load the global context from the global or builtins object.
- __ movq(prototype,
- FieldOperand(prototype, GlobalObject::kGlobalContextOffset));
- // Load the function from the global context.
- __ movq(prototype, Operand(prototype, Context::SlotOffset(index)));
- // Load the initial map. The global functions all have initial maps.
- __ movq(prototype,
- FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
- // Load the prototype from the initial map.
- __ movq(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-// Load a fast property out of a holder object (src). In-object properties
-// are loaded directly otherwise the property is loaded from the properties
-// fixed array.
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
- Register dst, Register src,
- JSObject* holder, int index) {
- // Adjust for the number of properties stored in the holder.
- index -= holder->map()->inobject_properties();
- if (index < 0) {
- // Get the property straight out of the holder.
- int offset = holder->map()->instance_size() + (index * kPointerSize);
- __ movq(dst, FieldOperand(src, offset));
- } else {
- // Calculate the offset into the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- __ movq(dst, FieldOperand(src, JSObject::kPropertiesOffset));
- __ movq(dst, FieldOperand(dst, offset));
- }
-}
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
- Code::Flags flags,
- Register receiver,
- Register name,
- Register scratch,
- Register extra) {
- Label miss;
- USE(extra); // The register extra is not used on the X64 platform.
- // Make sure that code is valid. The shifting code relies on the
- // entry size being 16.
- ASSERT(sizeof(Entry) == 16);
-
- // Make sure the flags do not name a specific type.
- ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
-
- // Make sure that there are no register conflicts.
- ASSERT(!scratch.is(receiver));
- ASSERT(!scratch.is(name));
-
- // Check that the receiver isn't a smi.
- __ testl(receiver, Immediate(kSmiTagMask));
- __ j(zero, &miss);
-
- // Get the map of the receiver and compute the hash.
- __ movl(scratch, FieldOperand(name, String::kLengthOffset));
- // Use only the low 32 bits of the map pointer.
- __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, Immediate(flags));
- __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
-
- // Probe the primary table.
- ProbeTable(masm, flags, kPrimary, name, scratch);
-
- // Primary miss: Compute hash for secondary probe.
- __ movl(scratch, FieldOperand(name, String::kLengthOffset));
- __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ xor_(scratch, Immediate(flags));
- __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
- __ subl(scratch, name);
- __ addl(scratch, Immediate(flags));
- __ and_(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
-
- // Probe the secondary table.
- ProbeTable(masm, flags, kSecondary, name, scratch);
-
- // Cache miss: Fall-through and let caller handle the miss by
- // entering the runtime system.
- __ bind(&miss);
-}
-
-
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Builtins::Name storage_extend,
- JSObject* object,
- int index,
- Map* transition,
- Register receiver_reg,
- Register name_reg,
- Register scratch,
- Label* miss_label) {
- // Check that the object isn't a smi.
- __ testl(receiver_reg, Immediate(kSmiTagMask));
- __ j(zero, miss_label);
-
- // Check that the map of the object hasn't changed.
- __ Cmp(FieldOperand(receiver_reg, HeapObject::kMapOffset),
- Handle<Map>(object->map()));
- __ j(not_equal, miss_label);
-
- // Perform global security token check if needed.
- if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
- }
-
- // Stub never generated for non-global objects that require access
- // checks.
- ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
- // Perform map transition for the receiver if necessary.
- if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
- // The properties must be extended before we can store the value.
- // We jump to a runtime call that extends the properties array.
- __ Move(rcx, Handle<Map>(transition));
- Handle<Code> ic(Builtins::builtin(storage_extend));
- __ Jump(ic, RelocInfo::CODE_TARGET);
- return;
- }
-
- if (transition != NULL) {
- // Update the map of the object; no write barrier updating is
- // needed because the map is never in new space.
- __ Move(FieldOperand(receiver_reg, HeapObject::kMapOffset),
- Handle<Map>(transition));
- }
-
- // Adjust for the number of properties stored in the object. Even in the
- // face of a transition we can use the old map here because the size of the
- // object and the number of in-object properties is not going to change.
- index -= object->map()->inobject_properties();
-
- if (index < 0) {
- // Set the property straight into the object.
- int offset = object->map()->instance_size() + (index * kPointerSize);
- __ movq(FieldOperand(receiver_reg, offset), rax);
-
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, rax);
- __ RecordWrite(receiver_reg, offset, name_reg, scratch);
- } else {
- // Write to the properties array.
- int offset = index * kPointerSize + FixedArray::kHeaderSize;
- // Get the properties array (optimistically).
- __ movq(scratch, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
- __ movq(FieldOperand(scratch, offset), rax);
-
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ movq(name_reg, rax);
- __ RecordWrite(scratch, offset, name_reg, receiver_reg);
- }
-
- // Return the value (register rax).
- __ ret(0);
-}
-
-
-#undef __
-
-
} } // namespace v8::internal