Diffstat (limited to 'V8Binding/v8/src/x64/macro-assembler-x64.cc')
-rw-r--r--  V8Binding/v8/src/x64/macro-assembler-x64.cc  186
1 file changed, 179 insertions, 7 deletions
diff --git a/V8Binding/v8/src/x64/macro-assembler-x64.cc b/V8Binding/v8/src/x64/macro-assembler-x64.cc
index 457011b..2219a5a 100644
--- a/V8Binding/v8/src/x64/macro-assembler-x64.cc
+++ b/V8Binding/v8/src/x64/macro-assembler-x64.cc
@@ -31,6 +31,7 @@
 #include "codegen-inl.h"
 #include "assembler-x64.h"
 #include "macro-assembler-x64.h"
+#include "serialize.h"
 #include "debug.h"
 
 namespace v8 {
@@ -45,11 +46,163 @@ MacroAssembler::MacroAssembler(void* buffer, int size)
 }
 
 
-// TODO(x64): For now, the write barrier is disabled on x64 and we
-// therefore generate no code. This should be fixed when the write
-// barrier is enabled.
-void MacroAssembler::RecordWrite(Register object, int offset,
-                                 Register value, Register scratch) {
+
+static void RecordWriteHelper(MacroAssembler* masm,
+                              Register object,
+                              Register addr,
+                              Register scratch) {
+  Label fast;
+
+  // Compute the page start address from the heap object pointer, and reuse
+  // the 'object' register for it.
+  ASSERT(is_int32(~Page::kPageAlignmentMask));
+  masm->and_(object,
+             Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask)));
+  Register page_start = object;
+
+  // Compute the bit address in the remembered set (the index of the pointer
+  // in the page). Reuse 'addr' as pointer_offset.
+  masm->subq(addr, page_start);
+  masm->shr(addr, Immediate(kPointerSizeLog2));
+  Register pointer_offset = addr;
+
+  // If the bit offset lies beyond the normal remembered set range, it is in
+  // the extra remembered set area of a large object.
+  masm->cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize));
+  masm->j(less, &fast);
+
+  // Adjust 'page_start' so that addressing using 'pointer_offset' hits the
+  // extra remembered set after the large object.
+
+  // Load the array length into 'scratch'.
+  masm->movl(scratch,
+             Operand(page_start,
+                     Page::kObjectStartOffset + FixedArray::kLengthOffset));
+  Register array_length = scratch;
+
+  // The extra remembered set starts right after the large object (a
+  // FixedArray), at page_start + kObjectStartOffset + objectSize, where
+  // objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
+  // Add the delta between the end of the normal RSet and the start of the
+  // extra RSet to 'page_start', so that addressing the bit using
+  // 'pointer_offset' hits the extra RSet words.
+  masm->lea(page_start,
+            Operand(page_start, array_length, times_pointer_size,
+                    Page::kObjectStartOffset + FixedArray::kHeaderSize
+                        - Page::kRSetEndOffset));
+
+  // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
+  // to limit code size. We should probably evaluate this decision by
+  // measuring the performance of an equivalent implementation using
+  // "simpler" instructions.
+  masm->bind(&fast);
+  masm->bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
+}
+
+
+class RecordWriteStub : public CodeStub {
+ public:
+  RecordWriteStub(Register object, Register addr, Register scratch)
+      : object_(object), addr_(addr), scratch_(scratch) { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Register object_;
+  Register addr_;
+  Register scratch_;
+
+#ifdef DEBUG
+  void Print() {
+    PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
+           object_.code(), addr_.code(), scratch_.code());
+  }
+#endif
+
+  // Minor key encoding in 12 bits of three registers (object, address and
+  // scratch): OOOOAAAASSSS.
+  class ScratchBits : public BitField<uint32_t, 0, 4> {};
+  class AddressBits : public BitField<uint32_t, 4, 4> {};
+  class ObjectBits : public BitField<uint32_t, 8, 4> {};
+
+  Major MajorKey() { return RecordWrite; }
+
+  int MinorKey() {
+    // Encode the registers.
+    return ObjectBits::encode(object_.code()) |
+           AddressBits::encode(addr_.code()) |
+           ScratchBits::encode(scratch_.code());
+  }
+};
+
+
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  RecordWriteHelper(masm, object_, addr_, scratch_);
+  masm->ret(0);
+}
+
+
+// Set the remembered set bit for [object+offset].
+// object is the object being stored into, value is the object being stored.
+// If offset is zero, then the scratch register contains the array index into
+// the elements array represented as a Smi.
+// All registers are clobbered by the operation.
+void MacroAssembler::RecordWrite(Register object,
+                                 int offset,
+                                 Register value,
+                                 Register scratch) {
+  // First, check if a remembered set write is even needed. The tests below
+  // catch stores of Smis and stores into young gen (which does not have space
+  // for the remembered set bits).
+  Label done;
+
+  // Test that the object address is not in the new space. We cannot
+  // set remembered set bits in the new space.
+  movq(value, object);
+  ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
+  and_(value, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
+  movq(kScratchRegister, ExternalReference::new_space_start());
+  cmpq(value, kScratchRegister);
+  j(equal, &done);
+
+  if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
+    // Compute the bit offset in the remembered set, leave it in 'value'.
+    lea(value, Operand(object, offset));
+    ASSERT(is_int32(Page::kPageAlignmentMask));
+    and_(value, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
+    shr(value, Immediate(kObjectAlignmentBits));
+
+    // Compute the page address from the heap object pointer, leave it in
+    // 'object' (immediate value is sign extended).
+    and_(object, Immediate(~Page::kPageAlignmentMask));
+
+    // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
+    // to limit code size. We should probably evaluate this decision by
+    // measuring the performance of an equivalent implementation using
+    // "simpler" instructions.
+    bts(Operand(object, Page::kRSetOffset), value);
+  } else {
+    Register dst = scratch;
+    if (offset != 0) {
+      lea(dst, Operand(object, offset));
+    } else {
+      // Array access: calculate the destination address in the same manner as
+      // KeyedStoreIC::GenerateGeneric. Multiply a smi by 4 to get an offset
+      // into an array of pointers.
+      lea(dst, Operand(object, dst, times_half_pointer_size,
+                       FixedArray::kHeaderSize - kHeapObjectTag));
+    }
+    // If we are already generating a shared stub, not inlining the
+    // record write code isn't going to save us any memory.
+    if (generating_stub()) {
+      RecordWriteHelper(this, object, dst, value);
+    } else {
+      RecordWriteStub stub(object, dst, value);
+      CallStub(&stub);
+    }
+  }
+
+  bind(&done);
 }
 
 
@@ -71,9 +224,9 @@ void MacroAssembler::NegativeZeroTest(Register result, Register op,
                                       Label* then_label) {
   Label ok;
-  testq(result, result);
+  testl(result, result);
   j(not_zero, &ok);
-  testq(op, op);
+  testl(op, op);
   j(sign, then_label);
   bind(&ok);
 }
 
@@ -151,6 +304,13 @@ void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
 
 void MacroAssembler::TailCallRuntime(ExternalReference const& ext,
                                      int num_arguments) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0] : return address
+  //  -- rsp[8] : argument num_arguments - 1
+  //  ...
+  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
+  // -----------------------------------
+
   // TODO(1236192): Most runtime routines don't need the number of
   // arguments passed in because it is constant. At some point we
   // should remove this need and make the runtime routine entry code
@@ -311,6 +471,17 @@ void MacroAssembler::Push(Handle<Object> source) {
 }
 
+
+void MacroAssembler::Push(Smi* source) {
+  if (IsUnsafeSmi(source)) {
+    LoadUnsafeSmi(kScratchRegister, source);
+    push(kScratchRegister);
+  } else {
+    int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(source));
+    push(Immediate(smi));
+  }
+}
+
 void MacroAssembler::Jump(ExternalReference ext) {
   movq(kScratchRegister, ext);
   jmp(kScratchRegister);
 }
@@ -356,6 +527,7 @@ void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
   ASSERT(RelocInfo::IsCodeTarget(rmode));
   movq(kScratchRegister, code_object, rmode);
 #ifdef DEBUG
+  // Patch target is kPointerSize bytes *before* target label.
   Label target;
   bind(&target);
 #endif
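A note for readers following the remembered-set arithmetic in RecordWriteHelper: the and_/subq/shr sequence reduces a slot address to its pointer index within the page, and bts then treats the memory at page_start + Page::kRSetOffset as one long bit string indexed by that value. Below is a minimal C++ sketch of the same computation; the constants are illustrative stand-ins (an 8K page and 8-byte pointers are assumed for this sketch, not taken from the real Page:: declarations).

#include <cassert>
#include <cstdint>

// Assumed stand-in constants; the real ones are Page::kPageSize etc.
const uintptr_t kPageSize = 8 * 1024;
const uintptr_t kPageAlignmentMask = kPageSize - 1;
const int kPointerSizeLog2 = 3;  // 8-byte pointers on x64

// Which remembered-set bit does a store to 'slot_addr' flip? 'bts' addresses
// memory as a bit string, so bit n lives at byte n/8, bit position n%8.
void RSetBitFor(uintptr_t slot_addr, uintptr_t* byte_index, int* bit) {
  uintptr_t page_start = slot_addr & ~kPageAlignmentMask;  // the and_
  uintptr_t pointer_offset =
      (slot_addr - page_start) >> kPointerSizeLog2;        // the subq + shr
  *byte_index = pointer_offset >> 3;
  *bit = static_cast<int>(pointer_offset & 7);
}

int main() {
  uintptr_t byte_index;
  int bit;
  // A slot 0x88 bytes into its page is pointer number 17: byte 2, bit 1.
  RSetBitFor(3 * kPageSize + 0x88, &byte_index, &bit);
  assert(byte_index == 2 && bit == 1);
}

The large-object branch in the helper only relocates the base of that bit string (the lea that moves page_start past the FixedArray); the index computation itself is unchanged.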
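The OOOOAAAASSSS minor key deserves a gloss: it packs three 4-bit x64 register codes into 12 bits so the stub cache can share one compiled RecordWriteStub per (object, address, scratch) register triple. Here is a hedged sketch of the encoding with plain shifts and masks, equivalent to what the BitField<uint32_t, shift, size> helpers expand to; the register codes used in main are for illustration only.

#include <cassert>
#include <cstdint>

// Plain-shift equivalent of the three BitField declarations:
//   ScratchBits: bits 0..3, AddressBits: bits 4..7, ObjectBits: bits 8..11.
uint32_t EncodeMinorKey(int object_code, int addr_code, int scratch_code) {
  return (static_cast<uint32_t>(object_code) << 8) |
         (static_cast<uint32_t>(addr_code) << 4) |
         static_cast<uint32_t>(scratch_code);
}

int main() {
  // Illustrative codes: object = 0, addr = 1, scratch = 2 -> key 0x012.
  uint32_t key = EncodeMinorKey(0, 1, 2);
  assert(key == 0x012);
  assert((key & 0xF) == 2);         // ScratchBits::decode
  assert(((key >> 4) & 0xF) == 1);  // AddressBits::decode
  assert(((key >> 8) & 0xF) == 0);  // ObjectBits::decode
}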
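The new Push(Smi* source) works because a Smi is a tagged integer masquerading as a pointer: the value lives in the tagged word itself, so a safe smi can be pushed as a 32-bit immediate. A minimal sketch of the one-bit tagging this relies on, assuming the classic kSmiTag = 0 / kSmiTagSize = 1 layout (an assumption for this sketch; the IsUnsafeSmi/LoadUnsafeSmi path, for values that must not appear verbatim in generated code, is not modeled):

#include <cassert>
#include <cstdint>

// Assumed tag layout: smis carry a 0 tag bit in the lowest position.
const intptr_t kSmiTag = 0;
const int kSmiTagSize = 1;

// Encode an integer as a tagged smi word, as a Smi* would store it.
intptr_t SmiEncode(int32_t value) {
  return (static_cast<intptr_t>(value) << kSmiTagSize) | kSmiTag;
}

// Recover the integer; a signed right shift undoes the tagging.
int32_t SmiDecode(intptr_t tagged) {
  return static_cast<int32_t>(tagged >> kSmiTagSize);
}

int main() {
  assert(SmiDecode(SmiEncode(-42)) == -42);
  // A small smi's tagged word fits in an int32_t, so it can be pushed as an
  // immediate, which is the fast path taken by Push(Smi*).
  assert(SmiEncode(1000) == 2000 + kSmiTag);
}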