path: root/V8Binding/v8/src/arm
author Feng Qian <fqian@google.com> 2009-06-19 10:54:19 -0700
committer Feng Qian <fqian@google.com> 2009-06-19 10:54:19 -0700
commit 5c6ed9782f6ba890faf887aa2a434c6ec0d8db69 (patch)
tree b0ee81af078307f558400c69e89169f6555e0fc9 /V8Binding/v8/src/arm
parent 7f65b2f4c873a32f4d8e3c3ad8d339c57173a7b5 (diff)
Drop in v8 r2121
From: "http://v8.googlecode.com/svn/trunk@2121", It matches "svn://chrome-svn/chrome/branches/187/src@18043"
Diffstat (limited to 'V8Binding/v8/src/arm')
-rw-r--r--  V8Binding/v8/src/arm/assembler-arm-inl.h            250
-rw-r--r--  V8Binding/v8/src/arm/assembler-arm.cc              1474
-rw-r--r--  V8Binding/v8/src/arm/assembler-arm.h                788
-rw-r--r--  V8Binding/v8/src/arm/builtins-arm.cc                700
-rw-r--r--  V8Binding/v8/src/arm/codegen-arm-inl.h               46
-rw-r--r--  V8Binding/v8/src/arm/codegen-arm.cc                5199
-rw-r--r--  V8Binding/v8/src/arm/codegen-arm.h                  459
-rw-r--r--  V8Binding/v8/src/arm/constants-arm.h                241
-rw-r--r--  V8Binding/v8/src/arm/cpu-arm.cc                     125
-rw-r--r--  V8Binding/v8/src/arm/debug-arm.cc                   197
-rw-r--r--  V8Binding/v8/src/arm/disasm-arm.cc                  901
-rw-r--r--  V8Binding/v8/src/arm/frames-arm.cc                  118
-rw-r--r--  V8Binding/v8/src/arm/frames-arm.h                   380
-rw-r--r--  V8Binding/v8/src/arm/ic-arm.cc                      807
-rw-r--r--  V8Binding/v8/src/arm/jump-target-arm.cc             324
-rw-r--r--  V8Binding/v8/src/arm/macro-assembler-arm.cc         959
-rw-r--r--  V8Binding/v8/src/arm/macro-assembler-arm.h          314
-rw-r--r--  V8Binding/v8/src/arm/regexp-macro-assembler-arm.cc   44
-rw-r--r--  V8Binding/v8/src/arm/regexp-macro-assembler-arm.h    42
-rw-r--r--  V8Binding/v8/src/arm/register-allocator-arm-inl.h   103
-rw-r--r--  V8Binding/v8/src/arm/register-allocator-arm.cc       59
-rw-r--r--  V8Binding/v8/src/arm/register-allocator-arm.h        43
-rw-r--r--  V8Binding/v8/src/arm/simulator-arm.cc              1688
-rw-r--r--  V8Binding/v8/src/arm/simulator-arm.h                205
-rw-r--r--  V8Binding/v8/src/arm/stub-cache-arm.cc             1148
-rw-r--r--  V8Binding/v8/src/arm/virtual-frame-arm.cc           439
-rw-r--r--  V8Binding/v8/src/arm/virtual-frame-arm.h            536
27 files changed, 17589 insertions, 0 deletions
diff --git a/V8Binding/v8/src/arm/assembler-arm-inl.h b/V8Binding/v8/src/arm/assembler-arm-inl.h
new file mode 100644
index 0000000..824a5fd
--- /dev/null
+++ b/V8Binding/v8/src/arm/assembler-arm-inl.h
@@ -0,0 +1,250 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been modified
+// significantly by Google Inc.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+
+#ifndef V8_ARM_ASSEMBLER_ARM_INL_H_
+#define V8_ARM_ASSEMBLER_ARM_INL_H_
+
+#include "arm/assembler-arm.h"
+#include "cpu.h"
+
+
+namespace v8 {
+namespace internal {
+
+Condition NegateCondition(Condition cc) {
+ ASSERT(cc != al);
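+  // Conditions occupy bits 28..31 and complementary conditions differ only
+  // in the low bit of that field, so XOR-ing with ne (1 << 28) negates the
+  // condition (e.g. eq <-> ne, ge <-> lt). al is excluded above because
+  // al ^ ne is the reserved nv encoding.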
+ return static_cast<Condition>(cc ^ ne);
+}
+
+
+void RelocInfo::apply(int delta) {
+ if (RelocInfo::IsInternalReference(rmode_)) {
+ // absolute code pointer inside code object moves with the code object.
+ int32_t* p = reinterpret_cast<int32_t*>(pc_);
+ *p += delta; // relocate entry
+ }
+ // We do not use pc relative addressing on ARM, so there is
+ // nothing else to do.
+}
+
+
+Address RelocInfo::target_address() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ return Assembler::target_address_at(pc_);
+}
+
+
+Address RelocInfo::target_address_address() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ return reinterpret_cast<Address>(Assembler::target_address_address_at(pc_));
+}
+
+
+void RelocInfo::set_target_address(Address target) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
+ Assembler::set_target_address_at(pc_, target);
+}
+
+
+Object* RelocInfo::target_object() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+}
+
+
+Object** RelocInfo::target_object_address() {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ return reinterpret_cast<Object**>(Assembler::target_address_address_at(pc_));
+}
+
+
+void RelocInfo::set_target_object(Object* target) {
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+ Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
+}
+
+
+Address* RelocInfo::target_reference_address() {
+ ASSERT(rmode_ == EXTERNAL_REFERENCE);
+ return reinterpret_cast<Address*>(Assembler::target_address_address_at(pc_));
+}
+
+
+Address RelocInfo::call_address() {
+ ASSERT(IsCallInstruction());
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+ ASSERT(IsCallInstruction());
+ UNIMPLEMENTED();
+}
+
+
+Object* RelocInfo::call_object() {
+ ASSERT(IsCallInstruction());
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+Object** RelocInfo::call_object_address() {
+ ASSERT(IsCallInstruction());
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+void RelocInfo::set_call_object(Object* target) {
+ ASSERT(IsCallInstruction());
+ UNIMPLEMENTED();
+}
+
+
+bool RelocInfo::IsCallInstruction() {
+ UNIMPLEMENTED();
+ return false;
+}
+
+
+Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
+ rm_ = no_reg;
+ imm32_ = immediate;
+ rmode_ = rmode;
+}
+
+
+Operand::Operand(const char* s) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<int32_t>(s);
+ rmode_ = RelocInfo::EMBEDDED_STRING;
+}
+
+
+Operand::Operand(const ExternalReference& f) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<int32_t>(f.address());
+ rmode_ = RelocInfo::EXTERNAL_REFERENCE;
+}
+
+
+Operand::Operand(Object** opp) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<int32_t>(opp);
+ rmode_ = RelocInfo::NONE;
+}
+
+
+Operand::Operand(Context** cpp) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<int32_t>(cpp);
+ rmode_ = RelocInfo::NONE;
+}
+
+
+Operand::Operand(Smi* value) {
+ rm_ = no_reg;
+ imm32_ = reinterpret_cast<intptr_t>(value);
+ rmode_ = RelocInfo::NONE;
+}
+
+
+Operand::Operand(Register rm) {
+ rm_ = rm;
+ rs_ = no_reg;
+ shift_op_ = LSL;
+ shift_imm_ = 0;
+}
+
+
+bool Operand::is_reg() const {
+ return rm_.is_valid() &&
+ rs_.is(no_reg) &&
+ shift_op_ == LSL &&
+ shift_imm_ == 0;
+}
+
+
+void Assembler::CheckBuffer() {
+ if (buffer_space() <= kGap) {
+ GrowBuffer();
+ }
+ if (pc_offset() > next_buffer_check_) {
+ CheckConstPool(false, true);
+ }
+}
+
+
+void Assembler::emit(Instr x) {
+ CheckBuffer();
+ *reinterpret_cast<Instr*>(pc_) = x;
+ pc_ += kInstrSize;
+}
+
+
+Address Assembler::target_address_address_at(Address pc) {
+ Instr instr = Memory::int32_at(pc);
+ // Verify that the instruction at pc is a ldr<cond> <Rd>, [pc +/- offset_12].
+ ASSERT((instr & 0x0f7f0000) == 0x051f0000);
+ int offset = instr & 0xfff; // offset_12 is unsigned
+ if ((instr & (1 << 23)) == 0) offset = -offset; // U bit defines offset sign
+ // Verify that the constant pool comes after the instruction referencing it.
+ ASSERT(offset >= -4);
+ return pc + offset + 8;
+}
+
+
+Address Assembler::target_address_at(Address pc) {
+ return Memory::Address_at(target_address_address_at(pc));
+}
+
+
+void Assembler::set_target_address_at(Address pc, Address target) {
+ Memory::Address_at(target_address_address_at(pc)) = target;
+ // Intuitively, we would think it is necessary to flush the instruction cache
+ // after patching a target address in the code as follows:
+ // CPU::FlushICache(pc, sizeof(target));
+ // However, on ARM, no instruction was actually patched by the assignment
+ // above; the target address is not part of an instruction, it is patched in
+ // the constant pool and is read via a data access; the instruction accessing
+ // this address in the constant pool remains unchanged.
+}
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_ASSEMBLER_ARM_INL_H_
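
A minimal standalone sketch of the pc-relative ldr decoding implemented by
Assembler::target_address_address_at() above; the name ConstPoolSlot and the
example addresses are illustrative, not part of this tree:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Given the encoding of 'ldr<cond> Rd, [pc, #+/-offset_12]' and the address
// of that instruction, compute the address of the constant-pool slot it
// reads. On ARM, reading pc yields the instruction address plus 8.
uint32_t ConstPoolSlot(uint32_t instr, uint32_t instr_address) {
  assert((instr & 0x0f7f0000) == 0x051f0000);       // ldr Rd, [pc, #offset_12]
  int32_t offset = instr & 0xfff;                   // offset_12 is unsigned
  if ((instr & (1u << 23)) == 0) offset = -offset;  // U bit gives the sign
  return instr_address + offset + 8;
}

int main() {
  // 0xe59f0004 is 'ldr r0, [pc, #4]'; at address 0x1000 it reads the slot at
  // 0x1000 + 4 + 8 = 0x100c.
  printf("0x%x\n", ConstPoolSlot(0xe59f0004u, 0x1000u));
  return 0;
}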
diff --git a/V8Binding/v8/src/arm/assembler-arm.cc b/V8Binding/v8/src/arm/assembler-arm.cc
new file mode 100644
index 0000000..6ec8f46
--- /dev/null
+++ b/V8Binding/v8/src/arm/assembler-arm.cc
@@ -0,0 +1,1474 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been modified
+// significantly by Google Inc.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+
+#include "v8.h"
+
+#include "arm/assembler-arm-inl.h"
+#include "serialize.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Implementation of Register and CRegister
+
+Register no_reg = { -1 };
+
+Register r0 = { 0 };
+Register r1 = { 1 };
+Register r2 = { 2 };
+Register r3 = { 3 };
+Register r4 = { 4 };
+Register r5 = { 5 };
+Register r6 = { 6 };
+Register r7 = { 7 };
+Register r8 = { 8 };
+Register r9 = { 9 };
+Register r10 = { 10 };
+Register fp = { 11 };
+Register ip = { 12 };
+Register sp = { 13 };
+Register lr = { 14 };
+Register pc = { 15 };
+
+
+CRegister no_creg = { -1 };
+
+CRegister cr0 = { 0 };
+CRegister cr1 = { 1 };
+CRegister cr2 = { 2 };
+CRegister cr3 = { 3 };
+CRegister cr4 = { 4 };
+CRegister cr5 = { 5 };
+CRegister cr6 = { 6 };
+CRegister cr7 = { 7 };
+CRegister cr8 = { 8 };
+CRegister cr9 = { 9 };
+CRegister cr10 = { 10 };
+CRegister cr11 = { 11 };
+CRegister cr12 = { 12 };
+CRegister cr13 = { 13 };
+CRegister cr14 = { 14 };
+CRegister cr15 = { 15 };
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+const int RelocInfo::kApplyMask = 0;
+
+
+void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
+ // Patch the code at the current address with the supplied instructions.
+ UNIMPLEMENTED();
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard instructions can be added if required.
+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+ // Patch the code at the current address with a call to the target.
+ UNIMPLEMENTED();
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand and MemOperand
+// See assembler-arm-inl.h for inlined constructors
+
+Operand::Operand(Handle<Object> handle) {
+ rm_ = no_reg;
+ // Verify all Objects referred by code are NOT in new space.
+ Object* obj = *handle;
+ ASSERT(!Heap::InNewSpace(obj));
+ if (obj->IsHeapObject()) {
+ imm32_ = reinterpret_cast<intptr_t>(handle.location());
+ rmode_ = RelocInfo::EMBEDDED_OBJECT;
+ } else {
+ // no relocation needed
+ imm32_ = reinterpret_cast<intptr_t>(obj);
+ rmode_ = RelocInfo::NONE;
+ }
+}
+
+
+Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
+ ASSERT(is_uint5(shift_imm));
+ ASSERT(shift_op != ROR || shift_imm != 0); // use RRX if you mean it
+ rm_ = rm;
+ rs_ = no_reg;
+ shift_op_ = shift_op;
+ shift_imm_ = shift_imm & 31;
+ if (shift_op == RRX) {
+ // encoded as ROR with shift_imm == 0
+ ASSERT(shift_imm == 0);
+ shift_op_ = ROR;
+ shift_imm_ = 0;
+ }
+}
+
+
+Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
+ ASSERT(shift_op != RRX);
+ rm_ = rm;
+ rs_ = no_reg;
+ shift_op_ = shift_op;
+ rs_ = rs;
+}
+
+
+MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
+ rn_ = rn;
+ rm_ = no_reg;
+ offset_ = offset;
+ am_ = am;
+}
+
+MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
+ rn_ = rn;
+ rm_ = rm;
+ shift_op_ = LSL;
+ shift_imm_ = 0;
+ am_ = am;
+}
+
+
+MemOperand::MemOperand(Register rn, Register rm,
+ ShiftOp shift_op, int shift_imm, AddrMode am) {
+ ASSERT(is_uint5(shift_imm));
+ rn_ = rn;
+ rm_ = rm;
+ shift_op_ = shift_op;
+ shift_imm_ = shift_imm & 31;
+ am_ = am;
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Assembler
+
+// Instruction encoding bits
+enum {
+ H = 1 << 5, // halfword (or byte)
+ S6 = 1 << 6, // signed (or unsigned)
+ L = 1 << 20, // load (or store)
+ S = 1 << 20, // set condition code (or leave unchanged)
+ W = 1 << 21, // writeback base register (or leave unchanged)
+ A = 1 << 21, // accumulate in multiply instruction (or not)
+ B = 1 << 22, // unsigned byte (or word)
+ N = 1 << 22, // long (or short)
+ U = 1 << 23, // positive (or negative) offset/index
+ P = 1 << 24, // offset/pre-indexed addressing (or post-indexed addressing)
+ I = 1 << 25, // immediate shifter operand (or not)
+
+ B4 = 1 << 4,
+ B5 = 1 << 5,
+ B7 = 1 << 7,
+ B8 = 1 << 8,
+ B12 = 1 << 12,
+ B16 = 1 << 16,
+ B20 = 1 << 20,
+ B21 = 1 << 21,
+ B22 = 1 << 22,
+ B23 = 1 << 23,
+ B24 = 1 << 24,
+ B25 = 1 << 25,
+ B26 = 1 << 26,
+ B27 = 1 << 27,
+
+ // Instruction bit masks
+ RdMask = 15 << 12, // in str instruction
+ CondMask = 15 << 28,
+ CoprocessorMask = 15 << 8,
+ OpCodeMask = 15 << 21, // in data-processing instructions
+ Imm24Mask = (1 << 24) - 1,
+ Off12Mask = (1 << 12) - 1,
+ // Reserved condition
+ nv = 15 << 28
+};
+
+
+// add(sp, sp, 4) instruction (aka Pop())
+static const Instr kPopInstruction =
+ al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
+// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
+// register r is not encoded.
+static const Instr kPushRegPattern =
+ al | B26 | 4 | NegPreIndex | sp.code() * B16;
+// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
+// register r is not encoded.
+static const Instr kPopRegPattern =
+ al | B26 | L | 4 | PostIndex | sp.code() * B16;
+
+// spare_buffer_
+static const int kMinimalBufferSize = 4*KB;
+static byte* spare_buffer_ = NULL;
+
+Assembler::Assembler(void* buffer, int buffer_size) {
+ if (buffer == NULL) {
+ // do our own buffer management
+ if (buffer_size <= kMinimalBufferSize) {
+ buffer_size = kMinimalBufferSize;
+
+ if (spare_buffer_ != NULL) {
+ buffer = spare_buffer_;
+ spare_buffer_ = NULL;
+ }
+ }
+ if (buffer == NULL) {
+ buffer_ = NewArray<byte>(buffer_size);
+ } else {
+ buffer_ = static_cast<byte*>(buffer);
+ }
+ buffer_size_ = buffer_size;
+ own_buffer_ = true;
+
+ } else {
+ // use externally provided buffer instead
+ ASSERT(buffer_size > 0);
+ buffer_ = static_cast<byte*>(buffer);
+ buffer_size_ = buffer_size;
+ own_buffer_ = false;
+ }
+
+ // setup buffer pointers
+ ASSERT(buffer_ != NULL);
+ pc_ = buffer_;
+ reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
+ num_prinfo_ = 0;
+ next_buffer_check_ = 0;
+ no_const_pool_before_ = 0;
+ last_const_pool_end_ = 0;
+ last_bound_pos_ = 0;
+ current_statement_position_ = RelocInfo::kNoPosition;
+ current_position_ = RelocInfo::kNoPosition;
+ written_statement_position_ = current_statement_position_;
+ written_position_ = current_position_;
+}
+
+
+Assembler::~Assembler() {
+ if (own_buffer_) {
+ if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
+ spare_buffer_ = buffer_;
+ } else {
+ DeleteArray(buffer_);
+ }
+ }
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+ // emit constant pool if necessary
+ CheckConstPool(true, false);
+ ASSERT(num_prinfo_ == 0);
+
+ // setup desc
+ desc->buffer = buffer_;
+ desc->buffer_size = buffer_size_;
+ desc->instr_size = pc_offset();
+ desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+}
+
+
+void Assembler::Align(int m) {
+ ASSERT(m >= 4 && IsPowerOf2(m));
+ while ((pc_offset() & (m - 1)) != 0) {
+ nop();
+ }
+}
+
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the last
+// instruction using the label.
+
+
+// The link chain is terminated by a negative code position (must be aligned)
+const int kEndOfChain = -4;
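+// A linked label threads its chain through the unresolved branch instructions
+// themselves: each branch's offset field points (pc-relative) at the previous
+// use of the label, and the chain terminates when target_at() decodes
+// kEndOfChain.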
+
+
+int Assembler::target_at(int pos) {
+ Instr instr = instr_at(pos);
+ ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
+ int imm26 = ((instr & Imm24Mask) << 8) >> 6;
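+  // The left shift moves the imm24 sign bit to bit 31, so the arithmetic
+  // shift back sign-extends; the net shift of 6 also multiplies the word
+  // offset by 4 (e.g. a field of 0xffffff decodes to imm26 = -4).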
+ if ((instr & CondMask) == nv && (instr & B24) != 0)
+ // blx uses bit 24 to encode bit 2 of imm26
+ imm26 += 2;
+
+ return pos + 8 + imm26;
+}
+
+
+void Assembler::target_at_put(int pos, int target_pos) {
+ int imm26 = target_pos - pos - 8;
+ Instr instr = instr_at(pos);
+ ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24
+ if ((instr & CondMask) == nv) {
+ // blx uses bit 24 to encode bit 2 of imm26
+ ASSERT((imm26 & 1) == 0);
+ instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
+ } else {
+ ASSERT((imm26 & 3) == 0);
+ instr &= ~Imm24Mask;
+ }
+ int imm24 = imm26 >> 2;
+ ASSERT(is_int24(imm24));
+ instr_at_put(pos, instr | (imm24 & Imm24Mask));
+}
+
+
+void Assembler::print(Label* L) {
+ if (L->is_unused()) {
+ PrintF("unused label\n");
+ } else if (L->is_bound()) {
+ PrintF("bound label to %d\n", L->pos());
+ } else if (L->is_linked()) {
+ Label l = *L;
+ PrintF("unbound label");
+ while (l.is_linked()) {
+ PrintF("@ %d ", l.pos());
+ Instr instr = instr_at(l.pos());
+ ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx
+ int cond = instr & CondMask;
+ const char* b;
+ const char* c;
+ if (cond == nv) {
+ b = "blx";
+ c = "";
+ } else {
+ if ((instr & B24) != 0)
+ b = "bl";
+ else
+ b = "b";
+
+ switch (cond) {
+ case eq: c = "eq"; break;
+ case ne: c = "ne"; break;
+ case hs: c = "hs"; break;
+ case lo: c = "lo"; break;
+ case mi: c = "mi"; break;
+ case pl: c = "pl"; break;
+ case vs: c = "vs"; break;
+ case vc: c = "vc"; break;
+ case hi: c = "hi"; break;
+ case ls: c = "ls"; break;
+ case ge: c = "ge"; break;
+ case lt: c = "lt"; break;
+ case gt: c = "gt"; break;
+ case le: c = "le"; break;
+ case al: c = ""; break;
+ default:
+ c = "";
+ UNREACHABLE();
+ }
+ }
+ PrintF("%s%s\n", b, c);
+ next(&l);
+ }
+ } else {
+ PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
+ }
+}
+
+
+void Assembler::bind_to(Label* L, int pos) {
+ ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
+ while (L->is_linked()) {
+ int fixup_pos = L->pos();
+ next(L); // call next before overwriting link with target at fixup_pos
+ target_at_put(fixup_pos, pos);
+ }
+ L->bind_to(pos);
+
+ // Keep track of the last bound label so we don't eliminate any instructions
+ // before a bound label.
+ if (pos > last_bound_pos_)
+ last_bound_pos_ = pos;
+}
+
+
+void Assembler::link_to(Label* L, Label* appendix) {
+ if (appendix->is_linked()) {
+ if (L->is_linked()) {
+ // append appendix to L's list
+ int fixup_pos;
+ int link = L->pos();
+ do {
+ fixup_pos = link;
+ link = target_at(fixup_pos);
+ } while (link > 0);
+ ASSERT(link == kEndOfChain);
+ target_at_put(fixup_pos, appendix->pos());
+ } else {
+ // L is empty, simply use appendix
+ *L = *appendix;
+ }
+ }
+ appendix->Unuse(); // appendix should not be used anymore
+}
+
+
+void Assembler::bind(Label* L) {
+ ASSERT(!L->is_bound()); // label can only be bound once
+ bind_to(L, pc_offset());
+}
+
+
+void Assembler::next(Label* L) {
+ ASSERT(L->is_linked());
+ int link = target_at(L->pos());
+ if (link > 0) {
+ L->link_to(link);
+ } else {
+ ASSERT(link == kEndOfChain);
+ L->Unuse();
+ }
+}
+
+
+// Low-level code emission routines depending on the addressing mode
+static bool fits_shifter(uint32_t imm32,
+ uint32_t* rotate_imm,
+ uint32_t* immed_8,
+ Instr* instr) {
+ // imm32 must be unsigned
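+  // A value fits if it is an 8-bit value rotated right by 2*rot for some
+  // rot in 0..15; e.g. 0x3fc is 0xff rotated right by 30, so it yields
+  // rotate_imm == 15 and immed_8 == 0xff.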
+ for (int rot = 0; rot < 16; rot++) {
+ uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
+    if (imm8 <= 0xff) {
+ *rotate_imm = rot;
+ *immed_8 = imm8;
+ return true;
+ }
+ }
+ // if the opcode is mov or mvn and if ~imm32 fits, change the opcode
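+  // (e.g. mov rd, #0xffffff00 becomes mvn rd, #0xff); mov (0xd) and mvn
+  // (0xf) differ in a single opcode bit, hence the XOR with 0x2*B21 below.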
+ if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
+ if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
+ *instr ^= 0x2*B21;
+ return true;
+ }
+ }
+ return false;
+}
+
+
+void Assembler::addrmod1(Instr instr,
+ Register rn,
+ Register rd,
+ const Operand& x) {
+ CheckBuffer();
+ ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
+ if (!x.rm_.is_valid()) {
+ // immediate
+ uint32_t rotate_imm;
+ uint32_t immed_8;
+ if ((x.rmode_ != RelocInfo::NONE &&
+ x.rmode_ != RelocInfo::EXTERNAL_REFERENCE) ||
+ !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
+ // The immediate operand cannot be encoded as a shifter operand, so load
+ // it first to register ip and change the original instruction to use ip.
+ // However, if the original instruction is a 'mov rd, x' (not setting the
+ // condition code), then replace it with a 'ldr rd, [pc]'
+ RecordRelocInfo(x.rmode_, x.imm32_);
+ CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
+ Condition cond = static_cast<Condition>(instr & CondMask);
+ if ((instr & ~CondMask) == 13*B21) { // mov, S not set
+ ldr(rd, MemOperand(pc, 0), cond);
+ } else {
+ ldr(ip, MemOperand(pc, 0), cond);
+ addrmod1(instr, rn, rd, Operand(ip));
+ }
+ return;
+ }
+ instr |= I | rotate_imm*B8 | immed_8;
+ } else if (!x.rs_.is_valid()) {
+ // immediate shift
+ instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
+ } else {
+ // register shift
+ ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
+ instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
+ }
+ emit(instr | rn.code()*B16 | rd.code()*B12);
+ if (rn.is(pc) || x.rm_.is(pc))
+ // block constant pool emission for one instruction after reading pc
+ BlockConstPoolBefore(pc_offset() + kInstrSize);
+}
+
+
+void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
+ ASSERT((instr & ~(CondMask | B | L)) == B26);
+ int am = x.am_;
+ if (!x.rm_.is_valid()) {
+ // immediate offset
+ int offset_12 = x.offset_;
+ if (offset_12 < 0) {
+ offset_12 = -offset_12;
+ am ^= U;
+ }
+ if (!is_uint12(offset_12)) {
+ // immediate offset cannot be encoded, load it first to register ip
+ // rn (and rd in a load) should never be ip, or will be trashed
+ ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+ mov(ip, Operand(x.offset_), LeaveCC,
+ static_cast<Condition>(instr & CondMask));
+ addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
+ return;
+ }
+ ASSERT(offset_12 >= 0); // no masking needed
+ instr |= offset_12;
+ } else {
+ // register offset (shift_imm_ and shift_op_ are 0) or scaled
+    // register offset; the constructors make sure that both shift_imm_
+ // and shift_op_ are initialized
+ ASSERT(!x.rm_.is(pc));
+ instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
+ }
+ ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
+ emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
+}
+
+
+void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
+ ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
+ ASSERT(x.rn_.is_valid());
+ int am = x.am_;
+ if (!x.rm_.is_valid()) {
+ // immediate offset
+ int offset_8 = x.offset_;
+ if (offset_8 < 0) {
+ offset_8 = -offset_8;
+ am ^= U;
+ }
+ if (!is_uint8(offset_8)) {
+ // immediate offset cannot be encoded, load it first to register ip
+ // rn (and rd in a load) should never be ip, or will be trashed
+ ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+ mov(ip, Operand(x.offset_), LeaveCC,
+ static_cast<Condition>(instr & CondMask));
+ addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
+ return;
+ }
+ ASSERT(offset_8 >= 0); // no masking needed
+ instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
+ } else if (x.shift_imm_ != 0) {
+ // scaled register offset not supported, load index first
+ // rn (and rd in a load) should never be ip, or will be trashed
+ ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+ mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
+ static_cast<Condition>(instr & CondMask));
+ addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
+ return;
+ } else {
+ // register offset
+ ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback
+ instr |= x.rm_.code();
+ }
+ ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
+ emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
+}
+
+
+void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
+ ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
+ ASSERT(rl != 0);
+ ASSERT(!rn.is(pc));
+ emit(instr | rn.code()*B16 | rl);
+}
+
+
+void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
+ // unindexed addressing is not encoded by this function
+ ASSERT_EQ((B27 | B26),
+ (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
+ ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
+ int am = x.am_;
+ int offset_8 = x.offset_;
+ ASSERT((offset_8 & 3) == 0); // offset must be an aligned word offset
+ offset_8 >>= 2;
+ if (offset_8 < 0) {
+ offset_8 = -offset_8;
+ am ^= U;
+ }
+ ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte
+ ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback
+
+  // post-indexed addressing requires W == 1; different from addrmod2/3
+ if ((am & P) == 0)
+ am |= W;
+
+ ASSERT(offset_8 >= 0); // no masking needed
+ emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
+}
+
+
+int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
+ int target_pos;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link
+ } else {
+ target_pos = kEndOfChain;
+ }
+ L->link_to(pc_offset());
+ }
+
+ // Block the emission of the constant pool, since the branch instruction must
+ // be emitted at the pc offset recorded by the label
+ BlockConstPoolBefore(pc_offset() + kInstrSize);
+
+ return target_pos - pc_offset() - 8;
+}
+
+
+// Branch instructions
+void Assembler::b(int branch_offset, Condition cond) {
+ ASSERT((branch_offset & 3) == 0);
+ int imm24 = branch_offset >> 2;
+ ASSERT(is_int24(imm24));
+ emit(cond | B27 | B25 | (imm24 & Imm24Mask));
+
+ if (cond == al)
+ // dead code is a good location to emit the constant pool
+ CheckConstPool(false, false);
+}
+
+
+void Assembler::bl(int branch_offset, Condition cond) {
+ ASSERT((branch_offset & 3) == 0);
+ int imm24 = branch_offset >> 2;
+ ASSERT(is_int24(imm24));
+ emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
+}
+
+
+void Assembler::blx(int branch_offset) { // v5 and above
+ ASSERT((branch_offset & 1) == 0);
+ int h = ((branch_offset & 2) >> 1)*B24;
+ int imm24 = branch_offset >> 2;
+ ASSERT(is_int24(imm24));
+ emit(15 << 28 | B27 | B25 | h | (imm24 & Imm24Mask));
+}
+
+
+void Assembler::blx(Register target, Condition cond) { // v5 and above
+ ASSERT(!target.is(pc));
+ emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
+}
+
+
+void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
+ ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged
+ emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
+}
+
+
+// Data-processing instructions
+void Assembler::and_(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 0*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::eor(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 1*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::sub(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 2*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::rsb(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 3*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::add(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 4*B21 | s, src1, dst, src2);
+
+ // Eliminate pattern: push(r), pop()
+ // str(src, MemOperand(sp, 4, NegPreIndex), al);
+ // add(sp, sp, Operand(kPointerSize));
+ // Both instructions can be eliminated.
+ int pattern_size = 2 * kInstrSize;
+ if (FLAG_push_pop_elimination &&
+ last_bound_pos_ <= (pc_offset() - pattern_size) &&
+ reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+ // pattern
+ instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
+ (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
+ pc_ -= 2 * kInstrSize;
+ if (FLAG_print_push_pop_elimination) {
+ PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
+ }
+ }
+}
+
+
+void Assembler::adc(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 5*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::sbc(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 6*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::rsc(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 7*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
+ addrmod1(cond | 8*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
+ addrmod1(cond | 9*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
+ addrmod1(cond | 10*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
+ addrmod1(cond | 11*B21 | S, src1, r0, src2);
+}
+
+
+void Assembler::orr(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 12*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
+ addrmod1(cond | 13*B21 | s, r0, dst, src);
+}
+
+
+void Assembler::bic(Register dst, Register src1, const Operand& src2,
+ SBit s, Condition cond) {
+ addrmod1(cond | 14*B21 | s, src1, dst, src2);
+}
+
+
+void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
+ addrmod1(cond | 15*B21 | s, r0, dst, src);
+}
+
+
+// Multiply instructions
+void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
+ SBit s, Condition cond) {
+ ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
+ ASSERT(!dst.is(src1));
+ emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::mul(Register dst, Register src1, Register src2,
+ SBit s, Condition cond) {
+ ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+ ASSERT(!dst.is(src1));
+ emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::smlal(Register dstL,
+ Register dstH,
+ Register src1,
+ Register src2,
+ SBit s,
+ Condition cond) {
+ ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+ ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
+ emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::smull(Register dstL,
+ Register dstH,
+ Register src1,
+ Register src2,
+ SBit s,
+ Condition cond) {
+ ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+ ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
+ emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::umlal(Register dstL,
+ Register dstH,
+ Register src1,
+ Register src2,
+ SBit s,
+ Condition cond) {
+ ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+ ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
+ emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+void Assembler::umull(Register dstL,
+ Register dstH,
+ Register src1,
+ Register src2,
+ SBit s,
+ Condition cond) {
+ ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+ ASSERT(!dstL.is(dstH) && !dstH.is(src1) && !src1.is(dstL));
+ emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
+ src2.code()*B8 | B7 | B4 | src1.code());
+}
+
+
+// Miscellaneous arithmetic instructions
+void Assembler::clz(Register dst, Register src, Condition cond) {
+ // v5 and above.
+ ASSERT(!dst.is(pc) && !src.is(pc));
+ emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
+ 15*B8 | B4 | src.code());
+}
+
+
+// Status register access instructions
+void Assembler::mrs(Register dst, SRegister s, Condition cond) {
+ ASSERT(!dst.is(pc));
+ emit(cond | B24 | s | 15*B16 | dst.code()*B12);
+}
+
+
+void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
+ Condition cond) {
+ ASSERT(fields >= B16 && fields < B20); // at least one field set
+ Instr instr;
+ if (!src.rm_.is_valid()) {
+ // immediate
+ uint32_t rotate_imm;
+ uint32_t immed_8;
+ if ((src.rmode_ != RelocInfo::NONE &&
+ src.rmode_ != RelocInfo::EXTERNAL_REFERENCE)||
+ !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
+ // immediate operand cannot be encoded, load it first to register ip
+ RecordRelocInfo(src.rmode_, src.imm32_);
+ ldr(ip, MemOperand(pc, 0), cond);
+ msr(fields, Operand(ip), cond);
+ return;
+ }
+ instr = I | rotate_imm*B8 | immed_8;
+ } else {
+ ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0); // only rm allowed
+ instr = src.rm_.code();
+ }
+ emit(cond | instr | B24 | B21 | fields | 15*B12);
+}
+
+
+// Load/Store instructions
+void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
+ addrmod2(cond | B26 | L, dst, src);
+
+ // Eliminate pattern: push(r), pop(r)
+ // str(r, MemOperand(sp, 4, NegPreIndex), al)
+ // ldr(r, MemOperand(sp, 4, PostIndex), al)
+ // Both instructions can be eliminated.
+ int pattern_size = 2 * kInstrSize;
+ if (FLAG_push_pop_elimination &&
+ last_bound_pos_ <= (pc_offset() - pattern_size) &&
+ reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+ // pattern
+ instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
+ instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
+ pc_ -= 2 * kInstrSize;
+ if (FLAG_print_push_pop_elimination) {
+ PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
+ }
+ }
+}
+
+
+void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
+ addrmod2(cond | B26, src, dst);
+
+ // Eliminate pattern: pop(), push(r)
+ // add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
+ // -> str r, [sp, 0], al
+ int pattern_size = 2 * kInstrSize;
+ if (FLAG_push_pop_elimination &&
+ last_bound_pos_ <= (pc_offset() - pattern_size) &&
+ reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
+ instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
+ instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
+ pc_ -= 2 * kInstrSize;
+ emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
+ if (FLAG_print_push_pop_elimination) {
+ PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
+ }
+ }
+}
+
+
+void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
+ addrmod2(cond | B26 | B | L, dst, src);
+}
+
+
+void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
+ addrmod2(cond | B26 | B, src, dst);
+}
+
+
+void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
+ addrmod3(cond | L | B7 | H | B4, dst, src);
+}
+
+
+void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
+ addrmod3(cond | B7 | H | B4, src, dst);
+}
+
+
+void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
+ addrmod3(cond | L | B7 | S6 | B4, dst, src);
+}
+
+
+void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
+ addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
+}
+
+
+// Load/Store multiple instructions
+void Assembler::ldm(BlockAddrMode am,
+ Register base,
+ RegList dst,
+ Condition cond) {
+  // ABI stack constraint: ldmxx base, {..sp..} with base != sp is not restartable
+ ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
+
+ addrmod4(cond | B27 | am | L, base, dst);
+
+ // emit the constant pool after a function return implemented by ldm ..{..pc}
+ if (cond == al && (dst & pc.bit()) != 0) {
+ // There is a slight chance that the ldm instruction was actually a call,
+ // in which case it would be wrong to return into the constant pool; we
+ // recognize this case by checking if the emission of the pool was blocked
+ // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
+ // the case, we emit a jump over the pool.
+ CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
+ }
+}
+
+
+void Assembler::stm(BlockAddrMode am,
+ Register base,
+ RegList src,
+ Condition cond) {
+ addrmod4(cond | B27 | am, base, src);
+}
+
+
+// Semaphore instructions
+void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
+ ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
+ ASSERT(!dst.is(base) && !src.is(base));
+ emit(cond | P | base.code()*B16 | dst.code()*B12 |
+ B7 | B4 | src.code());
+}
+
+
+void Assembler::swpb(Register dst,
+ Register src,
+ Register base,
+ Condition cond) {
+ ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
+ ASSERT(!dst.is(base) && !src.is(base));
+ emit(cond | P | B | base.code()*B16 | dst.code()*B12 |
+ B7 | B4 | src.code());
+}
+
+
+// Exception-generating instructions and debugging support
+void Assembler::stop(const char* msg) {
+#if !defined(__arm__)
+ // The simulator handles these special instructions and stops execution.
+ emit(15 << 28 | ((intptr_t) msg));
+#else
+ // Just issue a simple break instruction for now. Alternatively we could use
+ // the swi(0x9f0001) instruction on Linux.
+ bkpt(0);
+#endif
+}
+
+
+void Assembler::bkpt(uint32_t imm16) { // v5 and above
+ ASSERT(is_uint16(imm16));
+ emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
+}
+
+
+void Assembler::swi(uint32_t imm24, Condition cond) {
+ ASSERT(is_uint24(imm24));
+ emit(cond | 15*B24 | imm24);
+}
+
+
+// Coprocessor instructions
+void Assembler::cdp(Coprocessor coproc,
+ int opcode_1,
+ CRegister crd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2,
+ Condition cond) {
+ ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
+ emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
+ crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
+}
+
+
+void Assembler::cdp2(Coprocessor coproc,
+ int opcode_1,
+ CRegister crd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2) { // v5 and above
+ cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
+}
+
+
+void Assembler::mcr(Coprocessor coproc,
+ int opcode_1,
+ Register rd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2,
+ Condition cond) {
+ ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
+ emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
+ rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
+}
+
+
+void Assembler::mcr2(Coprocessor coproc,
+ int opcode_1,
+ Register rd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2) { // v5 and above
+ mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
+}
+
+
+void Assembler::mrc(Coprocessor coproc,
+ int opcode_1,
+ Register rd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2,
+ Condition cond) {
+ ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
+ emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
+ rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
+}
+
+
+void Assembler::mrc2(Coprocessor coproc,
+ int opcode_1,
+ Register rd,
+ CRegister crn,
+ CRegister crm,
+ int opcode_2) { // v5 and above
+ mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
+}
+
+
+void Assembler::ldc(Coprocessor coproc,
+ CRegister crd,
+ const MemOperand& src,
+ LFlag l,
+ Condition cond) {
+ addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
+}
+
+
+void Assembler::ldc(Coprocessor coproc,
+ CRegister crd,
+ Register rn,
+ int option,
+ LFlag l,
+ Condition cond) {
+ // unindexed addressing
+ ASSERT(is_uint8(option));
+ emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
+ coproc*B8 | (option & 255));
+}
+
+
+void Assembler::ldc2(Coprocessor coproc,
+ CRegister crd,
+ const MemOperand& src,
+ LFlag l) { // v5 and above
+ ldc(coproc, crd, src, l, static_cast<Condition>(nv));
+}
+
+
+void Assembler::ldc2(Coprocessor coproc,
+ CRegister crd,
+ Register rn,
+ int option,
+ LFlag l) { // v5 and above
+ ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
+}
+
+
+void Assembler::stc(Coprocessor coproc,
+ CRegister crd,
+ const MemOperand& dst,
+ LFlag l,
+ Condition cond) {
+ addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
+}
+
+
+void Assembler::stc(Coprocessor coproc,
+ CRegister crd,
+ Register rn,
+ int option,
+ LFlag l,
+ Condition cond) {
+ // unindexed addressing
+ ASSERT(is_uint8(option));
+ emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
+ coproc*B8 | (option & 255));
+}
+
+
+void Assembler::stc2(Coprocessor coproc,
+                     CRegister crd,
+                     const MemOperand& dst,
+                     LFlag l) {  // v5 and above
+ stc(coproc, crd, dst, l, static_cast<Condition>(nv));
+}
+
+
+void Assembler::stc2(Coprocessor coproc,
+ CRegister crd,
+ Register rn,
+ int option,
+ LFlag l) { // v5 and above
+ stc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
+}
+
+
+// Pseudo instructions
+void Assembler::lea(Register dst,
+ const MemOperand& x,
+ SBit s,
+ Condition cond) {
+ int am = x.am_;
+ if (!x.rm_.is_valid()) {
+ // immediate offset
+ if ((am & P) == 0) // post indexing
+ mov(dst, Operand(x.rn_), s, cond);
+ else if ((am & U) == 0) // negative indexing
+ sub(dst, x.rn_, Operand(x.offset_), s, cond);
+ else
+ add(dst, x.rn_, Operand(x.offset_), s, cond);
+ } else {
+ // Register offset (shift_imm_ and shift_op_ are 0) or scaled
+    // register offset; the constructors make sure that both shift_imm_
+ // and shift_op_ are initialized.
+ ASSERT(!x.rm_.is(pc));
+ if ((am & P) == 0) // post indexing
+ mov(dst, Operand(x.rn_), s, cond);
+ else if ((am & U) == 0) // negative indexing
+ sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
+ else
+ add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
+ }
+}
+
+
+// Debugging
+void Assembler::RecordComment(const char* msg) {
+ if (FLAG_debug_code) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+ }
+}
+
+
+void Assembler::RecordPosition(int pos) {
+ if (pos == RelocInfo::kNoPosition) return;
+ ASSERT(pos >= 0);
+ current_position_ = pos;
+ WriteRecordedPositions();
+}
+
+
+void Assembler::RecordStatementPosition(int pos) {
+ if (pos == RelocInfo::kNoPosition) return;
+ ASSERT(pos >= 0);
+ current_statement_position_ = pos;
+ WriteRecordedPositions();
+}
+
+
+void Assembler::WriteRecordedPositions() {
+ // Write the statement position if it is different from what was written last
+ // time.
+ if (current_statement_position_ != written_statement_position_) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
+ written_statement_position_ = current_statement_position_;
+ }
+
+ // Write the position if it is different from what was written last time and
+ // also different from the written statement position.
+ if (current_position_ != written_position_ &&
+ current_position_ != written_statement_position_) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::POSITION, current_position_);
+ written_position_ = current_position_;
+ }
+}
+
+
+void Assembler::GrowBuffer() {
+ if (!own_buffer_) FATAL("external code buffer is too small");
+
+ // compute new buffer size
+ CodeDesc desc; // the new buffer
+ if (buffer_size_ < 4*KB) {
+ desc.buffer_size = 4*KB;
+ } else if (buffer_size_ < 1*MB) {
+ desc.buffer_size = 2*buffer_size_;
+ } else {
+ desc.buffer_size = buffer_size_ + 1*MB;
+ }
+ CHECK_GT(desc.buffer_size, 0); // no overflow
+
+ // setup new buffer
+ desc.buffer = NewArray<byte>(desc.buffer_size);
+
+ desc.instr_size = pc_offset();
+ desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+
+ // copy the data
+ int pc_delta = desc.buffer - buffer_;
+ int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+ memmove(desc.buffer, buffer_, desc.instr_size);
+ memmove(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.pos(), desc.reloc_size);
+
+ // switch buffers
+ DeleteArray(buffer_);
+ buffer_ = desc.buffer;
+ buffer_size_ = desc.buffer_size;
+ pc_ += pc_delta;
+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.last_pc() + pc_delta);
+
+ // none of our relocation types are pc relative pointing outside the code
+ // buffer nor pc absolute pointing inside the code buffer, so there is no need
+ // to relocate any emitted relocation entries
+
+ // relocate pending relocation entries
+ for (int i = 0; i < num_prinfo_; i++) {
+ RelocInfo& rinfo = prinfo_[i];
+ ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+ rinfo.rmode() != RelocInfo::POSITION);
+ rinfo.set_pc(rinfo.pc() + pc_delta);
+ }
+}
+
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
+ if (rmode >= RelocInfo::COMMENT && rmode <= RelocInfo::STATEMENT_POSITION) {
+ // adjust code for new modes
+ ASSERT(RelocInfo::IsComment(rmode) || RelocInfo::IsPosition(rmode));
+ // these modes do not need an entry in the constant pool
+ } else {
+ ASSERT(num_prinfo_ < kMaxNumPRInfo);
+ prinfo_[num_prinfo_++] = rinfo;
+ // Make sure the constant pool is not emitted in place of the next
+ // instruction for which we just recorded relocation info
+ BlockConstPoolBefore(pc_offset() + kInstrSize);
+ }
+ if (rinfo.rmode() != RelocInfo::NONE) {
+ // Don't record external references unless the heap will be serialized.
+ if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
+ !Serializer::enabled() &&
+ !FLAG_debug_code) {
+ return;
+ }
+ ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
+ reloc_info_writer.Write(&rinfo);
+ }
+}
+
+
+void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
+ // Calculate the offset of the next check. It will be overwritten
+ // when a const pool is generated or when const pools are being
+ // blocked for a specific range.
+ next_buffer_check_ = pc_offset() + kCheckConstInterval;
+
+ // There is nothing to do if there are no pending relocation info entries
+ if (num_prinfo_ == 0) return;
+
+ // We emit a constant pool at regular intervals of about kDistBetweenPools
+ // or when requested by parameter force_emit (e.g. after each function).
+ // We prefer not to emit a jump unless the max distance is reached or if we
+ // are running low on slots, which can happen if a lot of constants are being
+ // emitted (e.g. --debug-code and many static references).
+ int dist = pc_offset() - last_const_pool_end_;
+ if (!force_emit && dist < kMaxDistBetweenPools &&
+ (require_jump || dist < kDistBetweenPools) &&
+ // TODO(1236125): Cleanup the "magic" number below. We know that
+ // the code generation will test every kCheckConstIntervalInst.
+ // Thus we are safe as long as we generate less than 7 constant
+ // entries per instruction.
+ (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
+ return;
+ }
+
+ // If we did not return by now, we need to emit the constant pool soon.
+
+ // However, some small sequences of instructions must not be broken up by the
+ // insertion of a constant pool; such sequences are protected by setting
+ // no_const_pool_before_, which is checked here. Also, recursive calls to
+ // CheckConstPool are blocked by no_const_pool_before_.
+ if (pc_offset() < no_const_pool_before_) {
+ // Emission is currently blocked; make sure we try again as soon as possible
+ next_buffer_check_ = no_const_pool_before_;
+
+ // Something is wrong if emission is forced and blocked at the same time
+ ASSERT(!force_emit);
+ return;
+ }
+
+ int jump_instr = require_jump ? kInstrSize : 0;
+
+ // Check that the code buffer is large enough before emitting the constant
+ // pool and relocation information (include the jump over the pool and the
+ // constant pool marker).
+ int max_needed_space =
+ jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
+ while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
+
+ // Block recursive calls to CheckConstPool
+ BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
+ num_prinfo_*kInstrSize);
+ // Don't bother to check for the emit calls below.
+ next_buffer_check_ = no_const_pool_before_;
+
+ // Emit jump over constant pool if necessary
+ Label after_pool;
+ if (require_jump) b(&after_pool);
+
+ RecordComment("[ Constant Pool");
+
+ // Put down constant pool marker
+ // "Undefined instruction" as specified by A3.1 Instruction set encoding
+ emit(0x03000000 | num_prinfo_);
+
+ // Emit constant pool entries
+ for (int i = 0; i < num_prinfo_; i++) {
+ RelocInfo& rinfo = prinfo_[i];
+ ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+ rinfo.rmode() != RelocInfo::POSITION &&
+ rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
+ Instr instr = instr_at(rinfo.pc());
+ // Instruction to patch must be a ldr/str [pc, #offset]
+ // P and U set, B and W clear, Rn == pc, offset12 still 0
+ ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
+ (2*B25 | P | U | pc.code()*B16));
+ int delta = pc_ - rinfo.pc() - 8;
+ ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32
+ if (delta < 0) {
+ instr &= ~U;
+ delta = -delta;
+ }
+ ASSERT(is_uint12(delta));
+ instr_at_put(rinfo.pc(), instr + delta);
+ emit(rinfo.data());
+ }
+ num_prinfo_ = 0;
+ last_const_pool_end_ = pc_offset();
+
+ RecordComment("]");
+
+ if (after_pool.is_linked()) {
+ bind(&after_pool);
+ }
+
+ // Since a constant pool was just emitted, move the check offset forward by
+ // the standard interval.
+ next_buffer_check_ = pc_offset() + kCheckConstInterval;
+}
+
+
+} } // namespace v8::internal
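
A minimal standalone round-trip of the b/bl branch-offset encoding used by
Assembler::b(), target_at() and target_at_put() above; EncodeBranch and
DecodeTarget are illustrative names, not part of this tree:

#include <cassert>
#include <cstdint>
#include <cstdio>

const uint32_t kImm24Mask = (1 << 24) - 1;

// Encode an unconditional 'b <offset>'; the offset is in bytes, relative to
// the branch address plus 8, and must be word-aligned.
uint32_t EncodeBranch(int32_t branch_offset) {
  assert((branch_offset & 3) == 0);
  int32_t imm24 = branch_offset >> 2;
  return 0xea000000u | (imm24 & kImm24Mask);
}

// Recover the branch target from an instruction at code position pos. Like
// the assembler itself, this relies on arithmetic right shifts of signed ints.
int32_t DecodeTarget(uint32_t instr, int32_t pos) {
  // Shift the 24-bit field to the top, then shift back down to sign-extend;
  // the net shift of 6 also converts words back to bytes.
  int32_t imm26 = static_cast<int32_t>((instr & kImm24Mask) << 8) >> 6;
  return pos + 8 + imm26;
}

int main() {
  // A branch at position 100 targeting position 40 encodes offset
  // 40 - 100 - 8 = -68; decoding recovers 40.
  uint32_t instr = EncodeBranch(40 - 100 - 8);
  printf("target = %d\n", DecodeTarget(instr, 100));
  return 0;
}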
diff --git a/V8Binding/v8/src/arm/assembler-arm.h b/V8Binding/v8/src/arm/assembler-arm.h
new file mode 100644
index 0000000..eeab4a7
--- /dev/null
+++ b/V8Binding/v8/src/arm/assembler-arm.h
@@ -0,0 +1,788 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been modified
+// significantly by Google Inc.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+
+// A light-weight ARM Assembler
+// Generates user mode instructions for the ARM architecture up to version 5
+
+#ifndef V8_ARM_ASSEMBLER_ARM_H_
+#define V8_ARM_ASSEMBLER_ARM_H_
+
+#include "assembler.h"
+
+namespace v8 {
+namespace internal {
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+//
+// Core register
+struct Register {
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ bool is(Register reg) const { return code_ == reg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+
+ // (unfortunately we can't make this private in a struct)
+ int code_;
+};
+
+
+extern Register no_reg;
+extern Register r0;
+extern Register r1;
+extern Register r2;
+extern Register r3;
+extern Register r4;
+extern Register r5;
+extern Register r6;
+extern Register r7;
+extern Register r8;
+extern Register r9;
+extern Register r10;
+extern Register fp;
+extern Register ip;
+extern Register sp;
+extern Register lr;
+extern Register pc;
+
+
+// Coprocessor register
+struct CRegister {
+ bool is_valid() const { return 0 <= code_ && code_ < 16; }
+ bool is(CRegister creg) const { return code_ == creg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+
+ // (unfortunately we can't make this private in a struct)
+ int code_;
+};
+
+
+extern CRegister no_creg;
+extern CRegister cr0;
+extern CRegister cr1;
+extern CRegister cr2;
+extern CRegister cr3;
+extern CRegister cr4;
+extern CRegister cr5;
+extern CRegister cr6;
+extern CRegister cr7;
+extern CRegister cr8;
+extern CRegister cr9;
+extern CRegister cr10;
+extern CRegister cr11;
+extern CRegister cr12;
+extern CRegister cr13;
+extern CRegister cr14;
+extern CRegister cr15;
+
+
+// Coprocessor number
+enum Coprocessor {
+ p0 = 0,
+ p1 = 1,
+ p2 = 2,
+ p3 = 3,
+ p4 = 4,
+ p5 = 5,
+ p6 = 6,
+ p7 = 7,
+ p8 = 8,
+ p9 = 9,
+ p10 = 10,
+ p11 = 11,
+ p12 = 12,
+ p13 = 13,
+ p14 = 14,
+ p15 = 15
+};
+
+
+// Condition field in instructions
+enum Condition {
+ eq = 0 << 28, // Z set equal.
+ ne = 1 << 28, // Z clear not equal.
+ cs = 2 << 28, // C set unsigned higher or same.
+ hs = 2 << 28, // C set unsigned higher or same.
+ cc = 3 << 28, // C clear unsigned lower.
+ lo = 3 << 28, // C clear unsigned lower.
+ mi = 4 << 28, // N set negative.
+ pl = 5 << 28, // N clear positive or zero.
+ vs = 6 << 28, // V set overflow.
+ vc = 7 << 28, // V clear no overflow.
+ hi = 8 << 28, // C set, Z clear unsigned higher.
+ ls = 9 << 28, // C clear or Z set unsigned lower or same.
+ ge = 10 << 28, // N == V greater or equal.
+ lt = 11 << 28, // N != V less than.
+ gt = 12 << 28, // Z clear, N == V greater than.
+  le = 13 << 28, // Z set or N != V less than or equal.
+ al = 14 << 28 // always.
+};
+
+
+// Returns the equivalent of !cc.
+INLINE(Condition NegateCondition(Condition cc));
+
+
+// Corresponds to transposing the operands of a comparison.
+inline Condition ReverseCondition(Condition cc) {
+ switch (cc) {
+ case lo:
+ return hi;
+ case hi:
+ return lo;
+ case hs:
+ return ls;
+ case ls:
+ return hs;
+ case lt:
+ return gt;
+ case gt:
+ return lt;
+ case ge:
+ return le;
+ case le:
+ return ge;
+ default:
+ return cc;
+ };
+}
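+
+// For example (a sketch of the distinction): negation keeps the operand
+// order and inverts the test, while reversal compensates for swapped
+// operands:
+//   NegateCondition(lt) == ge   // !(a < b)  is  (a >= b)
+//   ReverseCondition(lt) == gt  // (a < b)  iff  (b > a)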
+
+
+// Branch hints are not used on the ARM. They are defined so that they can
+// appear in shared function signatures, but will be ignored in ARM
+// implementations.
+enum Hint { no_hint };
+
+// Hints are not used on the ARM. Negating is trivial.
+inline Hint NegateHint(Hint ignored) { return no_hint; }
+
+
+// -----------------------------------------------------------------------------
+// Addressing modes and instruction variants
+
+// Shifter operand shift operation
+enum ShiftOp {
+ LSL = 0 << 5,
+ LSR = 1 << 5,
+ ASR = 2 << 5,
+ ROR = 3 << 5,
+ RRX = -1
+};
+
+
+// Condition code updating mode
+enum SBit {
+ SetCC = 1 << 20, // set condition code
+ LeaveCC = 0 << 20 // leave condition code unchanged
+};
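+
+// Passing SetCC to a data-processing emitter selects the flag-setting form
+// of the instruction; e.g. (a sketch): add(r0, r1, Operand(r2), SetCC)
+// assembles as 'adds r0, r1, r2'.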
+
+
+// Status register selection
+enum SRegister {
+ CPSR = 0 << 22,
+ SPSR = 1 << 22
+};
+
+
+// Status register fields
+enum SRegisterField {
+ CPSR_c = CPSR | 1 << 16,
+ CPSR_x = CPSR | 1 << 17,
+ CPSR_s = CPSR | 1 << 18,
+ CPSR_f = CPSR | 1 << 19,
+ SPSR_c = SPSR | 1 << 16,
+ SPSR_x = SPSR | 1 << 17,
+ SPSR_s = SPSR | 1 << 18,
+ SPSR_f = SPSR | 1 << 19
+};
+
+// Status register field mask (or'ed SRegisterField enum values)
+typedef uint32_t SRegisterFieldMask;
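+// For example (a sketch), a mask selecting both the control and flags fields
+// of CPSR can be formed as: SRegisterFieldMask fields = CPSR_c | CPSR_f;
+// and passed to msr() declared below.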
+
+
+// Memory operand addressing mode
+enum AddrMode {
+ // bit encoding P U W
+ Offset = (8|4|0) << 21, // offset (without writeback to base)
+ PreIndex = (8|4|1) << 21, // pre-indexed addressing with writeback
+ PostIndex = (0|4|0) << 21, // post-indexed addressing with writeback
+ NegOffset = (8|0|0) << 21, // negative offset (without writeback to base)
+ NegPreIndex = (8|0|1) << 21, // negative pre-indexed with writeback
+ NegPostIndex = (0|0|0) << 21 // negative post-indexed with writeback
+};
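+
+// (In the encoding above, the values 8, 4 and 1 select the P, U and W bits,
+// which land at instruction bit positions 24, 23 and 21 after the shift.)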
+
+
+// Load/store multiple addressing mode
+enum BlockAddrMode {
+ // bit encoding P U W
+ da = (0|0|0) << 21, // decrement after
+ ia = (0|4|0) << 21, // increment after
+ db = (8|0|0) << 21, // decrement before
+ ib = (8|4|0) << 21, // increment before
+ da_w = (0|0|1) << 21, // decrement after with writeback to base
+ ia_w = (0|4|1) << 21, // increment after with writeback to base
+ db_w = (8|0|1) << 21, // decrement before with writeback to base
+ ib_w = (8|4|1) << 21 // increment before with writeback to base
+};
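+
+// On the full-descending stack used here, db_w applied to sp gives the
+// conventional multi-register push (stmdb sp!, {...}) and ia_w the matching
+// pop (ldmia sp!, {...}).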
+
+
+// Coprocessor load/store operand size
+enum LFlag {
+ Long = 1 << 22, // long load/store coprocessor
+ Short = 0 << 22 // short load/store coprocessor
+};
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands
+
+// Class Operand represents a shifter operand in data processing instructions
+class Operand BASE_EMBEDDED {
+ public:
+ // immediate
+ INLINE(explicit Operand(int32_t immediate,
+ RelocInfo::Mode rmode = RelocInfo::NONE));
+ INLINE(explicit Operand(const ExternalReference& f));
+ INLINE(explicit Operand(const char* s));
+ INLINE(explicit Operand(Object** opp));
+ INLINE(explicit Operand(Context** cpp));
+ explicit Operand(Handle<Object> handle);
+ INLINE(explicit Operand(Smi* value));
+
+ // rm
+ INLINE(explicit Operand(Register rm));
+
+ // rm <shift_op> shift_imm
+ explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
+
+ // rm <shift_op> rs
+ explicit Operand(Register rm, ShiftOp shift_op, Register rs);
+
+ // Return true if this is a register operand.
+ INLINE(bool is_reg() const);
+
+ Register rm() const { return rm_; }
+
+ private:
+ Register rm_;
+ Register rs_;
+ ShiftOp shift_op_;
+ int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
+ int32_t imm32_; // valid if rm_ == no_reg
+ RelocInfo::Mode rmode_;
+
+ friend class Assembler;
+};
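+
+// Example shifter operands (a sketch; registers as declared above):
+//   Operand(42)           // #42, immediate
+//   Operand(r2)           // r2, register
+//   Operand(r2, LSL, 4)   // r2, lsl #4, shift by immediate
+//   Operand(r2, LSR, r3)  // r2, lsr r3, shift by register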
+
+
+// Class MemOperand represents a memory operand in load and store instructions
+class MemOperand BASE_EMBEDDED {
+ public:
+ // [rn +/- offset] Offset/NegOffset
+ // [rn +/- offset]! PreIndex/NegPreIndex
+ // [rn], +/- offset PostIndex/NegPostIndex
+  // offset is any signed 32-bit value; the offset is first loaded into the
+  // scratch register ip if it does not fit the addressing mode (a 12-bit
+  // unsigned immediate plus a sign bit)
+ explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset);
+
+ // [rn +/- rm] Offset/NegOffset
+ // [rn +/- rm]! PreIndex/NegPreIndex
+ // [rn], +/- rm PostIndex/NegPostIndex
+ explicit MemOperand(Register rn, Register rm, AddrMode am = Offset);
+
+ // [rn +/- rm <shift_op> shift_imm] Offset/NegOffset
+ // [rn +/- rm <shift_op> shift_imm]! PreIndex/NegPreIndex
+ // [rn], +/- rm <shift_op> shift_imm PostIndex/NegPostIndex
+ explicit MemOperand(Register rn, Register rm,
+ ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
+
+ private:
+ Register rn_; // base
+ Register rm_; // register offset
+ int32_t offset_; // valid if rm_ == no_reg
+ ShiftOp shift_op_;
+ int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
+ AddrMode am_; // bits P, U, and W
+
+ friend class Assembler;
+};
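+
+// Examples of how the constructors map to ARM operand syntax (a sketch):
+//   MemOperand(r1, 4)             // [r1, #+4]          Offset
+//   MemOperand(r1, 4, PreIndex)   // [r1, #+4]!         PreIndex
+//   MemOperand(r1, 4, PostIndex)  // [r1], #+4          PostIndex
+//   MemOperand(r1, r2, LSL, 2)    // [r1, +r2, lsl #2]  Offset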
+
+
+typedef int32_t Instr;
+
+
+class Assembler : public Malloced {
+ public:
+ // Create an assembler. Instructions and relocation information are emitted
+ // into a buffer, with the instructions starting from the beginning and the
+ // relocation information starting from the end of the buffer. See CodeDesc
+ // for a detailed comment on the layout (globals.h).
+ //
+ // If the provided buffer is NULL, the assembler allocates and grows its own
+ // buffer, and buffer_size determines the initial buffer size. The buffer is
+ // owned by the assembler and deallocated upon destruction of the assembler.
+ //
+ // If the provided buffer is not NULL, the assembler uses the provided buffer
+ // for code generation and assumes its size to be buffer_size. If the buffer
+ // is too small, a fatal error occurs. No deallocation of the buffer is done
+ // upon destruction of the assembler.
+ Assembler(void* buffer, int buffer_size);
+ ~Assembler();
+
+ // GetCode emits any pending (non-emitted) code and fills the descriptor
+ // desc. GetCode() is idempotent; it returns the same result if no other
+ // Assembler functions are invoked in between GetCode() calls.
+ void GetCode(CodeDesc* desc);
+
+ // Label operations & relative jumps (PPUM Appendix D)
+ //
+ // Takes a branch opcode (cc) and a label (L) and generates
+ // either a backward branch or a forward branch and links it
+ // to the label fixup chain. Usage:
+ //
+ // Label L; // unbound label
+ // j(cc, &L); // forward branch to unbound label
+ // bind(&L); // bind label to the current pc
+ // j(cc, &L); // backward branch to bound label
+ // bind(&L); // illegal: a label may be bound only once
+ //
+ // Note: The same Label can be used for forward and backward branches
+ // but it may be bound only once.
+
+ void bind(Label* L); // binds an unbound label L to the current code position
+
+ // Returns the branch offset to the given label from the current code position
+ // Links the label to the current position if it is still unbound
+ // Manages the jump elimination optimization if the second parameter is true.
+ int branch_offset(Label* L, bool jump_elimination_allowed);
+
+ // Return the address in the constant pool of the code target address used by
+ // the branch/call instruction at pc.
+ INLINE(static Address target_address_address_at(Address pc));
+
+ // Read/Modify the code target address in the branch/call instruction at pc.
+ INLINE(static Address target_address_at(Address pc));
+ INLINE(static void set_target_address_at(Address pc, Address target));
+
+ // Distance between the instruction referring to the address of the call
+ // target (ldr pc, [target addr in const pool]) and the return address
+ static const int kTargetAddrToReturnAddrDist = sizeof(Instr);
+
+
+ // ---------------------------------------------------------------------------
+ // Code generation
+
+ // Insert the smallest number of nop instructions
+ // possible to align the pc offset to a multiple
+ // of m. m must be a power of 2 (>= 4).
+ void Align(int m);
+
+ // Branch instructions
+ void b(int branch_offset, Condition cond = al);
+ void bl(int branch_offset, Condition cond = al);
+ void blx(int branch_offset); // v5 and above
+ void blx(Register target, Condition cond = al); // v5 and above
+ void bx(Register target, Condition cond = al); // v5 and above, plus v4t
+
+ // Convenience branch instructions using labels
+ void b(Label* L, Condition cond = al) {
+ b(branch_offset(L, cond == al), cond);
+ }
+ void b(Condition cond, Label* L) { b(branch_offset(L, cond == al), cond); }
+ void bl(Label* L, Condition cond = al) { bl(branch_offset(L, false), cond); }
+ void bl(Condition cond, Label* L) { bl(branch_offset(L, false), cond); }
+ void blx(Label* L) { blx(branch_offset(L, false)); } // v5 and above
+
+ // Data-processing instructions
+ void and_(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void eor(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void sub(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+ void sub(Register dst, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al) {
+ sub(dst, src1, Operand(src2), s, cond);
+ }
+
+ void rsb(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void add(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void adc(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void sbc(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void rsc(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void tst(Register src1, const Operand& src2, Condition cond = al);
+ void tst(Register src1, Register src2, Condition cond = al) {
+ tst(src1, Operand(src2), cond);
+ }
+
+ void teq(Register src1, const Operand& src2, Condition cond = al);
+
+ void cmp(Register src1, const Operand& src2, Condition cond = al);
+ void cmp(Register src1, Register src2, Condition cond = al) {
+ cmp(src1, Operand(src2), cond);
+ }
+
+ void cmn(Register src1, const Operand& src2, Condition cond = al);
+
+ void orr(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+ void orr(Register dst, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al) {
+ orr(dst, src1, Operand(src2), s, cond);
+ }
+
+ void mov(Register dst, const Operand& src,
+ SBit s = LeaveCC, Condition cond = al);
+ void mov(Register dst, Register src, SBit s = LeaveCC, Condition cond = al) {
+ mov(dst, Operand(src), s, cond);
+ }
+
+ void bic(Register dst, Register src1, const Operand& src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void mvn(Register dst, const Operand& src,
+ SBit s = LeaveCC, Condition cond = al);
+
+ // Multiply instructions
+
+ void mla(Register dst, Register src1, Register src2, Register srcA,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void mul(Register dst, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void smlal(Register dstL, Register dstH, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void smull(Register dstL, Register dstH, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void umlal(Register dstL, Register dstH, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ void umull(Register dstL, Register dstH, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al);
+
+ // Miscellaneous arithmetic instructions
+
+ void clz(Register dst, Register src, Condition cond = al); // v5 and above
+
+ // Status register access instructions
+
+ void mrs(Register dst, SRegister s, Condition cond = al);
+ void msr(SRegisterFieldMask fields, const Operand& src, Condition cond = al);
+
+ // Load/Store instructions
+ void ldr(Register dst, const MemOperand& src, Condition cond = al);
+ void str(Register src, const MemOperand& dst, Condition cond = al);
+ void ldrb(Register dst, const MemOperand& src, Condition cond = al);
+ void strb(Register src, const MemOperand& dst, Condition cond = al);
+ void ldrh(Register dst, const MemOperand& src, Condition cond = al);
+ void strh(Register src, const MemOperand& dst, Condition cond = al);
+ void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
+ void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
+
+ // Load/Store multiple instructions
+ void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
+ void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);
+
+ // Semaphore instructions
+ void swp(Register dst, Register src, Register base, Condition cond = al);
+ void swpb(Register dst, Register src, Register base, Condition cond = al);
+
+ // Exception-generating instructions and debugging support
+ void stop(const char* msg);
+
+ void bkpt(uint32_t imm16); // v5 and above
+ void swi(uint32_t imm24, Condition cond = al);
+
+ // Coprocessor instructions
+
+ void cdp(Coprocessor coproc, int opcode_1,
+ CRegister crd, CRegister crn, CRegister crm,
+ int opcode_2, Condition cond = al);
+
+ void cdp2(Coprocessor coproc, int opcode_1,
+ CRegister crd, CRegister crn, CRegister crm,
+ int opcode_2); // v5 and above
+
+ void mcr(Coprocessor coproc, int opcode_1,
+ Register rd, CRegister crn, CRegister crm,
+ int opcode_2 = 0, Condition cond = al);
+
+ void mcr2(Coprocessor coproc, int opcode_1,
+ Register rd, CRegister crn, CRegister crm,
+ int opcode_2 = 0); // v5 and above
+
+ void mrc(Coprocessor coproc, int opcode_1,
+ Register rd, CRegister crn, CRegister crm,
+ int opcode_2 = 0, Condition cond = al);
+
+ void mrc2(Coprocessor coproc, int opcode_1,
+ Register rd, CRegister crn, CRegister crm,
+ int opcode_2 = 0); // v5 and above
+
+ void ldc(Coprocessor coproc, CRegister crd, const MemOperand& src,
+ LFlag l = Short, Condition cond = al);
+ void ldc(Coprocessor coproc, CRegister crd, Register base, int option,
+ LFlag l = Short, Condition cond = al);
+
+ void ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
+ LFlag l = Short); // v5 and above
+ void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
+ LFlag l = Short); // v5 and above
+
+ void stc(Coprocessor coproc, CRegister crd, const MemOperand& dst,
+ LFlag l = Short, Condition cond = al);
+ void stc(Coprocessor coproc, CRegister crd, Register base, int option,
+ LFlag l = Short, Condition cond = al);
+
+ void stc2(Coprocessor coproc, CRegister crd, const MemOperand& dst,
+ LFlag l = Short); // v5 and above
+ void stc2(Coprocessor coproc, CRegister crd, Register base, int option,
+ LFlag l = Short); // v5 and above
+
+ // Pseudo instructions
+ void nop() { mov(r0, Operand(r0)); }
+
+ void push(Register src, Condition cond = al) {
+ str(src, MemOperand(sp, 4, NegPreIndex), cond);
+ }
+
+ void pop(Register dst) {
+ ldr(dst, MemOperand(sp, 4, PostIndex), al);
+ }
+
+ void pop() {
+ add(sp, sp, Operand(kPointerSize));
+ }
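+
+  // As encoded above, push(src) expands to 'str src, [sp, #-4]!' and
+  // pop(dst) to 'ldr dst, [sp], #+4'.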
+
+ // Load effective address of memory operand x into register dst
+ void lea(Register dst, const MemOperand& x,
+ SBit s = LeaveCC, Condition cond = al);
+
+ // Jump unconditionally to given label.
+ void jmp(Label* L) { b(L, al); }
+
+
+ // Debugging
+
+ // Record a comment relocation entry that can be used by a disassembler.
+ // Use --debug_code to enable.
+ void RecordComment(const char* msg);
+
+ void RecordPosition(int pos);
+ void RecordStatementPosition(int pos);
+ void WriteRecordedPositions();
+
+ int pc_offset() const { return pc_ - buffer_; }
+ int current_position() const { return current_position_; }
+  int current_statement_position() const { return current_statement_position_; }
+
+ protected:
+ int buffer_space() const { return reloc_info_writer.pos() - pc_; }
+
+ // Read/patch instructions
+ Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
+ void instr_at_put(byte* pc, Instr instr) {
+ *reinterpret_cast<Instr*>(pc) = instr;
+ }
+ Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+ void instr_at_put(int pos, Instr instr) {
+ *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+ }
+
+ // Decode branch instruction at pos and return branch target pos
+ int target_at(int pos);
+
+ // Patch branch instruction at pos to branch to given branch target pos
+ void target_at_put(int pos, int target_pos);
+
+ // Check if is time to emit a constant pool for pending reloc info entries
+ void CheckConstPool(bool force_emit, bool require_jump);
+
+ // Block the emission of the constant pool before pc_offset
+ void BlockConstPoolBefore(int pc_offset) {
+ if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
+ }
+
+ private:
+ // Code buffer:
+ // The buffer into which code and relocation info are generated.
+ byte* buffer_;
+ int buffer_size_;
+ // True if the assembler owns the buffer, false if buffer is external.
+ bool own_buffer_;
+
+ // Buffer size and constant pool distance are checked together at regular
+ // intervals of kBufferCheckInterval emitted bytes
+ static const int kBufferCheckInterval = 1*KB/2;
+ int next_buffer_check_; // pc offset of next buffer check
+
+ // Code generation
+ static const int kInstrSize = sizeof(Instr); // signed size
+ // The relocation writer's position is at least kGap bytes below the end of
+ // the generated instructions. This is so that multi-instruction sequences do
+ // not have to check for overflow. The same is true for writes of large
+ // relocation info entries.
+ static const int kGap = 32;
+ byte* pc_; // the program counter; moves forward
+
+ // Constant pool generation
+ // Pools are emitted in the instruction stream, preferably after unconditional
+ // jumps or after returns from functions (in dead code locations).
+ // If a long code sequence does not contain unconditional jumps, it is
+ // necessary to emit the constant pool before the pool gets too far from the
+ // location it is accessed from. In this case, we emit a jump over the emitted
+ // constant pool.
+  // Constants in the pool may be addresses of functions that get relocated;
+  // if so, a relocation info entry is associated with the constant pool entry.
+
+ // Repeated checking whether the constant pool should be emitted is rather
+ // expensive. By default we only check again once a number of instructions
+ // has been generated. That also means that the sizing of the buffers is not
+ // an exact science, and that we rely on some slop to not overrun buffers.
+ static const int kCheckConstIntervalInst = 32;
+ static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
+
+
+ // Pools are emitted after function return and in dead code at (more or less)
+ // regular intervals of kDistBetweenPools bytes
+ static const int kDistBetweenPools = 1*KB;
+
+ // Constants in pools are accessed via pc relative addressing, which can
+ // reach +/-4KB thereby defining a maximum distance between the instruction
+ // and the accessed constant. We satisfy this constraint by limiting the
+ // distance between pools.
+ static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;
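+  // (With kBufferCheckInterval at 512 bytes, this evaluates to
+  // 4 * 1024 - 2 * 512 = 3072 bytes.)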
+
+ // Emission of the constant pool may be blocked in some code sequences
+ int no_const_pool_before_; // block emission before this pc offset
+
+ // Keep track of the last emitted pool to guarantee a maximal distance
+ int last_const_pool_end_; // pc offset following the last constant pool
+
+ // Relocation info generation
+ // Each relocation is encoded as a variable size value
+ static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+ RelocInfoWriter reloc_info_writer;
+ // Relocation info records are also used during code generation as temporary
+ // containers for constants and code target addresses until they are emitted
+ // to the constant pool. These pending relocation info records are temporarily
+ // stored in a separate buffer until a constant pool is emitted.
+ // If every instruction in a long sequence is accessing the pool, we need one
+ // pending relocation entry per instruction.
+ static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize;
+ RelocInfo prinfo_[kMaxNumPRInfo]; // the buffer of pending relocation info
+ int num_prinfo_; // number of pending reloc info entries in the buffer
+
+  // The bound position; before it we cannot do instruction elimination.
+ int last_bound_pos_;
+
+ // source position information
+ int current_position_;
+ int current_statement_position_;
+ int written_position_;
+ int written_statement_position_;
+
+ // Code emission
+ inline void CheckBuffer();
+ void GrowBuffer();
+ inline void emit(Instr x);
+
+ // Instruction generation
+ void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
+ void addrmod2(Instr instr, Register rd, const MemOperand& x);
+ void addrmod3(Instr instr, Register rd, const MemOperand& x);
+ void addrmod4(Instr instr, Register rn, RegList rl);
+ void addrmod5(Instr instr, CRegister crd, const MemOperand& x);
+
+ // Labels
+ void print(Label* L);
+ void bind_to(Label* L, int pos);
+ void link_to(Label* L, Label* appendix);
+ void next(Label* L);
+
+ // Record reloc info for current pc_
+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+};
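+
+// A minimal usage sketch (hypothetical; see the buffer discussion above):
+//   Assembler masm(NULL, 256);     // assembler owns a growable buffer
+//   masm.add(r0, r0, Operand(1));  // r0 += 1
+//   masm.mov(pc, Operand(lr));     // return to the caller
+//   CodeDesc desc;
+//   masm.GetCode(&desc);           // emit pending constant pool, fill desc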
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_ASSEMBLER_ARM_H_
diff --git a/V8Binding/v8/src/arm/builtins-arm.cc b/V8Binding/v8/src/arm/builtins-arm.cc
new file mode 100644
index 0000000..588798b
--- /dev/null
+++ b/V8Binding/v8/src/arm/builtins-arm.cc
@@ -0,0 +1,700 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "debug.h"
+#include "runtime.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
+ // TODO(1238487): Don't pass the function in a static variable.
+ __ mov(ip, Operand(ExternalReference::builtin_passed_function()));
+ __ str(r1, MemOperand(ip, 0));
+
+ // The actual argument count has already been loaded into register
+ // r0, but JumpToBuiltin expects r0 to contain the number of
+ // arguments including the receiver.
+ __ add(r0, r0, Operand(1));
+ __ JumpToBuiltin(ExternalReference(id));
+}
+
+
+void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments
+ // -- r1 : constructor function
+ // -- lr : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ Label non_function_call;
+ // Check that the function is not a smi.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &non_function_call);
+ // Check that the function is a JSFunction.
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ __ cmp(r2, Operand(JS_FUNCTION_TYPE));
+ __ b(ne, &non_function_call);
+
+ // Enter a construct frame.
+ __ EnterConstructFrame();
+
+ // Preserve the two incoming parameters
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ push(r0); // smi-tagged arguments count
+ __ push(r1); // constructor function
+
+ // Allocate the new receiver object.
+ __ push(r1); // argument for Runtime_NewObject
+ __ CallRuntime(Runtime::kNewObject, 1);
+ __ push(r0); // save the receiver
+
+ // Push the function and the allocated receiver from the stack.
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ ldr(r1, MemOperand(sp, kPointerSize));
+ __ push(r1); // function
+ __ push(r0); // receiver
+
+ // Reload the number of arguments from the stack.
+ // r1: constructor function
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ __ ldr(r3, MemOperand(sp, 4 * kPointerSize));
+
+ // Setup pointer to last argument.
+ __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // Setup number of arguments for function call below
+ __ mov(r0, Operand(r3, LSR, kSmiTagSize));
+
+ // Copy arguments and receiver to the expression stack.
+ // r0: number of arguments
+ // r2: address of last argument (caller sp)
+ // r1: constructor function
+ // r3: number of arguments (smi-tagged)
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ Label loop, entry;
+ __ b(&entry);
+ __ bind(&loop);
+ __ ldr(ip, MemOperand(r2, r3, LSL, kPointerSizeLog2 - 1));
+ __ push(ip);
+ __ bind(&entry);
+ __ sub(r3, r3, Operand(2), SetCC);
+ __ b(ge, &loop);
+
+ // Call the function.
+ // r0: number of arguments
+ // r1: constructor function
+ ParameterCount actual(r0);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION);
+
+ // Pop the function from the stack.
+ // sp[0]: constructor function
+ // sp[2]: receiver
+ // sp[3]: constructor function
+ // sp[4]: number of arguments (smi-tagged)
+ __ pop();
+
+ // Restore context from the frame.
+ // r0: result
+ // sp[0]: receiver
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, exit;
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ // r0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
+ __ cmp(r3, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(ge, &exit);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ ldr(r0, MemOperand(sp));
+
+ // Remove receiver from the stack, remove caller arguments, and
+ // return.
+ __ bind(&exit);
+ // r0: result
+ // sp[0]: receiver (newly allocated object)
+ // sp[1]: constructor function
+ // sp[2]: number of arguments (smi-tagged)
+ __ ldr(r1, MemOperand(sp, 2 * kPointerSize));
+ __ LeaveConstructFrame();
+ __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
+ __ add(sp, sp, Operand(kPointerSize));
+ __ Jump(lr);
+
+ // r0: number of arguments
+ // r1: called object
+ __ bind(&non_function_call);
+
+ // Set expected number of arguments to zero (not changing r0).
+ __ mov(r2, Operand(0));
+ __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ RelocInfo::CODE_TARGET);
+}
+
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+ bool is_construct) {
+ // Called from Generate_JS_Entry
+ // r0: code entry
+ // r1: function
+ // r2: receiver
+ // r3: argc
+ // r4: argv
+ // r5-r7, cp may be clobbered
+
+ // Clear the context before we push it when entering the JS frame.
+ __ mov(cp, Operand(0));
+
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+
+ // Setup the context from the function argument.
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+ // Push the function and the receiver onto the stack.
+ __ push(r1);
+ __ push(r2);
+
+ // Copy arguments to the stack in a loop.
+ // r1: function
+ // r3: argc
+ // r4: argv, i.e. points to first arg
+ Label loop, entry;
+ __ add(r2, r4, Operand(r3, LSL, kPointerSizeLog2));
+ // r2 points past last arg.
+ __ b(&entry);
+ __ bind(&loop);
+ __ ldr(r0, MemOperand(r4, kPointerSize, PostIndex)); // read next parameter
+ __ ldr(r0, MemOperand(r0)); // dereference handle
+ __ push(r0); // push parameter
+ __ bind(&entry);
+ __ cmp(r4, Operand(r2));
+ __ b(ne, &loop);
+
+ // Initialize all JavaScript callee-saved registers, since they will be seen
+ // by the garbage collector as part of handlers.
+ __ mov(r4, Operand(Factory::undefined_value()));
+ __ mov(r5, Operand(r4));
+ __ mov(r6, Operand(r4));
+ __ mov(r7, Operand(r4));
+ if (kR9Available == 1) {
+ __ mov(r9, Operand(r4));
+ }
+
+ // Invoke the code and pass argc as r0.
+ __ mov(r0, Operand(r3));
+ if (is_construct) {
+ __ Call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
+ RelocInfo::CODE_TARGET);
+ } else {
+ ParameterCount actual(r0);
+ __ InvokeFunction(r1, actual, CALL_FUNCTION);
+ }
+
+ // Exit the JS frame and remove the parameters (except function), and return.
+ // Respect ABI stack constraint.
+ __ LeaveInternalFrame();
+ __ Jump(lr);
+
+ // r0: result
+}
+
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+ // 1. Make sure we have at least one argument.
+  // r0: actual number of arguments
+ { Label done;
+ __ tst(r0, Operand(r0));
+ __ b(ne, &done);
+ __ mov(r2, Operand(Factory::undefined_value()));
+ __ push(r2);
+ __ add(r0, r0, Operand(1));
+ __ bind(&done);
+ }
+
+ // 2. Get the function to call from the stack.
+  // r0: actual number of arguments
+ { Label done, non_function, function;
+ __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &non_function);
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ __ cmp(r2, Operand(JS_FUNCTION_TYPE));
+ __ b(eq, &function);
+
+ // Non-function called: Clear the function to force exception.
+ __ bind(&non_function);
+ __ mov(r1, Operand(0));
+ __ b(&done);
+
+ // Change the context eagerly because it will be used below to get the
+ // right global object.
+ __ bind(&function);
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+ __ bind(&done);
+ }
+
+ // 3. Make sure first argument is an object; convert if necessary.
+ // r0: actual number of arguments
+ // r1: function
+ { Label call_to_object, use_global_receiver, patch_receiver, done;
+ __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ ldr(r2, MemOperand(r2, -kPointerSize));
+
+ // r0: actual number of arguments
+ // r1: function
+ // r2: first argument
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(eq, &call_to_object);
+
+ __ mov(r3, Operand(Factory::null_value()));
+ __ cmp(r2, r3);
+ __ b(eq, &use_global_receiver);
+ __ mov(r3, Operand(Factory::undefined_value()));
+ __ cmp(r2, r3);
+ __ b(eq, &use_global_receiver);
+
+ __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
+ __ cmp(r3, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(lt, &call_to_object);
+ __ cmp(r3, Operand(LAST_JS_OBJECT_TYPE));
+ __ b(le, &done);
+
+ __ bind(&call_to_object);
+ __ EnterInternalFrame();
+
+ // Store number of arguments and function across the call into the runtime.
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ push(r0);
+ __ push(r1);
+
+ __ push(r2);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
+ __ mov(r2, r0);
+
+ // Restore number of arguments and function.
+ __ pop(r1);
+ __ pop(r0);
+ __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+
+ __ LeaveInternalFrame();
+ __ b(&patch_receiver);
+
+ // Use the global receiver object from the called function as the receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalIndex =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ ldr(r2, FieldMemOperand(cp, kGlobalIndex));
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+
+ __ bind(&patch_receiver);
+ __ add(r3, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ str(r2, MemOperand(r3, -kPointerSize));
+
+ __ bind(&done);
+ }
+
+  // 4. Shift the arguments, including the receiver, one slot down the stack
+ // r0: actual number of arguments (including call() receiver)
+ // r1: function
+ { Label loop;
+ // Calculate the copy start address (destination). Copy end address is sp.
+ __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
+ __ add(r2, r2, Operand(kPointerSize)); // copy receiver too
+
+ __ bind(&loop);
+ __ ldr(ip, MemOperand(r2, -kPointerSize));
+ __ str(ip, MemOperand(r2));
+ __ sub(r2, r2, Operand(kPointerSize));
+ __ cmp(r2, sp);
+ __ b(ne, &loop);
+ }
+
+ // 5. Adjust the actual number of arguments and remove the top element.
+ // r0: actual number of arguments (including call() receiver)
+ // r1: function
+ __ sub(r0, r0, Operand(1));
+ __ add(sp, sp, Operand(kPointerSize));
+
+ // 6. Get the code for the function or the non-function builtin.
+  // If the number of expected arguments matches, call directly. Otherwise
+  // go through the arguments adaptor stub.
+ // r0: actual number of arguments
+ // r1: function
+ { Label invoke;
+ __ tst(r1, r1);
+ __ b(ne, &invoke);
+ __ mov(r2, Operand(0)); // expected arguments is 0 for CALL_NON_FUNCTION
+ __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
+ __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&invoke);
+ __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r2,
+ FieldMemOperand(r3,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ __ ldr(r3,
+ MemOperand(r3, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
+ __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ cmp(r2, r0); // Check formal and actual parameter counts.
+ __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+ RelocInfo::CODE_TARGET, ne);
+
+ // 7. Jump to the code in r3 without checking arguments.
+ ParameterCount expected(0);
+ __ InvokeCode(r3, expected, expected, JUMP_FUNCTION);
+ }
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ const int kIndexOffset = -5 * kPointerSize;
+ const int kLimitOffset = -4 * kPointerSize;
+ const int kArgsOffset = 2 * kPointerSize;
+ const int kRecvOffset = 3 * kPointerSize;
+ const int kFunctionOffset = 4 * kPointerSize;
+
+ __ EnterInternalFrame();
+
+ __ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
+ __ push(r0);
+ __ ldr(r0, MemOperand(fp, kArgsOffset)); // get the args array
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_JS);
+
+ Label no_preemption, retry_preemption;
+ __ bind(&retry_preemption);
+ ExternalReference stack_guard_limit_address =
+ ExternalReference::address_of_stack_guard_limit();
+ __ mov(r2, Operand(stack_guard_limit_address));
+ __ ldr(r2, MemOperand(r2));
+ __ cmp(sp, r2);
+ __ b(hi, &no_preemption);
+
+  // We have already encountered a preemption or stack overflow before
+  // pushing the array contents. Save r0, the smi-tagged length of the array.
+ __ push(r0);
+
+ // Runtime routines expect at least one argument, so give it a Smi.
+ __ mov(r0, Operand(Smi::FromInt(0)));
+ __ push(r0);
+ __ CallRuntime(Runtime::kStackGuard, 1);
+
+ // Since we returned, it wasn't a stack overflow. Restore r0 and try again.
+ __ pop(r0);
+ __ b(&retry_preemption);
+
+ __ bind(&no_preemption);
+
+ // Eagerly check for stack-overflow before starting to push the arguments.
+ // r0: number of arguments.
+ // r2: stack limit.
+ Label okay;
+ __ sub(r2, sp, r2);
+
+ __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ b(hi, &okay);
+
+ // Out of stack space.
+ __ ldr(r1, MemOperand(fp, kFunctionOffset));
+ __ push(r1);
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_JS);
+
+ // Push current limit and index.
+ __ bind(&okay);
+ __ push(r0); // limit
+ __ mov(r1, Operand(0)); // initial index
+ __ push(r1);
+
+ // Change context eagerly to get the right global object if necessary.
+ __ ldr(r0, MemOperand(fp, kFunctionOffset));
+ __ ldr(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
+
+ // Compute the receiver.
+ Label call_to_object, use_global_receiver, push_receiver;
+ __ ldr(r0, MemOperand(fp, kRecvOffset));
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &call_to_object);
+ __ mov(r1, Operand(Factory::null_value()));
+ __ cmp(r0, r1);
+ __ b(eq, &use_global_receiver);
+ __ mov(r1, Operand(Factory::undefined_value()));
+ __ cmp(r0, r1);
+ __ b(eq, &use_global_receiver);
+
+ // Check if the receiver is already a JavaScript object.
+ // r0: receiver
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
+ __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(lt, &call_to_object);
+ __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
+ __ b(le, &push_receiver);
+
+ // Convert the receiver to a regular object.
+ // r0: receiver
+ __ bind(&call_to_object);
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
+ __ b(&push_receiver);
+
+ // Use the current global receiver object as the receiver.
+ __ bind(&use_global_receiver);
+ const int kGlobalOffset =
+ Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ __ ldr(r0, FieldMemOperand(cp, kGlobalOffset));
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
+
+ // Push the receiver.
+ // r0: receiver
+ __ bind(&push_receiver);
+ __ push(r0);
+
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ __ ldr(r0, MemOperand(fp, kIndexOffset));
+ __ b(&entry);
+
+ // Load the current argument from the arguments array and push it to the
+ // stack.
+ // r0: current argument index
+ __ bind(&loop);
+ __ ldr(r1, MemOperand(fp, kArgsOffset));
+ __ push(r1);
+ __ push(r0);
+
+ // Call the runtime to access the property in the arguments array.
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ __ push(r0);
+
+ // Use inline caching to access the arguments.
+ __ ldr(r0, MemOperand(fp, kIndexOffset));
+ __ add(r0, r0, Operand(1 << kSmiTagSize));
+ __ str(r0, MemOperand(fp, kIndexOffset));
+
+ // Test if the copy loop has finished copying all the elements from the
+ // arguments object.
+ __ bind(&entry);
+ __ ldr(r1, MemOperand(fp, kLimitOffset));
+ __ cmp(r0, r1);
+ __ b(ne, &loop);
+
+ // Invoke the function.
+ ParameterCount actual(r0);
+ __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+ __ ldr(r1, MemOperand(fp, kFunctionOffset));
+ __ InvokeFunction(r1, actual, CALL_FUNCTION);
+
+ // Tear down the internal frame and remove function, receiver and args.
+ __ LeaveInternalFrame();
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Jump(lr);
+}
+
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ mov(r4, Operand(ArgumentsAdaptorFrame::SENTINEL));
+ __ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() | fp.bit() | lr.bit());
+ __ add(fp, sp, Operand(3 * kPointerSize));
+}
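+
+// A sketch of the frame built above, derived from the stm and the final add:
+// lr at fp+4, the caller's fp at fp+0, the SENTINEL at fp-4, the function
+// at fp-8, and the smi-tagged argument count at fp-12, which is where
+// LeaveArgumentsAdaptorFrame reloads it below.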
+
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : result being passed through
+ // -----------------------------------
+ // Get the number of arguments passed (as a smi), tear down the frame and
+  // then remove the parameters from the stack.
+ __ ldr(r1, MemOperand(fp, -3 * kPointerSize));
+ __ mov(sp, fp);
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
+ __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(sp, sp, Operand(kPointerSize)); // adjust for receiver
+}
+
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : actual number of arguments
+ // -- r1 : function (passed through to callee)
+ // -- r2 : expected number of arguments
+ // -- r3 : code entry to call
+ // -----------------------------------
+
+ Label invoke, dont_adapt_arguments;
+
+ Label enough, too_few;
+ __ cmp(r0, Operand(r2));
+ __ b(lt, &too_few);
+ __ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+ __ b(eq, &dont_adapt_arguments);
+
+ { // Enough parameters: actual >= expected
+ __ bind(&enough);
+ EnterArgumentsAdaptorFrame(masm);
+
+ // Calculate copy start address into r0 and copy end address into r2.
+ // r0: actual number of arguments as a smi
+ // r1: function
+ // r2: expected number of arguments
+ // r3: code entry to call
+ __ add(r0, fp, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ // adjust for return address and receiver
+ __ add(r0, r0, Operand(2 * kPointerSize));
+ __ sub(r2, r0, Operand(r2, LSL, kPointerSizeLog2));
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ // r0: copy start address
+ // r1: function
+ // r2: copy end address
+ // r3: code entry to call
+
+ Label copy;
+ __ bind(&copy);
+ __ ldr(ip, MemOperand(r0, 0));
+ __ push(ip);
+ __ cmp(r0, r2); // Compare before moving to next argument.
+ __ sub(r0, r0, Operand(kPointerSize));
+ __ b(ne, &copy);
+
+ __ b(&invoke);
+ }
+
+ { // Too few parameters: Actual < expected
+ __ bind(&too_few);
+ EnterArgumentsAdaptorFrame(masm);
+
+    // Calculate the copy start address into r0; the copy end address is fp.
+ // r0: actual number of arguments as a smi
+ // r1: function
+ // r2: expected number of arguments
+ // r3: code entry to call
+ __ add(r0, fp, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+
+ // Copy the arguments (including the receiver) to the new stack frame.
+ // r0: copy start address
+ // r1: function
+ // r2: expected number of arguments
+ // r3: code entry to call
+ Label copy;
+ __ bind(&copy);
+ // Adjust load for return address and receiver.
+ __ ldr(ip, MemOperand(r0, 2 * kPointerSize));
+ __ push(ip);
+ __ cmp(r0, fp); // Compare before moving to next argument.
+ __ sub(r0, r0, Operand(kPointerSize));
+ __ b(ne, &copy);
+
+ // Fill the remaining expected arguments with undefined.
+ // r1: function
+ // r2: expected number of arguments
+ // r3: code entry to call
+ __ mov(ip, Operand(Factory::undefined_value()));
+ __ sub(r2, fp, Operand(r2, LSL, kPointerSizeLog2));
+ __ sub(r2, r2, Operand(4 * kPointerSize)); // Adjust for frame.
+
+ Label fill;
+ __ bind(&fill);
+ __ push(ip);
+ __ cmp(sp, r2);
+ __ b(ne, &fill);
+ }
+
+ // Call the entry point.
+ __ bind(&invoke);
+ __ Call(r3);
+
+ // Exit frame and return.
+ LeaveArgumentsAdaptorFrame(masm);
+ __ Jump(lr);
+
+
+ // -------------------------------------------
+  // Don't adapt arguments.
+ // -------------------------------------------
+ __ bind(&dont_adapt_arguments);
+ __ Jump(r3);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
diff --git a/V8Binding/v8/src/arm/codegen-arm-inl.h b/V8Binding/v8/src/arm/codegen-arm-inl.h
new file mode 100644
index 0000000..544331a
--- /dev/null
+++ b/V8Binding/v8/src/arm/codegen-arm-inl.h
@@ -0,0 +1,46 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_ARM_CODEGEN_ARM_INL_H_
+#define V8_ARM_CODEGEN_ARM_INL_H_
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// Platform-specific inline functions.
+
+void DeferredCode::Jump() { __ jmp(&entry_label_); }
+void DeferredCode::Branch(Condition cc) { __ b(cc, &entry_label_); }
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_CODEGEN_ARM_INL_H_
diff --git a/V8Binding/v8/src/arm/codegen-arm.cc b/V8Binding/v8/src/arm/codegen-arm.cc
new file mode 100644
index 0000000..7428d3b
--- /dev/null
+++ b/V8Binding/v8/src/arm/codegen-arm.cc
@@ -0,0 +1,5199 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "parser.h"
+#include "register-allocator-inl.h"
+#include "runtime.h"
+#include "scopes.h"
+
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// -------------------------------------------------------------------------
+// Platform-specific DeferredCode functions.
+
+void DeferredCode::SaveRegisters() {
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ int action = registers_[i];
+ if (action == kPush) {
+ __ push(RegisterAllocator::ToRegister(i));
+ } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
+ __ str(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
+ }
+ }
+}
+
+
+void DeferredCode::RestoreRegisters() {
+  // Restore the registers in reverse order, mirroring the pushes in
+  // SaveRegisters above.
+ for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
+ int action = registers_[i];
+ if (action == kPush) {
+ __ pop(RegisterAllocator::ToRegister(i));
+ } else if (action != kIgnore) {
+ action &= ~kSyncedFlag;
+ __ ldr(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
+ }
+ }
+}
+
+
+// -------------------------------------------------------------------------
+// CodeGenState implementation.
+
+CodeGenState::CodeGenState(CodeGenerator* owner)
+ : owner_(owner),
+ typeof_state_(NOT_INSIDE_TYPEOF),
+ true_target_(NULL),
+ false_target_(NULL),
+ previous_(NULL) {
+ owner_->set_state(this);
+}
+
+
+CodeGenState::CodeGenState(CodeGenerator* owner,
+ TypeofState typeof_state,
+ JumpTarget* true_target,
+ JumpTarget* false_target)
+ : owner_(owner),
+ typeof_state_(typeof_state),
+ true_target_(true_target),
+ false_target_(false_target),
+ previous_(owner->state()) {
+ owner_->set_state(this);
+}
+
+
+CodeGenState::~CodeGenState() {
+ ASSERT(owner_->state() == this);
+ owner_->set_state(previous_);
+}
+
+
+// -------------------------------------------------------------------------
+// CodeGenerator implementation
+
+CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script,
+ bool is_eval)
+ : is_eval_(is_eval),
+ script_(script),
+ deferred_(8),
+ masm_(new MacroAssembler(NULL, buffer_size)),
+ scope_(NULL),
+ frame_(NULL),
+ allocator_(NULL),
+ cc_reg_(al),
+ state_(NULL),
+ function_return_is_shadowed_(false),
+ in_spilled_code_(false) {
+}
+
+
+// Calling conventions:
+// fp: caller's frame pointer
+// sp: stack pointer
+// r1: called JS function
+// cp: callee's context
+
+void CodeGenerator::GenCode(FunctionLiteral* fun) {
+ ZoneList<Statement*>* body = fun->body();
+
+ // Initialize state.
+ ASSERT(scope_ == NULL);
+ scope_ = fun->scope();
+ ASSERT(allocator_ == NULL);
+ RegisterAllocator register_allocator(this);
+ allocator_ = &register_allocator;
+ ASSERT(frame_ == NULL);
+ frame_ = new VirtualFrame();
+ cc_reg_ = al;
+ set_in_spilled_code(false);
+ {
+ CodeGenState state(this);
+
+ // Entry:
+ // Stack: receiver, arguments
+ // lr: return address
+ // fp: caller's frame pointer
+ // sp: stack pointer
+ // r1: called JS function
+ // cp: callee's context
+ allocator_->Initialize();
+ frame_->Enter();
+ // tos: code slot
+#ifdef DEBUG
+ if (strlen(FLAG_stop_at) > 0 &&
+ fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
+ frame_->SpillAll();
+ __ stop("stop-at");
+ }
+#endif
+
+ // Allocate space for locals and initialize them.
+ frame_->AllocateStackSlots();
+ // Initialize the function return target after the locals are set
+ // up, because it needs the expected frame height from the frame.
+ function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
+ function_return_is_shadowed_ = false;
+
+ VirtualFrame::SpilledScope spilled_scope;
+ if (scope_->num_heap_slots() > 0) {
+ // Allocate local context.
+ // Get outer context and create a new context based on it.
+ __ ldr(r0, frame_->Function());
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kNewContext, 1); // r0 holds the result
+
+#ifdef DEBUG
+ JumpTarget verified_true;
+ __ cmp(r0, Operand(cp));
+ verified_true.Branch(eq);
+ __ stop("NewContext: r0 is expected to be the same as cp");
+ verified_true.Bind();
+#endif
+ // Update context local.
+ __ str(cp, frame_->Context());
+ }
+
+ // TODO(1241774): Improve this code:
+ // 1) only needed if we have a context
+ // 2) no need to recompute context ptr every single time
+ // 3) don't copy parameter operand code from SlotOperand!
+ {
+ Comment cmnt2(masm_, "[ copy context parameters into .context");
+
+ // Note that iteration order is relevant here! If we have the same
+ // parameter twice (e.g., function (x, y, x)), and that parameter
+ // needs to be copied into the context, it must be the last argument
+ // passed to the parameter that needs to be copied. This is a rare
+ // case so we don't check for it, instead we rely on the copying
+ // order: such a parameter is copied repeatedly into the same
+ // context location and thus the last value is what is seen inside
+ // the function.
+ for (int i = 0; i < scope_->num_parameters(); i++) {
+ Variable* par = scope_->parameter(i);
+ Slot* slot = par->slot();
+ if (slot != NULL && slot->type() == Slot::CONTEXT) {
+ ASSERT(!scope_->is_global_scope()); // no parameters in global scope
+ __ ldr(r1, frame_->ParameterAt(i));
+ // Loads r2 with context; used below in RecordWrite.
+ __ str(r1, SlotOperand(slot, r2));
+ // Load the offset into r3.
+ int slot_offset =
+ FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ __ mov(r3, Operand(slot_offset));
+ __ RecordWrite(r2, r3, r1);
+ }
+ }
+ }
+
+ // Store the arguments object. This must happen after context
+ // initialization because the arguments object may be stored in the
+ // context.
+ if (scope_->arguments() != NULL) {
+ ASSERT(scope_->arguments_shadow() != NULL);
+ Comment cmnt(masm_, "[ allocate arguments object");
+ { Reference shadow_ref(this, scope_->arguments_shadow());
+ { Reference arguments_ref(this, scope_->arguments());
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ ldr(r2, frame_->Function());
+ // The receiver is below the arguments, the return address,
+ // and the frame pointer on the stack.
+ const int kReceiverDisplacement = 2 + scope_->num_parameters();
+ __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
+ __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
+ frame_->Adjust(3);
+ __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
+ frame_->CallStub(&stub, 3);
+ frame_->EmitPush(r0);
+ arguments_ref.SetValue(NOT_CONST_INIT);
+ }
+ shadow_ref.SetValue(NOT_CONST_INIT);
+ }
+ frame_->Drop(); // Value is no longer needed.
+ }
+
+ // Generate code to 'execute' declarations and initialize functions
+ // (source elements). In case of an illegal redeclaration we need to
+ // handle that instead of processing the declarations.
+ if (scope_->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ illegal redeclarations");
+ scope_->VisitIllegalRedeclaration(this);
+ } else {
+ Comment cmnt(masm_, "[ declarations");
+ ProcessDeclarations(scope_->declarations());
+ // Bail out if a stack-overflow exception occurred when processing
+ // declarations.
+ if (HasStackOverflow()) return;
+ }
+
+ if (FLAG_trace) {
+ frame_->CallRuntime(Runtime::kTraceEnter, 0);
+ // Ignore the return value.
+ }
+ CheckStack();
+
+ // Compile the body of the function in a vanilla state. Don't
+ // bother compiling all the code if the scope has an illegal
+ // redeclaration.
+ if (!scope_->HasIllegalRedeclaration()) {
+ Comment cmnt(masm_, "[ function body");
+#ifdef DEBUG
+ bool is_builtin = Bootstrapper::IsActive();
+ bool should_trace =
+ is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
+ if (should_trace) {
+ frame_->CallRuntime(Runtime::kDebugTrace, 0);
+ // Ignore the return value.
+ }
+#endif
+ VisitStatementsAndSpill(body);
+ }
+ }
+
+ // Generate the return sequence if necessary.
+ if (frame_ != NULL || function_return_.is_linked()) {
+ // exit
+ // r0: result
+ // sp: stack pointer
+ // fp: frame pointer
+ // pp: parameter pointer
+ // cp: callee's context
+ __ mov(r0, Operand(Factory::undefined_value()));
+
+ function_return_.Bind();
+ if (FLAG_trace) {
+ // Push the return value on the stack as the parameter.
+ // Runtime::TraceExit returns the parameter as it is.
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kTraceExit, 1);
+ }
+
+ // Tear down the frame which will restore the caller's frame pointer and
+ // the link register.
+ frame_->Exit();
+
+ __ add(sp, sp, Operand((scope_->num_parameters() + 1) * kPointerSize));
+ __ Jump(lr);
+ }
+
+ // Code generation state must be reset.
+ ASSERT(!has_cc());
+ ASSERT(state_ == NULL);
+ ASSERT(!function_return_is_shadowed_);
+ function_return_.Unuse();
+ DeleteFrame();
+
+ // Process any deferred code using the register allocator.
+ if (!HasStackOverflow()) {
+ ProcessDeferred();
+ }
+
+ allocator_ = NULL;
+ scope_ = NULL;
+}
+
+
+MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
+ // Currently, this assertion will fail if we try to assign to
+ // a constant variable that is constant because it is read-only
+ // (such as the variable referring to a named function expression).
+ // We need to implement assignments to read-only variables.
+ // Ideally, we should do this during AST generation (by converting
+ // such assignments into expression statements); however, in general
+ // we may not be able to make the decision until past AST generation,
+ // that is when the entire program is known.
+ ASSERT(slot != NULL);
+ int index = slot->index();
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ return frame_->ParameterAt(index);
+
+ case Slot::LOCAL:
+ return frame_->LocalAt(index);
+
+ case Slot::CONTEXT: {
+ // Follow the context chain if necessary.
+ ASSERT(!tmp.is(cp)); // do not overwrite context register
+ Register context = cp;
+ int chain_length = scope()->ContextChainLength(slot->var()->scope());
+ for (int i = 0; i < chain_length; i++) {
+ // Load the closure.
+ // (All contexts, even 'with' contexts, have a closure,
+ // and it is the same for all contexts inside a function.
+ // There is no need to go to the function context first.)
+ __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
+ // Load the function context (which is the incoming, outer context).
+ __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
+ context = tmp;
+ }
+      // We may have a 'with' context now. Get the function context.
+      // (In fact this mov may never be needed, since the scope analysis
+      // may not permit a direct context access in this case and thus we
+      // would always be at a function context. However it is safe to
+      // dereference because the function context of a function context
+      // is itself. Before deleting this mov we should try to create a
+      // counter-example first, though...)
+ __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
+ return ContextOperand(tmp, index);
+ }
+
+ default:
+ UNREACHABLE();
+ return MemOperand(r0, 0);
+ }
+}
+
+
+MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
+ Slot* slot,
+ Register tmp,
+ Register tmp2,
+ JumpTarget* slow) {
+ ASSERT(slot->type() == Slot::CONTEXT);
+ Register context = cp;
+
+ for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_eval()) {
+ // Check that extension is NULL.
+ __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
+ __ tst(tmp2, tmp2);
+ slow->Branch(ne);
+ }
+ __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
+ __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
+ context = tmp;
+ }
+ }
+ // Check that last extension is NULL.
+ __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
+ __ tst(tmp2, tmp2);
+ slow->Branch(ne);
+ __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
+ return ContextOperand(tmp, slot->index());
+}
+
+
+void CodeGenerator::LoadConditionAndSpill(Expression* expression,
+ TypeofState typeof_state,
+ JumpTarget* true_target,
+ JumpTarget* false_target,
+ bool force_control) {
+ ASSERT(in_spilled_code());
+ set_in_spilled_code(false);
+ LoadCondition(expression, typeof_state, true_target, false_target,
+ force_control);
+ if (frame_ != NULL) {
+ frame_->SpillAll();
+ }
+ set_in_spilled_code(true);
+}
+
+
+// Loads a value on TOS. If it is a boolean value, the result may have been
+// (partially) translated into branches, or it may have set the condition
+// code register. If force_cc is set, the value is forced to set the
+// condition code register and no value is pushed. If the condition code
+// register was set, has_cc() is true and cc_reg_ contains the condition to
+// test for 'true'.
+void CodeGenerator::LoadCondition(Expression* x,
+ TypeofState typeof_state,
+ JumpTarget* true_target,
+ JumpTarget* false_target,
+ bool force_cc) {
+ ASSERT(!in_spilled_code());
+ ASSERT(!has_cc());
+ int original_height = frame_->height();
+
+ { CodeGenState new_state(this, typeof_state, true_target, false_target);
+ Visit(x);
+
+ // If we hit a stack overflow, we may not have actually visited
+ // the expression. In that case, we ensure that we have a
+ // valid-looking frame state because we will continue to generate
+ // code as we unwind the C++ stack.
+ //
+ // It's possible to have both a stack overflow and a valid frame
+  // state (e.g., a subexpression overflowed, visiting it returned
+ // with a dummied frame state, and visiting this expression
+ // returned with a normal-looking state).
+ if (HasStackOverflow() &&
+ has_valid_frame() &&
+ !has_cc() &&
+ frame_->height() == original_height) {
+ true_target->Jump();
+ }
+ }
+ if (force_cc && frame_ != NULL && !has_cc()) {
+ // Convert the TOS value to a boolean in the condition code register.
+ ToBoolean(true_target, false_target);
+ }
+ ASSERT(!force_cc || !has_valid_frame() || has_cc());
+ ASSERT(!has_valid_frame() ||
+ (has_cc() && frame_->height() == original_height) ||
+ (!has_cc() && frame_->height() == original_height + 1));
+}
+
+
+void CodeGenerator::LoadAndSpill(Expression* expression,
+ TypeofState typeof_state) {
+ ASSERT(in_spilled_code());
+ set_in_spilled_code(false);
+ Load(expression, typeof_state);
+ frame_->SpillAll();
+ set_in_spilled_code(true);
+}
+
+
+void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ ASSERT(!in_spilled_code());
+ JumpTarget true_target;
+ JumpTarget false_target;
+ LoadCondition(x, typeof_state, &true_target, &false_target, false);
+
+ if (has_cc()) {
+ // Convert cc_reg_ into a boolean value.
+ JumpTarget loaded;
+ JumpTarget materialize_true;
+ materialize_true.Branch(cc_reg_);
+ __ mov(r0, Operand(Factory::false_value()));
+ frame_->EmitPush(r0);
+ loaded.Jump();
+ materialize_true.Bind();
+ __ mov(r0, Operand(Factory::true_value()));
+ frame_->EmitPush(r0);
+ loaded.Bind();
+ cc_reg_ = al;
+ }
+
+ if (true_target.is_linked() || false_target.is_linked()) {
+ // We have at least one condition value that has been "translated"
+ // into a branch, thus it needs to be loaded explicitly.
+ JumpTarget loaded;
+ if (frame_ != NULL) {
+ loaded.Jump(); // Don't lose the current TOS.
+ }
+ bool both = true_target.is_linked() && false_target.is_linked();
+ // Load "true" if necessary.
+ if (true_target.is_linked()) {
+ true_target.Bind();
+ __ mov(r0, Operand(Factory::true_value()));
+ frame_->EmitPush(r0);
+ }
+ // If both "true" and "false" need to be loaded jump across the code for
+ // "false".
+ if (both) {
+ loaded.Jump();
+ }
+ // Load "false" if necessary.
+ if (false_target.is_linked()) {
+ false_target.Bind();
+ __ mov(r0, Operand(Factory::false_value()));
+ frame_->EmitPush(r0);
+ }
+ // A value is loaded on all paths reaching this point.
+ loaded.Bind();
+ }
+ ASSERT(has_valid_frame());
+ ASSERT(!has_cc());
+ ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::LoadGlobal() {
+ VirtualFrame::SpilledScope spilled_scope;
+ __ ldr(r0, GlobalObject());
+ frame_->EmitPush(r0);
+}
+
+
+void CodeGenerator::LoadGlobalReceiver(Register scratch) {
+ VirtualFrame::SpilledScope spilled_scope;
+ __ ldr(scratch, ContextOperand(cp, Context::GLOBAL_INDEX));
+ __ ldr(scratch,
+ FieldMemOperand(scratch, GlobalObject::kGlobalReceiverOffset));
+ frame_->EmitPush(scratch);
+}
+
+
+// TODO(1241834): Get rid of this function in favor of just using Load, now
+// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
+// variables w/o reference errors elsewhere.
+void CodeGenerator::LoadTypeofExpression(Expression* x) {
+ VirtualFrame::SpilledScope spilled_scope;
+ Variable* variable = x->AsVariableProxy()->AsVariable();
+ if (variable != NULL && !variable->is_this() && variable->is_global()) {
+ // NOTE: This is somewhat nasty. We force the compiler to load
+ // the variable as if through '<global>.<variable>' to make sure we
+ // do not get reference errors.
+ Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
+ Literal key(variable->name());
+ // TODO(1241834): Fetch the position from the variable instead of using
+ // no position.
+ Property property(&global, &key, RelocInfo::kNoPosition);
+ LoadAndSpill(&property);
+ } else {
+ LoadAndSpill(x, INSIDE_TYPEOF);
+ }
+}
+
+
+Reference::Reference(CodeGenerator* cgen, Expression* expression)
+ : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
+ cgen->LoadReference(this);
+}
+
+
+Reference::~Reference() {
+ cgen_->UnloadReference(this);
+}
+
+
+void CodeGenerator::LoadReference(Reference* ref) {
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ LoadReference");
+ Expression* e = ref->expression();
+ Property* property = e->AsProperty();
+ Variable* var = e->AsVariableProxy()->AsVariable();
+
+ if (property != NULL) {
+ // The expression is either a property or a variable proxy that rewrites
+ // to a property.
+ LoadAndSpill(property->obj());
+ // We use a named reference if the key is a literal symbol, unless it is
+ // a string that can be legally parsed as an integer. This is because
+ // otherwise we will not get into the slow case code that handles [] on
+ // String objects.
+ Literal* literal = property->key()->AsLiteral();
+ uint32_t dummy;
+ if (literal != NULL &&
+ literal->handle()->IsSymbol() &&
+ !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
+ ref->set_type(Reference::NAMED);
+ } else {
+ LoadAndSpill(property->key());
+ ref->set_type(Reference::KEYED);
+ }
+ } else if (var != NULL) {
+ // The expression is a variable proxy that does not rewrite to a
+ // property. Global variables are treated as named property references.
+ if (var->is_global()) {
+ LoadGlobal();
+ ref->set_type(Reference::NAMED);
+ } else {
+ ASSERT(var->slot() != NULL);
+ ref->set_type(Reference::SLOT);
+ }
+ } else {
+ // Anything else is a runtime error.
+ LoadAndSpill(e);
+ frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
+ }
+}
+
+
+void CodeGenerator::UnloadReference(Reference* ref) {
+ VirtualFrame::SpilledScope spilled_scope;
+ // Pop a reference from the stack while preserving TOS.
+ Comment cmnt(masm_, "[ UnloadReference");
+ int size = ref->size();
+ if (size > 0) {
+ frame_->EmitPop(r0);
+ frame_->Drop(size);
+ frame_->EmitPush(r0);
+ }
+}
+
+
+// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
+// register to a boolean in the condition code register. The code
+// may jump to 'false_target' in case the register converts to 'false'.
+void CodeGenerator::ToBoolean(JumpTarget* true_target,
+ JumpTarget* false_target) {
+ VirtualFrame::SpilledScope spilled_scope;
+ // Note: The generated code snippet does not change stack variables.
+ // Only the condition code should be set.
+ frame_->EmitPop(r0);
+
+ // Fast case checks
+
+ // Check if the value is 'false'.
+ __ cmp(r0, Operand(Factory::false_value()));
+ false_target->Branch(eq);
+
+ // Check if the value is 'true'.
+ __ cmp(r0, Operand(Factory::true_value()));
+ true_target->Branch(eq);
+
+ // Check if the value is 'undefined'.
+ __ cmp(r0, Operand(Factory::undefined_value()));
+ false_target->Branch(eq);
+
+ // Check if the value is a smi.
+ __ cmp(r0, Operand(Smi::FromInt(0)));
+ false_target->Branch(eq);
+ __ tst(r0, Operand(kSmiTagMask));
+ true_target->Branch(eq);
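+  // (The two smi checks above exploit the encoding: kSmiTag is 0, so
+  // Smi::FromInt(0) is the all-zero bit pattern and is the only falsy
+  // smi; any other value with a clear tag bit is a non-zero smi and
+  // therefore converts to true.)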
+
+ // Slow case: call the runtime.
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kToBool, 1);
+ // Convert the result (r0) to a condition code.
+ __ cmp(r0, Operand(Factory::false_value()));
+
+ cc_reg_ = ne;
+}
+
+
+class GenericBinaryOpStub : public CodeStub {
+ public:
+ GenericBinaryOpStub(Token::Value op,
+ OverwriteMode mode)
+ : op_(op), mode_(mode) { }
+
+ private:
+ Token::Value op_;
+ OverwriteMode mode_;
+
+ // Minor key encoding in 16 bits.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 14> {};
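+  // The resulting layout is [ op : bits 2..15 | mode : bits 0..1 ], so
+  // every (op, mode) pair encodes to a distinct 16-bit minor key.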
+
+ Major MajorKey() { return GenericBinaryOp; }
+ int MinorKey() {
+ // Encode the parameters in a unique 16 bit value.
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_);
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() {
+ switch (op_) {
+ case Token::ADD: return "GenericBinaryOpStub_ADD";
+ case Token::SUB: return "GenericBinaryOpStub_SUB";
+ case Token::MUL: return "GenericBinaryOpStub_MUL";
+ case Token::DIV: return "GenericBinaryOpStub_DIV";
+ case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
+ case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
+ case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
+ case Token::SAR: return "GenericBinaryOpStub_SAR";
+ case Token::SHL: return "GenericBinaryOpStub_SHL";
+ case Token::SHR: return "GenericBinaryOpStub_SHR";
+ default: return "GenericBinaryOpStub";
+ }
+ }
+
+#ifdef DEBUG
+ void Print() { PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_)); }
+#endif
+};
+
+
+void CodeGenerator::GenericBinaryOperation(Token::Value op,
+ OverwriteMode overwrite_mode) {
+ VirtualFrame::SpilledScope spilled_scope;
+ // sp[0] : y
+ // sp[1] : x
+ // result : r0
+
+ // Stub is entered with a call: 'return address' is in lr.
+ switch (op) {
+ case Token::ADD: // fall through.
+ case Token::SUB: // fall through.
+ case Token::MUL:
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR: {
+ frame_->EmitPop(r0); // r0 : y
+ frame_->EmitPop(r1); // r1 : x
+ GenericBinaryOpStub stub(op, overwrite_mode);
+ frame_->CallStub(&stub, 0);
+ break;
+ }
+
+ case Token::DIV: {
+ Result arg_count = allocator_->Allocate(r0);
+ ASSERT(arg_count.is_valid());
+ __ mov(arg_count.reg(), Operand(1));
+ frame_->InvokeBuiltin(Builtins::DIV, CALL_JS, &arg_count, 2);
+ break;
+ }
+
+ case Token::MOD: {
+ Result arg_count = allocator_->Allocate(r0);
+ ASSERT(arg_count.is_valid());
+ __ mov(arg_count.reg(), Operand(1));
+ frame_->InvokeBuiltin(Builtins::MOD, CALL_JS, &arg_count, 2);
+ break;
+ }
+
+ case Token::COMMA:
+ frame_->EmitPop(r0);
+ // simply discard left value
+ frame_->Drop();
+ break;
+
+ default:
+ // Other cases should have been handled before this point.
+ UNREACHABLE();
+ break;
+ }
+}
+
+
+class DeferredInlineSmiOperation: public DeferredCode {
+ public:
+ DeferredInlineSmiOperation(Token::Value op,
+ int value,
+ bool reversed,
+ OverwriteMode overwrite_mode)
+ : op_(op),
+ value_(value),
+ reversed_(reversed),
+ overwrite_mode_(overwrite_mode) {
+ set_comment("[ DeferredInlinedSmiOperation");
+ }
+
+ virtual void Generate();
+
+ private:
+ Token::Value op_;
+ int value_;
+ bool reversed_;
+ OverwriteMode overwrite_mode_;
+};
+
+
+void DeferredInlineSmiOperation::Generate() {
+ switch (op_) {
+ case Token::ADD: {
+ // Revert optimistic add.
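+      // (The inline code optimistically added the constant to r0 and
+      // jumped here on overflow or a non-smi result; subtracting it
+      // back recovers the original operand, and r0/r1 then hold the
+      // operands in the order the generic stub expects.)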
+ if (reversed_) {
+ __ sub(r0, r0, Operand(Smi::FromInt(value_)));
+ __ mov(r1, Operand(Smi::FromInt(value_)));
+ } else {
+ __ sub(r1, r0, Operand(Smi::FromInt(value_)));
+ __ mov(r0, Operand(Smi::FromInt(value_)));
+ }
+ break;
+ }
+
+ case Token::SUB: {
+ // Revert optimistic sub.
+ if (reversed_) {
+ __ rsb(r0, r0, Operand(Smi::FromInt(value_)));
+ __ mov(r1, Operand(Smi::FromInt(value_)));
+ } else {
+ __ add(r1, r0, Operand(Smi::FromInt(value_)));
+ __ mov(r0, Operand(Smi::FromInt(value_)));
+ }
+ break;
+ }
+
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND: {
+ if (reversed_) {
+ __ mov(r1, Operand(Smi::FromInt(value_)));
+ } else {
+ __ mov(r1, Operand(r0));
+ __ mov(r0, Operand(Smi::FromInt(value_)));
+ }
+ break;
+ }
+
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR: {
+ if (!reversed_) {
+ __ mov(r1, Operand(r0));
+ __ mov(r0, Operand(Smi::FromInt(value_)));
+ } else {
+ UNREACHABLE(); // Should have been handled in SmiOperation.
+ }
+ break;
+ }
+
+ default:
+ // Other cases should have been handled before this point.
+ UNREACHABLE();
+ break;
+ }
+
+ GenericBinaryOpStub stub(op_, overwrite_mode_);
+ __ CallStub(&stub);
+}
+
+
+void CodeGenerator::SmiOperation(Token::Value op,
+ Handle<Object> value,
+ bool reversed,
+ OverwriteMode mode) {
+ VirtualFrame::SpilledScope spilled_scope;
+ // NOTE: This is an attempt to inline (a bit) more of the code for
+ // some possible smi operations (like + and -) when (at least) one
+ // of the operands is a literal smi. With this optimization, the
+ // performance of the system is increased by ~15%, and the generated
+ // code size is increased by ~1% (measured on a combination of
+ // different benchmarks).
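+  // For instance, for 'x + 3' the code below emits an optimistic
+  // 'add r0, r0, #6' (Smi::FromInt(3) is 6 with the one-bit smi tag
+  // used on this port) followed by overflow and tag checks, and only
+  // falls back to the generic stub via DeferredInlineSmiOperation in
+  // the uncommon non-smi case.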
+
+ // sp[0] : operand
+
+ int int_value = Smi::cast(*value)->value();
+
+ JumpTarget exit;
+ frame_->EmitPop(r0);
+
+ switch (op) {
+ case Token::ADD: {
+ DeferredCode* deferred =
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode);
+
+ __ add(r0, r0, Operand(value), SetCC);
+ deferred->Branch(vs);
+ __ tst(r0, Operand(kSmiTagMask));
+ deferred->Branch(ne);
+ deferred->BindExit();
+ break;
+ }
+
+ case Token::SUB: {
+ DeferredCode* deferred =
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode);
+
+ if (reversed) {
+ __ rsb(r0, r0, Operand(value), SetCC);
+ } else {
+ __ sub(r0, r0, Operand(value), SetCC);
+ }
+ deferred->Branch(vs);
+ __ tst(r0, Operand(kSmiTagMask));
+ deferred->Branch(ne);
+ deferred->BindExit();
+ break;
+ }
+
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND: {
+ DeferredCode* deferred =
+ new DeferredInlineSmiOperation(op, int_value, reversed, mode);
+ __ tst(r0, Operand(kSmiTagMask));
+ deferred->Branch(ne);
+ switch (op) {
+ case Token::BIT_OR: __ orr(r0, r0, Operand(value)); break;
+ case Token::BIT_XOR: __ eor(r0, r0, Operand(value)); break;
+ case Token::BIT_AND: __ and_(r0, r0, Operand(value)); break;
+ default: UNREACHABLE();
+ }
+ deferred->BindExit();
+ break;
+ }
+
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR: {
+ if (reversed) {
+ __ mov(ip, Operand(value));
+ frame_->EmitPush(ip);
+ frame_->EmitPush(r0);
+ GenericBinaryOperation(op, mode);
+
+ } else {
+ int shift_value = int_value & 0x1f; // least significant 5 bits
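+        // (The mask mirrors ECMA-262 section 11.7, which reduces the
+        // shift count modulo 32 before shifting.)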
+ DeferredCode* deferred =
+ new DeferredInlineSmiOperation(op, shift_value, false, mode);
+ __ tst(r0, Operand(kSmiTagMask));
+ deferred->Branch(ne);
+ __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags
+ switch (op) {
+ case Token::SHL: {
+ __ mov(r2, Operand(r2, LSL, shift_value));
+ // check that the *unsigned* result fits in a smi
+ __ add(r3, r2, Operand(0x40000000), SetCC);
+ deferred->Branch(mi);
+ break;
+ }
+ case Token::SHR: {
+ // LSR by immediate 0 means shifting 32 bits.
+ if (shift_value != 0) {
+ __ mov(r2, Operand(r2, LSR, shift_value));
+ }
+            // check that the *unsigned* result fits in a smi:
+            // neither of the two high-order bits can be set,
+            // - 0x80000000: the high bit would be lost when smi tagging
+            // - 0x40000000: this number would convert to negative when
+            //   smi tagging
+            // These two cases can only happen with shifts by 0 or 1
+            // when handed a valid smi.
+ __ and_(r3, r2, Operand(0xc0000000), SetCC);
+ deferred->Branch(ne);
+ break;
+ }
+ case Token::SAR: {
+ if (shift_value != 0) {
+ // ASR by immediate 0 means shifting 32 bits.
+ __ mov(r2, Operand(r2, ASR, shift_value));
+ }
+ break;
+ }
+ default: UNREACHABLE();
+ }
+ __ mov(r0, Operand(r2, LSL, kSmiTagSize));
+ deferred->BindExit();
+ }
+ break;
+ }
+
+ default:
+ if (!reversed) {
+ frame_->EmitPush(r0);
+ __ mov(r0, Operand(value));
+ frame_->EmitPush(r0);
+ } else {
+ __ mov(ip, Operand(value));
+ frame_->EmitPush(ip);
+ frame_->EmitPush(r0);
+ }
+ GenericBinaryOperation(op, mode);
+ break;
+ }
+
+ exit.Bind();
+}
+
+
+void CodeGenerator::Comparison(Condition cc, bool strict) {
+ VirtualFrame::SpilledScope spilled_scope;
+ // sp[0] : y
+ // sp[1] : x
+ // result : cc register
+
+ // Strict only makes sense for equality comparisons.
+ ASSERT(!strict || cc == eq);
+
+ JumpTarget exit;
+ JumpTarget smi;
+ // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
+ if (cc == gt || cc == le) {
+ cc = ReverseCondition(cc);
+ frame_->EmitPop(r1);
+ frame_->EmitPop(r0);
+ } else {
+ frame_->EmitPop(r0);
+ frame_->EmitPop(r1);
+ }
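+  // (Only 'lt' and 'ge' reach the code below directly: 'x > y' is
+  // evaluated as 'y < x' on the swapped operands, which is exactly how
+  // ECMA-262 defines the relational operators in terms of the abstract
+  // '<' comparison.)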
+ __ orr(r2, r0, Operand(r1));
+ __ tst(r2, Operand(kSmiTagMask));
+ smi.Branch(eq);
+
+ // Perform non-smi comparison by runtime call.
+ frame_->EmitPush(r1);
+
+ // Figure out which native to call and setup the arguments.
+ Builtins::JavaScript native;
+ int arg_count = 1;
+ if (cc == eq) {
+ native = strict ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ native = Builtins::COMPARE;
+ int ncr; // NaN compare result
+ if (cc == lt || cc == le) {
+ ncr = GREATER;
+ } else {
+ ASSERT(cc == gt || cc == ge); // remaining cases
+ ncr = LESS;
+ }
+ frame_->EmitPush(r0);
+ arg_count++;
+ __ mov(r0, Operand(Smi::FromInt(ncr)));
+ }
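+  // (When either operand is NaN, COMPARE answers with the ncr value
+  // pushed here; using GREATER for 'lt'/'le' and LESS for 'gt'/'ge'
+  // guarantees that the condition-code test below fails, so any
+  // comparison involving NaN comes out false, as ECMA-262 requires.)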
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ frame_->EmitPush(r0);
+ Result arg_count_register = allocator_->Allocate(r0);
+ ASSERT(arg_count_register.is_valid());
+ __ mov(arg_count_register.reg(), Operand(arg_count));
+ Result result = frame_->InvokeBuiltin(native,
+ CALL_JS,
+ &arg_count_register,
+ arg_count + 1);
+ __ cmp(result.reg(), Operand(0));
+ result.Unuse();
+ exit.Jump();
+
+ // test smi equality by pointer comparison.
+ smi.Bind();
+ __ cmp(r1, Operand(r0));
+
+ exit.Bind();
+ cc_reg_ = cc;
+}
+
+
+class CallFunctionStub: public CodeStub {
+ public:
+ CallFunctionStub(int argc, InLoopFlag in_loop)
+ : argc_(argc), in_loop_(in_loop) {}
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ int argc_;
+ InLoopFlag in_loop_;
+
+#if defined(DEBUG)
+ void Print() { PrintF("CallFunctionStub (argc %d)\n", argc_); }
+#endif // defined(DEBUG)
+
+ Major MajorKey() { return CallFunction; }
+ int MinorKey() { return argc_; }
+ InLoopFlag InLoop() { return in_loop_; }
+};
+
+
+// Call the function on the stack with the given arguments.
+void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
+ int position) {
+ VirtualFrame::SpilledScope spilled_scope;
+ // Push the arguments ("left-to-right") on the stack.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ LoadAndSpill(args->at(i));
+ }
+
+ // Record the position for debugging purposes.
+ CodeForSourcePosition(position);
+
+ // Use the shared code stub to call the function.
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop);
+ frame_->CallStub(&call_function, arg_count + 1);
+
+ // Restore context and pop function from the stack.
+ __ ldr(cp, frame_->Context());
+ frame_->Drop(); // discard the TOS
+}
+
+
+void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(has_cc());
+ Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
+ target->Branch(cc);
+ cc_reg_ = al;
+}
+
+
+void CodeGenerator::CheckStack() {
+ VirtualFrame::SpilledScope spilled_scope;
+ if (FLAG_check_stack) {
+ Comment cmnt(masm_, "[ check stack");
+ StackCheckStub stub;
+ frame_->CallStub(&stub, 0);
+ }
+}
+
+
+void CodeGenerator::VisitAndSpill(Statement* statement) {
+ ASSERT(in_spilled_code());
+ set_in_spilled_code(false);
+ Visit(statement);
+ if (frame_ != NULL) {
+ frame_->SpillAll();
+ }
+ set_in_spilled_code(true);
+}
+
+
+void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
+ ASSERT(in_spilled_code());
+ set_in_spilled_code(false);
+ VisitStatements(statements);
+ if (frame_ != NULL) {
+ frame_->SpillAll();
+ }
+ set_in_spilled_code(true);
+}
+
+
+void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
+ VisitAndSpill(statements->at(i));
+ }
+ ASSERT(!has_valid_frame() || frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitBlock(Block* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ Block");
+ CodeForStatementPosition(node);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ VisitStatementsAndSpill(node->statements());
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ node->break_target()->Unuse();
+ ASSERT(!has_valid_frame() || frame_->height() == original_height);
+}
+
+
+void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ VirtualFrame::SpilledScope spilled_scope;
+ __ mov(r0, Operand(pairs));
+ frame_->EmitPush(r0);
+ frame_->EmitPush(cp);
+ __ mov(r0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
+ // The result is discarded.
+}
+
+
+void CodeGenerator::VisitDeclaration(Declaration* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ Declaration");
+ CodeForStatementPosition(node);
+ Variable* var = node->proxy()->var();
+ ASSERT(var != NULL); // must have been resolved
+ Slot* slot = var->slot();
+
+ // If it was not possible to allocate the variable at compile time,
+ // we need to "declare" it at runtime to make sure it actually
+ // exists in the local context.
+ if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ // Variables with a "LOOKUP" slot were introduced as non-locals
+ // during variable resolution and must have mode DYNAMIC.
+ ASSERT(var->is_dynamic());
+ // For now, just do a runtime call.
+ frame_->EmitPush(cp);
+ __ mov(r0, Operand(var->name()));
+ frame_->EmitPush(r0);
+ // Declaration nodes are always declared in only two modes.
+ ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
+ PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
+ __ mov(r0, Operand(Smi::FromInt(attr)));
+ frame_->EmitPush(r0);
+ // Push initial value, if any.
+ // Note: For variables we must not push an initial value (such as
+ // 'undefined') because we may have a (legal) redeclaration and we
+ // must not destroy the current value.
+ if (node->mode() == Variable::CONST) {
+ __ mov(r0, Operand(Factory::the_hole_value()));
+ frame_->EmitPush(r0);
+ } else if (node->fun() != NULL) {
+ LoadAndSpill(node->fun());
+ } else {
+ __ mov(r0, Operand(0)); // no initial value!
+ frame_->EmitPush(r0);
+ }
+ frame_->CallRuntime(Runtime::kDeclareContextSlot, 4);
+ // Ignore the return value (declarations are statements).
+ ASSERT(frame_->height() == original_height);
+ return;
+ }
+
+ ASSERT(!var->is_global());
+
+ // If we have a function or a constant, we need to initialize the variable.
+ Expression* val = NULL;
+ if (node->mode() == Variable::CONST) {
+ val = new Literal(Factory::the_hole_value());
+ } else {
+ val = node->fun(); // NULL if we don't have a function
+ }
+
+ if (val != NULL) {
+ {
+ // Set initial value.
+ Reference target(this, node->proxy());
+ LoadAndSpill(val);
+ target.SetValue(NOT_CONST_INIT);
+ // The reference is removed from the stack (preserving TOS) when
+ // it goes out of scope.
+ }
+ // Get rid of the assigned value (declarations are statements).
+ frame_->Drop();
+ }
+ ASSERT(frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ ExpressionStatement");
+ CodeForStatementPosition(node);
+ Expression* expression = node->expression();
+ expression->MarkAsStatement();
+ LoadAndSpill(expression);
+ frame_->Drop();
+ ASSERT(frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "// EmptyStatement");
+ CodeForStatementPosition(node);
+ // nothing to do
+ ASSERT(frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitIfStatement(IfStatement* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ IfStatement");
+ // Generate different code depending on which parts of the if statement
+ // are present or not.
+ bool has_then_stm = node->HasThenStatement();
+ bool has_else_stm = node->HasElseStatement();
+
+ CodeForStatementPosition(node);
+
+ JumpTarget exit;
+ if (has_then_stm && has_else_stm) {
+ Comment cmnt(masm_, "[ IfThenElse");
+ JumpTarget then;
+ JumpTarget else_;
+ // if (cond)
+ LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
+ &then, &else_, true);
+ if (frame_ != NULL) {
+ Branch(false, &else_);
+ }
+ // then
+ if (frame_ != NULL || then.is_linked()) {
+ then.Bind();
+ VisitAndSpill(node->then_statement());
+ }
+ if (frame_ != NULL) {
+ exit.Jump();
+ }
+ // else
+ if (else_.is_linked()) {
+ else_.Bind();
+ VisitAndSpill(node->else_statement());
+ }
+
+ } else if (has_then_stm) {
+ Comment cmnt(masm_, "[ IfThen");
+ ASSERT(!has_else_stm);
+ JumpTarget then;
+ // if (cond)
+ LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
+ &then, &exit, true);
+ if (frame_ != NULL) {
+ Branch(false, &exit);
+ }
+ // then
+ if (frame_ != NULL || then.is_linked()) {
+ then.Bind();
+ VisitAndSpill(node->then_statement());
+ }
+
+ } else if (has_else_stm) {
+ Comment cmnt(masm_, "[ IfElse");
+ ASSERT(!has_then_stm);
+ JumpTarget else_;
+ // if (!cond)
+ LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
+ &exit, &else_, true);
+ if (frame_ != NULL) {
+ Branch(true, &exit);
+ }
+ // else
+ if (frame_ != NULL || else_.is_linked()) {
+ else_.Bind();
+ VisitAndSpill(node->else_statement());
+ }
+
+ } else {
+ Comment cmnt(masm_, "[ If");
+ ASSERT(!has_then_stm && !has_else_stm);
+ // if (cond)
+ LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
+ &exit, &exit, false);
+ if (frame_ != NULL) {
+ if (has_cc()) {
+ cc_reg_ = al;
+ } else {
+ frame_->Drop();
+ }
+ }
+ }
+
+ // end
+ if (exit.is_linked()) {
+ exit.Bind();
+ }
+ ASSERT(!has_valid_frame() || frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ ContinueStatement");
+ CodeForStatementPosition(node);
+ node->target()->continue_target()->Jump();
+}
+
+
+void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ BreakStatement");
+ CodeForStatementPosition(node);
+ node->target()->break_target()->Jump();
+}
+
+
+void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ ReturnStatement");
+
+ if (function_return_is_shadowed_) {
+ CodeForStatementPosition(node);
+ LoadAndSpill(node->expression());
+ frame_->EmitPop(r0);
+ function_return_.Jump();
+ } else {
+ // Load the returned value.
+ CodeForStatementPosition(node);
+ LoadAndSpill(node->expression());
+
+ // Pop the result from the frame and prepare the frame for
+ // returning thus making it easier to merge.
+ frame_->EmitPop(r0);
+ frame_->PrepareForReturn();
+
+ function_return_.Jump();
+ }
+}
+
+
+void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ WithEnterStatement");
+ CodeForStatementPosition(node);
+ LoadAndSpill(node->expression());
+ if (node->is_catch_block()) {
+ frame_->CallRuntime(Runtime::kPushCatchContext, 1);
+ } else {
+ frame_->CallRuntime(Runtime::kPushContext, 1);
+ }
+#ifdef DEBUG
+ JumpTarget verified_true;
+ __ cmp(r0, Operand(cp));
+ verified_true.Branch(eq);
+ __ stop("PushContext: r0 is expected to be the same as cp");
+ verified_true.Bind();
+#endif
+ // Update context local.
+ __ str(cp, frame_->Context());
+ ASSERT(frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ WithExitStatement");
+ CodeForStatementPosition(node);
+ // Pop context.
+ __ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX));
+ // Update context local.
+ __ str(cp, frame_->Context());
+ ASSERT(frame_->height() == original_height);
+}
+
+
+int CodeGenerator::FastCaseSwitchMaxOverheadFactor() {
+ return kFastSwitchMaxOverheadFactor;
+}
+
+int CodeGenerator::FastCaseSwitchMinCaseCount() {
+ return kFastSwitchMinCaseCount;
+}
+
+
+void CodeGenerator::GenerateFastCaseSwitchJumpTable(
+ SwitchStatement* node,
+ int min_index,
+ int range,
+ Label* default_label,
+ Vector<Label*> case_targets,
+ Vector<Label> case_labels) {
+ VirtualFrame::SpilledScope spilled_scope;
+ JumpTarget setup_default;
+ JumpTarget is_smi;
+
+ // A non-null default label pointer indicates a default case among
+ // the case labels. Otherwise we use the break target as a
+ // "default" for failure to hit the jump table.
+ JumpTarget* default_target =
+ (default_label == NULL) ? node->break_target() : &setup_default;
+
+ ASSERT(kSmiTag == 0 && kSmiTagSize <= 2);
+ frame_->EmitPop(r0);
+
+  // Test for a smi value, possibly boxed in a HeapNumber.
+ __ tst(r0, Operand(kSmiTagMask));
+ is_smi.Branch(eq);
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
+ __ cmp(r1, Operand(HEAP_NUMBER_TYPE));
+ default_target->Branch(ne);
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kNumberToSmi, 1);
+ is_smi.Bind();
+
+ if (min_index != 0) {
+ // Small positive numbers can be immediate operands.
+ if (min_index < 0) {
+ // If min_index is Smi::kMinValue, -min_index is not a Smi.
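+    // (Smis cover [-2^30, 2^30 - 1] on this 32-bit port, so negating
+    // kMinValue would yield the out-of-range value 2^30; adding the
+    // adjustment in two in-range steps avoids this.)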
+ if (Smi::IsValid(-min_index)) {
+ __ add(r0, r0, Operand(Smi::FromInt(-min_index)));
+ } else {
+ __ add(r0, r0, Operand(Smi::FromInt(-min_index - 1)));
+ __ add(r0, r0, Operand(Smi::FromInt(1)));
+ }
+ } else {
+ __ sub(r0, r0, Operand(Smi::FromInt(min_index)));
+ }
+ }
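+  // A single test rejects both a negative adjusted index (bit 31 set)
+  // and a non-smi (tag bit set).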
+ __ tst(r0, Operand(0x80000000 | kSmiTagMask));
+ default_target->Branch(ne);
+ __ cmp(r0, Operand(Smi::FromInt(range)));
+ default_target->Branch(ge);
+ VirtualFrame* start_frame = new VirtualFrame(frame_);
+ __ SmiJumpTable(r0, case_targets);
+
+ GenerateFastCaseSwitchCases(node, case_labels, start_frame);
+
+ // If there was a default case among the case labels, we need to
+ // emit code to jump to it from the default target used for failure
+ // to hit the jump table.
+ if (default_label != NULL) {
+ if (has_valid_frame()) {
+ node->break_target()->Jump();
+ }
+ setup_default.Bind();
+ frame_->MergeTo(start_frame);
+ __ b(default_label);
+ DeleteFrame();
+ }
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+}
+
+
+void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ SwitchStatement");
+ CodeForStatementPosition(node);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+ LoadAndSpill(node->tag());
+ if (TryGenerateFastCaseSwitchStatement(node)) {
+ ASSERT(!has_valid_frame() || frame_->height() == original_height);
+ return;
+ }
+
+ JumpTarget next_test;
+ JumpTarget fall_through;
+ JumpTarget default_entry;
+ JumpTarget default_exit(JumpTarget::BIDIRECTIONAL);
+ ZoneList<CaseClause*>* cases = node->cases();
+ int length = cases->length();
+ CaseClause* default_clause = NULL;
+
+ for (int i = 0; i < length; i++) {
+ CaseClause* clause = cases->at(i);
+ if (clause->is_default()) {
+ // Remember the default clause and compile it at the end.
+ default_clause = clause;
+ continue;
+ }
+
+ Comment cmnt(masm_, "[ Case clause");
+ // Compile the test.
+ next_test.Bind();
+ next_test.Unuse();
+ // Duplicate TOS.
+ __ ldr(r0, frame_->Top());
+ frame_->EmitPush(r0);
+ LoadAndSpill(clause->label());
+ Comparison(eq, true);
+ Branch(false, &next_test);
+
+ // Before entering the body from the test, remove the switch value from
+ // the stack.
+ frame_->Drop();
+
+ // Label the body so that fall through is enabled.
+ if (i > 0 && cases->at(i - 1)->is_default()) {
+ default_exit.Bind();
+ } else {
+ fall_through.Bind();
+ fall_through.Unuse();
+ }
+ VisitStatementsAndSpill(clause->statements());
+
+ // If control flow can fall through from the body, jump to the next body
+ // or the end of the statement.
+ if (frame_ != NULL) {
+ if (i < length - 1 && cases->at(i + 1)->is_default()) {
+ default_entry.Jump();
+ } else {
+ fall_through.Jump();
+ }
+ }
+ }
+
+ // The final "test" removes the switch value.
+ next_test.Bind();
+ frame_->Drop();
+
+ // If there is a default clause, compile it.
+ if (default_clause != NULL) {
+ Comment cmnt(masm_, "[ Default clause");
+ default_entry.Bind();
+ VisitStatementsAndSpill(default_clause->statements());
+    // If control flow can fall out of the default and there is a case
+    // after it, jump to that case's body.
+ if (frame_ != NULL && default_exit.is_bound()) {
+ default_exit.Jump();
+ }
+ }
+
+ if (fall_through.is_linked()) {
+ fall_through.Bind();
+ }
+
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ node->break_target()->Unuse();
+ ASSERT(!has_valid_frame() || frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ LoopStatement");
+ CodeForStatementPosition(node);
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+ // Simple condition analysis. ALWAYS_TRUE and ALWAYS_FALSE represent a
+ // known result for the test expression, with no side effects.
+ enum { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW } info = DONT_KNOW;
+ if (node->cond() == NULL) {
+ ASSERT(node->type() == LoopStatement::FOR_LOOP);
+ info = ALWAYS_TRUE;
+ } else {
+ Literal* lit = node->cond()->AsLiteral();
+ if (lit != NULL) {
+ if (lit->IsTrue()) {
+ info = ALWAYS_TRUE;
+ } else if (lit->IsFalse()) {
+ info = ALWAYS_FALSE;
+ }
+ }
+ }
+
+ switch (node->type()) {
+ case LoopStatement::DO_LOOP: {
+ JumpTarget body(JumpTarget::BIDIRECTIONAL);
+
+ // Label the top of the loop for the backward CFG edge. If the test
+ // is always true we can use the continue target, and if the test is
+ // always false there is no need.
+ if (info == ALWAYS_TRUE) {
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ } else if (info == ALWAYS_FALSE) {
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ } else {
+ ASSERT(info == DONT_KNOW);
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ body.Bind();
+ }
+
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ VisitAndSpill(node->body());
+
+ // Compile the test.
+ if (info == ALWAYS_TRUE) {
+ if (has_valid_frame()) {
+ // If control can fall off the end of the body, jump back to the
+ // top.
+ node->continue_target()->Jump();
+ }
+ } else if (info == ALWAYS_FALSE) {
+ // If we have a continue in the body, we only have to bind its jump
+ // target.
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
+ } else {
+ ASSERT(info == DONT_KNOW);
+ // We have to compile the test expression if it can be reached by
+ // control flow falling out of the body or via continue.
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
+ if (has_valid_frame()) {
+ LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
+ &body, node->break_target(), true);
+ if (has_valid_frame()) {
+            // An invalid frame here would indicate that control did
+            // not fall out of the test expression.
+ Branch(true, &body);
+ }
+ }
+ }
+ break;
+ }
+
+ case LoopStatement::WHILE_LOOP: {
+ // If the test is never true and has no side effects there is no need
+ // to compile the test or body.
+ if (info == ALWAYS_FALSE) break;
+
+ // Label the top of the loop with the continue target for the backward
+ // CFG edge.
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+
+ if (info == DONT_KNOW) {
+ JumpTarget body;
+ LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
+ &body, node->break_target(), true);
+ if (has_valid_frame()) {
+ // A NULL frame indicates that control did not fall out of the
+ // test expression.
+ Branch(false, node->break_target());
+ }
+ if (has_valid_frame() || body.is_linked()) {
+ body.Bind();
+ }
+ }
+
+ if (has_valid_frame()) {
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ VisitAndSpill(node->body());
+
+ // If control flow can fall out of the body, jump back to the top.
+ if (has_valid_frame()) {
+ node->continue_target()->Jump();
+ }
+ }
+ break;
+ }
+
+ case LoopStatement::FOR_LOOP: {
+ JumpTarget loop(JumpTarget::BIDIRECTIONAL);
+
+ if (node->init() != NULL) {
+ VisitAndSpill(node->init());
+ }
+
+ // There is no need to compile the test or body.
+ if (info == ALWAYS_FALSE) break;
+
+ // If there is no update statement, label the top of the loop with the
+ // continue target, otherwise with the loop target.
+ if (node->next() == NULL) {
+ node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL);
+ node->continue_target()->Bind();
+ } else {
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ loop.Bind();
+ }
+
+ // If the test is always true, there is no need to compile it.
+ if (info == DONT_KNOW) {
+ JumpTarget body;
+ LoadConditionAndSpill(node->cond(), NOT_INSIDE_TYPEOF,
+ &body, node->break_target(), true);
+ if (has_valid_frame()) {
+ Branch(false, node->break_target());
+ }
+ if (has_valid_frame() || body.is_linked()) {
+ body.Bind();
+ }
+ }
+
+ if (has_valid_frame()) {
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ VisitAndSpill(node->body());
+
+ if (node->next() == NULL) {
+ // If there is no update statement and control flow can fall out
+ // of the loop, jump directly to the continue label.
+ if (has_valid_frame()) {
+ node->continue_target()->Jump();
+ }
+ } else {
+ // If there is an update statement and control flow can reach it
+ // via falling out of the body of the loop or continuing, we
+ // compile the update statement.
+ if (node->continue_target()->is_linked()) {
+ node->continue_target()->Bind();
+ }
+ if (has_valid_frame()) {
+          // Record the source position of the statement: this code is
+          // emitted after the code for the body, but it belongs to the
+          // loop statement and not to the body.
+ CodeForStatementPosition(node);
+ VisitAndSpill(node->next());
+ loop.Jump();
+ }
+ }
+ }
+ break;
+ }
+ }
+
+ if (node->break_target()->is_linked()) {
+ node->break_target()->Bind();
+ }
+ node->continue_target()->Unuse();
+ node->break_target()->Unuse();
+ ASSERT(!has_valid_frame() || frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitForInStatement(ForInStatement* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ ASSERT(!in_spilled_code());
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ ForInStatement");
+ CodeForStatementPosition(node);
+
+ JumpTarget primitive;
+ JumpTarget jsobject;
+ JumpTarget fixed_array;
+ JumpTarget entry(JumpTarget::BIDIRECTIONAL);
+ JumpTarget end_del_check;
+ JumpTarget exit;
+
+ // Get the object to enumerate over (converted to JSObject).
+ LoadAndSpill(node->enumerable());
+
+  // Both SpiderMonkey and kjs ignore null and undefined, in contrast
+  // to the specification; section 12.6.4 mandates a call to ToObject.
+ frame_->EmitPop(r0);
+ __ cmp(r0, Operand(Factory::undefined_value()));
+ exit.Branch(eq);
+ __ cmp(r0, Operand(Factory::null_value()));
+ exit.Branch(eq);
+
+ // Stack layout in body:
+ // [iteration counter (Smi)]
+ // [length of array]
+ // [FixedArray]
+ // [Map or 0]
+ // [Object]
+
+ // Check if enumerable is already a JSObject
+ __ tst(r0, Operand(kSmiTagMask));
+ primitive.Branch(eq);
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
+ __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
+ jsobject.Branch(hs);
+
+ primitive.Bind();
+ frame_->EmitPush(r0);
+ Result arg_count = allocator_->Allocate(r0);
+ ASSERT(arg_count.is_valid());
+ __ mov(arg_count.reg(), Operand(0));
+ frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, &arg_count, 1);
+
+ jsobject.Bind();
+ // Get the set of properties (as a FixedArray or Map).
+ frame_->EmitPush(r0); // duplicate the object being enumerated
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+ // If we got a Map, we can do a fast modification check.
+ // Otherwise, we got a FixedArray, and we have to do a slow check.
+ __ mov(r2, Operand(r0));
+ __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ cmp(r1, Operand(Factory::meta_map()));
+ fixed_array.Branch(ne);
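+  // (A Map is itself a heap object whose map is the meta map, so this
+  // compare reliably distinguishes a Map result from a FixedArray.)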
+
+ // Get enum cache
+ __ mov(r1, Operand(r0));
+ __ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset));
+ __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset));
+ __ ldr(r2,
+ FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+ frame_->EmitPush(r0); // map
+ frame_->EmitPush(r2); // enum cache bridge cache
+ __ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ frame_->EmitPush(r0);
+ __ mov(r0, Operand(Smi::FromInt(0)));
+ frame_->EmitPush(r0);
+ entry.Jump();
+
+ fixed_array.Bind();
+ __ mov(r1, Operand(Smi::FromInt(0)));
+ frame_->EmitPush(r1); // insert 0 in place of Map
+ frame_->EmitPush(r0);
+
+ // Push the length of the array and the initial index onto the stack.
+ __ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ frame_->EmitPush(r0);
+ __ mov(r0, Operand(Smi::FromInt(0))); // init index
+ frame_->EmitPush(r0);
+
+ // Condition.
+ entry.Bind();
+ // sp[0] : index
+ // sp[1] : array/enum cache length
+ // sp[2] : array or enum cache
+ // sp[3] : 0 or map
+ // sp[4] : enumerable
+ // Grab the current frame's height for the break and continue
+ // targets only after all the state is pushed on the frame.
+ node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
+ node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY);
+
+ __ ldr(r0, frame_->ElementAt(0)); // load the current count
+ __ ldr(r1, frame_->ElementAt(1)); // load the length
+ __ cmp(r0, Operand(r1)); // compare to the array length
+ node->break_target()->Branch(hs);
+
+ __ ldr(r0, frame_->ElementAt(0));
+
+ // Get the i'th entry of the array.
+ __ ldr(r2, frame_->ElementAt(2));
+ __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+
+ // Get Map or 0.
+ __ ldr(r2, frame_->ElementAt(3));
+ // Check if this (still) matches the map of the enumerable.
+ // If not, we have to filter the key.
+ __ ldr(r1, frame_->ElementAt(4));
+ __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ cmp(r1, Operand(r2));
+ end_del_check.Branch(eq);
+
+ // Convert the entry to a string (or null if it isn't a property anymore).
+ __ ldr(r0, frame_->ElementAt(4)); // push enumerable
+ frame_->EmitPush(r0);
+ frame_->EmitPush(r3); // push entry
+ Result arg_count_register = allocator_->Allocate(r0);
+ ASSERT(arg_count_register.is_valid());
+ __ mov(arg_count_register.reg(), Operand(1));
+ Result result = frame_->InvokeBuiltin(Builtins::FILTER_KEY,
+ CALL_JS,
+ &arg_count_register,
+ 2);
+ __ mov(r3, Operand(result.reg()));
+ result.Unuse();
+
+ // If the property has been removed while iterating, we just skip it.
+ __ cmp(r3, Operand(Factory::null_value()));
+ node->continue_target()->Branch(eq);
+
+ end_del_check.Bind();
+ // Store the entry in the 'each' expression and take another spin in the
+  // loop. r3: i'th entry of the enum cache (or the string thereof).
+ frame_->EmitPush(r3); // push entry
+ { Reference each(this, node->each());
+ if (!each.is_illegal()) {
+ if (each.size() > 0) {
+ __ ldr(r0, frame_->ElementAt(each.size()));
+ frame_->EmitPush(r0);
+ }
+ // If the reference was to a slot we rely on the convenient property
+      // that it doesn't matter whether a value (e.g., r3 pushed above) is
+ // right on top of or right underneath a zero-sized reference.
+ each.SetValue(NOT_CONST_INIT);
+ if (each.size() > 0) {
+ // It's safe to pop the value lying on top of the reference before
+ // unloading the reference itself (which preserves the top of stack,
+ // ie, now the topmost value of the non-zero sized reference), since
+ // we will discard the top of stack after unloading the reference
+ // anyway.
+ frame_->EmitPop(r0);
+ }
+ }
+ }
+ // Discard the i'th entry pushed above or else the remainder of the
+ // reference, whichever is currently on top of the stack.
+ frame_->Drop();
+
+ // Body.
+ CheckStack(); // TODO(1222600): ignore if body contains calls.
+ VisitAndSpill(node->body());
+
+ // Next. Reestablish a spilled frame in case we are coming here via
+ // a continue in the body.
+ node->continue_target()->Bind();
+ frame_->SpillAll();
+ frame_->EmitPop(r0);
+ __ add(r0, r0, Operand(Smi::FromInt(1)));
+ frame_->EmitPush(r0);
+ entry.Jump();
+
+ // Cleanup. No need to spill because VirtualFrame::Drop is safe for
+ // any frame.
+ node->break_target()->Bind();
+ frame_->Drop(5);
+
+ // Exit.
+ exit.Bind();
+ node->continue_target()->Unuse();
+ node->break_target()->Unuse();
+ ASSERT(frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitTryCatch(TryCatch* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ TryCatch");
+ CodeForStatementPosition(node);
+
+ JumpTarget try_block;
+ JumpTarget exit;
+
+ try_block.Call();
+ // --- Catch block ---
+ frame_->EmitPush(r0);
+
+ // Store the caught exception in the catch variable.
+ { Reference ref(this, node->catch_var());
+ ASSERT(ref.is_slot());
+ // Here we make use of the convenient property that it doesn't matter
+ // whether a value is immediately on top of or underneath a zero-sized
+ // reference.
+ ref.SetValue(NOT_CONST_INIT);
+ }
+
+ // Remove the exception from the stack.
+ frame_->Drop();
+
+ VisitStatementsAndSpill(node->catch_block()->statements());
+ if (frame_ != NULL) {
+ exit.Jump();
+ }
+
+ // --- Try block ---
+ try_block.Bind();
+
+ frame_->PushTryHandler(TRY_CATCH_HANDLER);
+ int handler_height = frame_->height();
+
+ // Shadow the labels for all escapes from the try block, including
+ // returns. During shadowing, the original label is hidden as the
+ // LabelShadow and operations on the original actually affect the
+ // shadowing label.
+ //
+ // We should probably try to unify the escaping labels and the return
+ // label.
+ int nof_escapes = node->escaping_targets()->length();
+ List<ShadowTarget*> shadows(1 + nof_escapes);
+
+ // Add the shadow target for the function return.
+ static const int kReturnShadowIndex = 0;
+ shadows.Add(new ShadowTarget(&function_return_));
+ bool function_return_was_shadowed = function_return_is_shadowed_;
+ function_return_is_shadowed_ = true;
+ ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
+
+ // Add the remaining shadow targets.
+ for (int i = 0; i < nof_escapes; i++) {
+ shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
+ }
+
+ // Generate code for the statements in the try block.
+ VisitStatementsAndSpill(node->try_block()->statements());
+
+ // Stop the introduced shadowing and count the number of required unlinks.
+ // After shadowing stops, the original labels are unshadowed and the
+ // LabelShadows represent the formerly shadowing labels.
+ bool has_unlinks = false;
+ for (int i = 0; i < shadows.length(); i++) {
+ shadows[i]->StopShadowing();
+ has_unlinks = has_unlinks || shadows[i]->is_linked();
+ }
+ function_return_is_shadowed_ = function_return_was_shadowed;
+
+ // Get an external reference to the handler address.
+ ExternalReference handler_address(Top::k_handler_address);
+
+ // The next handler address is at kNextIndex in the stack.
+ const int kNextIndex = StackHandlerConstants::kNextOffset / kPointerSize;
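+  // (kNextOffset is a byte offset within the handler; dividing by
+  // kPointerSize converts it into an element index on the frame, where
+  // the handler's slots live.)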
+ // If we can fall off the end of the try block, unlink from try chain.
+ if (has_valid_frame()) {
+ __ ldr(r1, frame_->ElementAt(kNextIndex));
+ __ mov(r3, Operand(handler_address));
+ __ str(r1, MemOperand(r3));
+ frame_->Drop(StackHandlerConstants::kSize / kPointerSize);
+ if (has_unlinks) {
+ exit.Jump();
+ }
+ }
+
+ // Generate unlink code for the (formerly) shadowing labels that have been
+ // jumped to. Deallocate each shadow target.
+ for (int i = 0; i < shadows.length(); i++) {
+ if (shadows[i]->is_linked()) {
+      // Unlink from try chain.
+ shadows[i]->Bind();
+ // Because we can be jumping here (to spilled code) from unspilled
+ // code, we need to reestablish a spilled frame at this block.
+ frame_->SpillAll();
+
+ // Reload sp from the top handler, because some statements that we
+      // break from (e.g., for...in) may have left stuff on the stack.
+ __ mov(r3, Operand(handler_address));
+ __ ldr(sp, MemOperand(r3));
+ // The stack pointer was restored to just below the code slot
+ // (the topmost slot) in the handler.
+ frame_->Forget(frame_->height() - handler_height + 1);
+
+ // kNextIndex is off by one because the code slot has already
+ // been dropped.
+ __ ldr(r1, frame_->ElementAt(kNextIndex - 1));
+ __ str(r1, MemOperand(r3));
+ // The code slot has already been dropped from the handler.
+ frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+
+ if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
+ frame_->PrepareForReturn();
+ }
+ shadows[i]->other_target()->Jump();
+ }
+ }
+
+ exit.Bind();
+ ASSERT(!has_valid_frame() || frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitTryFinally(TryFinally* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ TryFinally");
+ CodeForStatementPosition(node);
+
+ // State: Used to keep track of reason for entering the finally
+ // block. Should probably be extended to hold information for
+ // break/continue from within the try block.
+ enum { FALLING, THROWING, JUMPING };
+
+ JumpTarget try_block;
+ JumpTarget finally_block;
+
+ try_block.Call();
+
+ frame_->EmitPush(r0); // save exception object on the stack
+ // In case of thrown exceptions, this is where we continue.
+ __ mov(r2, Operand(Smi::FromInt(THROWING)));
+ finally_block.Jump();
+
+ // --- Try block ---
+ try_block.Bind();
+
+ frame_->PushTryHandler(TRY_FINALLY_HANDLER);
+ int handler_height = frame_->height();
+
+ // Shadow the labels for all escapes from the try block, including
+ // returns. Shadowing hides the original label as the LabelShadow and
+ // operations on the original actually affect the shadowing label.
+ //
+ // We should probably try to unify the escaping labels and the return
+ // label.
+ int nof_escapes = node->escaping_targets()->length();
+ List<ShadowTarget*> shadows(1 + nof_escapes);
+
+ // Add the shadow target for the function return.
+ static const int kReturnShadowIndex = 0;
+ shadows.Add(new ShadowTarget(&function_return_));
+ bool function_return_was_shadowed = function_return_is_shadowed_;
+ function_return_is_shadowed_ = true;
+ ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_);
+
+ // Add the remaining shadow targets.
+ for (int i = 0; i < nof_escapes; i++) {
+ shadows.Add(new ShadowTarget(node->escaping_targets()->at(i)));
+ }
+
+ // Generate code for the statements in the try block.
+ VisitStatementsAndSpill(node->try_block()->statements());
+
+ // Stop the introduced shadowing and count the number of required unlinks.
+ // After shadowing stops, the original labels are unshadowed and the
+ // LabelShadows represent the formerly shadowing labels.
+ int nof_unlinks = 0;
+ for (int i = 0; i < shadows.length(); i++) {
+ shadows[i]->StopShadowing();
+ if (shadows[i]->is_linked()) nof_unlinks++;
+ }
+ function_return_is_shadowed_ = function_return_was_shadowed;
+
+ // Get an external reference to the handler address.
+ ExternalReference handler_address(Top::k_handler_address);
+
+ // The next handler address is at kNextIndex in the stack.
+ const int kNextIndex = StackHandlerConstants::kNextOffset / kPointerSize;
+ // If we can fall off the end of the try block, unlink from the try
+ // chain and set the state on the frame to FALLING.
+ if (has_valid_frame()) {
+ __ ldr(r1, frame_->ElementAt(kNextIndex));
+ __ mov(r3, Operand(handler_address));
+ __ str(r1, MemOperand(r3));
+ frame_->Drop(StackHandlerConstants::kSize / kPointerSize);
+
+ // Fake a top of stack value (unneeded when FALLING) and set the
+ // state in r2, then jump around the unlink blocks if any.
+ __ mov(r0, Operand(Factory::undefined_value()));
+ frame_->EmitPush(r0);
+ __ mov(r2, Operand(Smi::FromInt(FALLING)));
+ if (nof_unlinks > 0) {
+ finally_block.Jump();
+ }
+ }
+
+ // Generate code to unlink and set the state for the (formerly)
+ // shadowing targets that have been jumped to.
+ for (int i = 0; i < shadows.length(); i++) {
+ if (shadows[i]->is_linked()) {
+ // If we have come from the shadowed return, the return value is
+ // in (a non-refcounted reference to) r0. We must preserve it
+ // until it is pushed.
+ //
+ // Because we can be jumping here (to spilled code) from
+ // unspilled code, we need to reestablish a spilled frame at
+ // this block.
+ shadows[i]->Bind();
+ frame_->SpillAll();
+
+ // Reload sp from the top handler, because some statements that
+      // we break from (e.g., for...in) may have left stuff on the
+ // stack.
+ __ mov(r3, Operand(handler_address));
+ __ ldr(sp, MemOperand(r3));
+ // The stack pointer was restored to the address slot in the handler.
+ ASSERT(StackHandlerConstants::kNextOffset == 1 * kPointerSize);
+ frame_->Forget(frame_->height() - handler_height + 1);
+
+ // Unlink this handler and drop it from the frame. The next
+ // handler address is now on top of the frame.
+ frame_->EmitPop(r1);
+ __ str(r1, MemOperand(r3));
+ // The top (code) and the second (handler) slot have both been
+ // dropped already.
+ frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 2);
+
+ if (i == kReturnShadowIndex) {
+ // If this label shadowed the function return, materialize the
+ // return value on the stack.
+ frame_->EmitPush(r0);
+ } else {
+ // Fake TOS for targets that shadowed breaks and continues.
+ __ mov(r0, Operand(Factory::undefined_value()));
+ frame_->EmitPush(r0);
+ }
+ __ mov(r2, Operand(Smi::FromInt(JUMPING + i)));
+ if (--nof_unlinks > 0) {
+ // If this is not the last unlink block, jump around the next.
+ finally_block.Jump();
+ }
+ }
+ }
+
+ // --- Finally block ---
+ finally_block.Bind();
+
+ // Push the state on the stack.
+ frame_->EmitPush(r2);
+
+ // We keep two elements on the stack - the (possibly faked) result
+ // and the state - while evaluating the finally block.
+ //
+ // Generate code for the statements in the finally block.
+ VisitStatementsAndSpill(node->finally_block()->statements());
+
+ if (has_valid_frame()) {
+ // Restore state and return value or faked TOS.
+ frame_->EmitPop(r2);
+ frame_->EmitPop(r0);
+ }
+
+ // Generate code to jump to the right destination for all used
+ // formerly shadowing targets. Deallocate each shadow target.
+ for (int i = 0; i < shadows.length(); i++) {
+ if (has_valid_frame() && shadows[i]->is_bound()) {
+ JumpTarget* original = shadows[i]->other_target();
+ __ cmp(r2, Operand(Smi::FromInt(JUMPING + i)));
+ if (!function_return_is_shadowed_ && i == kReturnShadowIndex) {
+ JumpTarget skip;
+ skip.Branch(ne);
+ frame_->PrepareForReturn();
+ original->Jump();
+ skip.Bind();
+ } else {
+ original->Branch(eq);
+ }
+ }
+ }
+
+ if (has_valid_frame()) {
+ // Check if we need to rethrow the exception.
+ JumpTarget exit;
+ __ cmp(r2, Operand(Smi::FromInt(THROWING)));
+ exit.Branch(ne);
+
+ // Rethrow exception.
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kReThrow, 1);
+
+ // Done.
+ exit.Bind();
+ }
+ ASSERT(!has_valid_frame() || frame_->height() == original_height);
+}
+
+
+void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ DebuggerStatament");
+ CodeForStatementPosition(node);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ frame_->CallRuntime(Runtime::kDebugBreak, 0);
+#endif
+ // Ignore the return value.
+ ASSERT(frame_->height() == original_height);
+}
+
+
+void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(boilerplate->IsBoilerplate());
+
+ // Push the boilerplate on the stack.
+ __ mov(r0, Operand(boilerplate));
+ frame_->EmitPush(r0);
+
+ // Create a new closure.
+ frame_->EmitPush(cp);
+ frame_->CallRuntime(Runtime::kNewClosure, 2);
+ frame_->EmitPush(r0);
+}
+
+
+void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ FunctionLiteral");
+
+ // Build the function boilerplate and instantiate it.
+ Handle<JSFunction> boilerplate = BuildBoilerplate(node);
+ // Check for stack-overflow exception.
+ if (HasStackOverflow()) {
+ ASSERT(frame_->height() == original_height);
+ return;
+ }
+ InstantiateBoilerplate(boilerplate);
+ ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitFunctionBoilerplateLiteral(
+ FunctionBoilerplateLiteral* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
+ InstantiateBoilerplate(node->boilerplate());
+ ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitConditional(Conditional* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ Conditional");
+ JumpTarget then;
+ JumpTarget else_;
+ JumpTarget exit;
+ LoadConditionAndSpill(node->condition(), NOT_INSIDE_TYPEOF,
+ &then, &else_, true);
+ Branch(false, &else_);
+ then.Bind();
+ LoadAndSpill(node->then_expression(), typeof_state());
+ exit.Jump();
+ else_.Bind();
+ LoadAndSpill(node->else_expression(), typeof_state());
+ exit.Bind();
+ ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
+ VirtualFrame::SpilledScope spilled_scope;
+ if (slot->type() == Slot::LOOKUP) {
+ ASSERT(slot->var()->is_dynamic());
+
+ JumpTarget slow;
+ JumpTarget done;
+
+ // Generate fast-case code for variables that might be shadowed by
+ // eval-introduced variables. Eval is used a lot without
+ // introducing variables. In those cases, we do not want to
+ // perform a runtime call for all variables in the scope
+ // containing the eval.
+ if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
+ LoadFromGlobalSlotCheckExtensions(slot, typeof_state, r1, r2, &slow);
+ // If there was no control flow to slow, we can exit early.
+ if (!slow.is_linked()) {
+ frame_->EmitPush(r0);
+ return;
+ }
+
+ done.Jump();
+
+ } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
+ Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
+ // Only generate the fast case for locals that rewrite to slots.
+ // This rules out argument loads.
+ if (potential_slot != NULL) {
+ __ ldr(r0,
+ ContextSlotOperandCheckExtensions(potential_slot,
+ r1,
+ r2,
+ &slow));
+ if (potential_slot->var()->mode() == Variable::CONST) {
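+          // An uninitialized const still holds 'the hole'; replace it
+          // with 'undefined' using a move predicated on eq.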
+ __ cmp(r0, Operand(Factory::the_hole_value()));
+ __ mov(r0, Operand(Factory::undefined_value()), LeaveCC, eq);
+ }
+ // There is always control flow to slow from
+ // ContextSlotOperandCheckExtensions so we have to jump around
+ // it.
+ done.Jump();
+ }
+ }
+
+ slow.Bind();
+ frame_->EmitPush(cp);
+ __ mov(r0, Operand(slot->var()->name()));
+ frame_->EmitPush(r0);
+
+ if (typeof_state == INSIDE_TYPEOF) {
+ frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ } else {
+ frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
+ }
+
+ done.Bind();
+ frame_->EmitPush(r0);
+
+ } else {
+ // Note: We would like to keep the assert below, but it fires because of
+ // some nasty code in LoadTypeofExpression() which should be removed...
+ // ASSERT(!slot->var()->is_dynamic());
+
+ // Special handling for locals allocated in registers.
+ __ ldr(r0, SlotOperand(slot, r2));
+ frame_->EmitPush(r0);
+ if (slot->var()->mode() == Variable::CONST) {
+ // Const slots may contain 'the hole' value (the constant hasn't been
+ // initialized yet) which needs to be converted into the 'undefined'
+ // value.
+ Comment cmnt(masm_, "[ Unhole const");
+ frame_->EmitPop(r0);
+ __ cmp(r0, Operand(Factory::the_hole_value()));
+ __ mov(r0, Operand(Factory::undefined_value()), LeaveCC, eq);
+ frame_->EmitPush(r0);
+ }
+ }
+}
+
+
+void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
+ TypeofState typeof_state,
+ Register tmp,
+ Register tmp2,
+ JumpTarget* slow) {
+ // Check that no extension objects have been created by calls to
+ // eval from the current scope to the global scope.
+ Register context = cp;
+ Scope* s = scope();
+ while (s != NULL) {
+ if (s->num_heap_slots() > 0) {
+ if (s->calls_eval()) {
+ // Check that extension is NULL.
+ __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
+ __ tst(tmp2, tmp2);
+ slow->Branch(ne);
+ }
+ // Load next context in chain.
+ __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
+ __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
+ context = tmp;
+ }
+ // If no outer scope calls eval, we do not need to check more
+ // context extensions.
+ if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
+ s = s->outer_scope();
+ }
+
+ if (s->is_eval_scope()) {
+ Label next, fast;
+ if (!context.is(tmp)) {
+ __ mov(tmp, Operand(context));
+ }
+ __ bind(&next);
+ // Terminate at global context.
+ __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
+ __ cmp(tmp2, Operand(Factory::global_context_map()));
+ __ b(eq, &fast);
+ // Check that extension is NULL.
+ __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX));
+ __ tst(tmp2, tmp2);
+ slow->Branch(ne);
+ // Load next context in chain.
+ __ ldr(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX));
+ __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
+ __ b(&next);
+ __ bind(&fast);
+ }
+
+ // All extension objects were empty and it is safe to use a global
+ // load IC call.
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ // Load the global object.
+ LoadGlobal();
+  // Set up the name register.
+ Result name = allocator_->Allocate(r2);
+ ASSERT(name.is_valid()); // We are in spilled code.
+ __ mov(name.reg(), Operand(slot->var()->name()));
+ // Call IC stub.
+ if (typeof_state == INSIDE_TYPEOF) {
+ frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, &name, 0);
+ } else {
+ frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET_CONTEXT, &name, 0);
+ }
+
+ // Drop the global object. The result is in r0.
+ frame_->Drop();
+}
+
+
+void CodeGenerator::VisitSlot(Slot* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ Slot");
+ LoadFromSlot(node, typeof_state());
+ ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ VariableProxy");
+
+ Variable* var = node->var();
+ Expression* expr = var->rewrite();
+ if (expr != NULL) {
+ Visit(expr);
+ } else {
+ ASSERT(var->is_global());
+ Reference ref(this, node);
+ ref.GetValueAndSpill(typeof_state());
+ }
+ ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitLiteral(Literal* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ Literal");
+ __ mov(r0, Operand(node->handle()));
+ frame_->EmitPush(r0);
+ ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ RexExp Literal");
+
+ // Retrieve the literal array and check the allocated entry.
+
+ // Load the function of this activation.
+ __ ldr(r1, frame_->Function());
+
+ // Load the literals array of the function.
+ __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
+
+  // Load the literal at the index saved in the AST node.
+ int literal_offset =
+ FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+ __ ldr(r2, FieldMemOperand(r1, literal_offset));
+
+ JumpTarget done;
+ __ cmp(r2, Operand(Factory::undefined_value()));
+ done.Branch(ne);
+
+  // If the entry is undefined we call the runtime system to compute
+ // the literal.
+ frame_->EmitPush(r1); // literal array (0)
+ __ mov(r0, Operand(Smi::FromInt(node->literal_index())));
+ frame_->EmitPush(r0); // literal index (1)
+ __ mov(r0, Operand(node->pattern())); // RegExp pattern (2)
+ frame_->EmitPush(r0);
+ __ mov(r0, Operand(node->flags())); // RegExp flags (3)
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+ __ mov(r2, Operand(r0));
+
+ done.Bind();
+ // Push the literal.
+ frame_->EmitPush(r2);
+ ASSERT(frame_->height() == original_height + 1);
+}
+
+
+// This deferred code stub will be used for creating the boilerplate
+// by calling Runtime_CreateObjectLiteralBoilerplate.
+// Each created boilerplate is stored in the JSFunction and is
+// therefore context dependent.
+class DeferredObjectLiteral: public DeferredCode {
+ public:
+ explicit DeferredObjectLiteral(ObjectLiteral* node) : node_(node) {
+ set_comment("[ DeferredObjectLiteral");
+ }
+
+ virtual void Generate();
+
+ private:
+ ObjectLiteral* node_;
+};
+
+
+void DeferredObjectLiteral::Generate() {
+ // Argument is passed in r1.
+
+ // If the entry is undefined we call the runtime system to compute
+ // the literal.
+ // Literal array (0).
+ __ push(r1);
+ // Literal index (1).
+ __ mov(r0, Operand(Smi::FromInt(node_->literal_index())));
+ __ push(r0);
+ // Constant properties (2).
+ __ mov(r0, Operand(node_->constant_properties()));
+ __ push(r0);
+ __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
+ __ mov(r2, Operand(r0));
+ // Result is returned in r2.
+}
+
+
+void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ ObjectLiteral");
+
+ DeferredObjectLiteral* deferred = new DeferredObjectLiteral(node);
+
+ // Retrieve the literal array and check the allocated entry.
+
+ // Load the function of this activation.
+ __ ldr(r1, frame_->Function());
+
+ // Load the literals array of the function.
+ __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
+
+  // Load the literal at the index saved in the AST node.
+ int literal_offset =
+ FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+ __ ldr(r2, FieldMemOperand(r1, literal_offset));
+
+ // Check whether we need to materialize the object literal boilerplate.
+ // If so, jump to the deferred code.
+ __ cmp(r2, Operand(Factory::undefined_value()));
+ deferred->Branch(eq);
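+  // On return from the deferred code the boilerplate is in r2.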
+ deferred->BindExit();
+
+ // Push the object literal boilerplate.
+ frame_->EmitPush(r2);
+
+ // Clone the boilerplate object.
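+  // A literal of depth 1 has no nested literals, so a shallow clone
+  // of the boilerplate is sufficient.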
+ Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
+ if (node->depth() == 1) {
+ clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+ }
+ frame_->CallRuntime(clone_function_id, 1);
+ frame_->EmitPush(r0); // save the result
+ // r0: cloned object literal
+
+ for (int i = 0; i < node->properties()->length(); i++) {
+ ObjectLiteral::Property* property = node->properties()->at(i);
+ Literal* key = property->key();
+ Expression* value = property->value();
+ switch (property->kind()) {
+ case ObjectLiteral::Property::CONSTANT:
+ break;
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+ if (CompileTimeValue::IsCompileTimeValue(property->value())) break;
+ // else fall through
+ case ObjectLiteral::Property::COMPUTED: // fall through
+ case ObjectLiteral::Property::PROTOTYPE: {
+ frame_->EmitPush(r0); // dup the result
+ LoadAndSpill(key);
+ LoadAndSpill(value);
+ frame_->CallRuntime(Runtime::kSetProperty, 3);
+ // restore r0
+ __ ldr(r0, frame_->Top());
+ break;
+ }
+ case ObjectLiteral::Property::SETTER: {
+ frame_->EmitPush(r0);
+ LoadAndSpill(key);
+ __ mov(r0, Operand(Smi::FromInt(1)));
+ frame_->EmitPush(r0);
+ LoadAndSpill(value);
+ frame_->CallRuntime(Runtime::kDefineAccessor, 4);
+ __ ldr(r0, frame_->Top());
+ break;
+ }
+ case ObjectLiteral::Property::GETTER: {
+ frame_->EmitPush(r0);
+ LoadAndSpill(key);
+ __ mov(r0, Operand(Smi::FromInt(0)));
+ frame_->EmitPush(r0);
+ LoadAndSpill(value);
+ frame_->CallRuntime(Runtime::kDefineAccessor, 4);
+ __ ldr(r0, frame_->Top());
+ break;
+ }
+ }
+ }
+ ASSERT(frame_->height() == original_height + 1);
+}
+
+
+// This deferred code stub will be used for creating the boilerplate
+// by calling Runtime_CreateArrayLiteralBoilerplate.
+// Each created boilerplate is stored in the JSFunction and is
+// therefore context dependent.
+class DeferredArrayLiteral: public DeferredCode {
+ public:
+ explicit DeferredArrayLiteral(ArrayLiteral* node) : node_(node) {
+ set_comment("[ DeferredArrayLiteral");
+ }
+
+ virtual void Generate();
+
+ private:
+ ArrayLiteral* node_;
+};
+
+
+void DeferredArrayLiteral::Generate() {
+ // Argument is passed in r1.
+
+  // If the entry is undefined we call the runtime system to compute
+ // the literal.
+ // Literal array (0).
+ __ push(r1);
+ // Literal index (1).
+ __ mov(r0, Operand(Smi::FromInt(node_->literal_index())));
+ __ push(r0);
+  // Constant elements (2).
+ __ mov(r0, Operand(node_->literals()));
+ __ push(r0);
+ __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
+ __ mov(r2, Operand(r0));
+ // Result is returned in r2.
+}
+
+
+void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ ArrayLiteral");
+
+ DeferredArrayLiteral* deferred = new DeferredArrayLiteral(node);
+
+ // Retrieve the literal array and check the allocated entry.
+
+ // Load the function of this activation.
+ __ ldr(r1, frame_->Function());
+
+ // Load the literals array of the function.
+ __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
+
+  // Load the literal at the index saved in the AST node.
+ int literal_offset =
+ FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
+ __ ldr(r2, FieldMemOperand(r1, literal_offset));
+
+  // Check whether we need to materialize the array literal boilerplate.
+  // If so, jump to the deferred code.
+  __ cmp(r2, Operand(Factory::undefined_value()));
+  deferred->Branch(eq);
+  deferred->BindExit();
+
+  // Push the array literal boilerplate.
+ frame_->EmitPush(r2);
+
+ // Clone the boilerplate object.
+ Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
+ if (node->depth() == 1) {
+ clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+ }
+ frame_->CallRuntime(clone_function_id, 1);
+ frame_->EmitPush(r0); // save the result
+  // r0: cloned array literal
+
+ // Generate code to set the elements in the array that are not
+ // literals.
+ for (int i = 0; i < node->values()->length(); i++) {
+ Expression* value = node->values()->at(i);
+
+ // If value is a literal the property value is already set in the
+ // boilerplate object.
+ if (value->AsLiteral() != NULL) continue;
+ // If value is a materialized literal the property value is already set
+ // in the boilerplate object if it is simple.
+ if (CompileTimeValue::IsCompileTimeValue(value)) continue;
+
+ // The property must be set by generated code.
+ LoadAndSpill(value);
+ frame_->EmitPop(r0);
+
+ // Fetch the object literal.
+ __ ldr(r1, frame_->Top());
+ // Get the elements array.
+ __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
+
+ // Write to the indexed properties array.
+ int offset = i * kPointerSize + Array::kHeaderSize;
+ __ str(r0, FieldMemOperand(r1, offset));
+
+ // Update the write barrier for the array address.
+ __ mov(r3, Operand(offset));
+ __ RecordWrite(r1, r3, r2);
+ }
+ ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ ASSERT(!in_spilled_code());
+ VirtualFrame::SpilledScope spilled_scope;
+ // Call runtime routine to allocate the catch extension object and
+ // assign the exception value to the catch variable.
+ Comment cmnt(masm_, "[ CatchExtensionObject");
+ LoadAndSpill(node->key());
+ LoadAndSpill(node->value());
+ Result result =
+ frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
+ frame_->EmitPush(result.reg());
+ ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitAssignment(Assignment* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ Assignment");
+ CodeForStatementPosition(node);
+
+ { Reference target(this, node->target());
+ if (target.is_illegal()) {
+ // Fool the virtual frame into thinking that we left the assignment's
+ // value on the frame.
+ __ mov(r0, Operand(Smi::FromInt(0)));
+ frame_->EmitPush(r0);
+ ASSERT(frame_->height() == original_height + 1);
+ return;
+ }
+
+ if (node->op() == Token::ASSIGN ||
+ node->op() == Token::INIT_VAR ||
+ node->op() == Token::INIT_CONST) {
+ LoadAndSpill(node->value());
+
+ } else {
+ // +=, *= and similar binary assignments.
+ // Get the old value of the lhs.
+ target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
+ Literal* literal = node->value()->AsLiteral();
+ bool overwrite =
+ (node->value()->AsBinaryOperation() != NULL &&
+ node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+ if (literal != NULL && literal->handle()->IsSmi()) {
+ SmiOperation(node->binary_op(),
+ literal->handle(),
+ false,
+ overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
+ frame_->EmitPush(r0);
+
+ } else {
+ LoadAndSpill(node->value());
+ GenericBinaryOperation(node->binary_op(),
+ overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
+ frame_->EmitPush(r0);
+ }
+ }
+
+ Variable* var = node->target()->AsVariableProxy()->AsVariable();
+ if (var != NULL &&
+ (var->mode() == Variable::CONST) &&
+ node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
+ // Assignment ignored - leave the value on the stack.
+
+ } else {
+ CodeForSourcePosition(node->position());
+ if (node->op() == Token::INIT_CONST) {
+ // Dynamic constant initializations must use the function context
+ // and initialize the actual constant declared. Dynamic variable
+ // initializations are simply assignments and use SetValue.
+ target.SetValue(CONST_INIT);
+ } else {
+ target.SetValue(NOT_CONST_INIT);
+ }
+ }
+ }
+ ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitThrow(Throw* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ Throw");
+
+ LoadAndSpill(node->exception());
+ CodeForSourcePosition(node->position());
+ frame_->CallRuntime(Runtime::kThrow, 1);
+ frame_->EmitPush(r0);
+ ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitProperty(Property* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ Property");
+
+ { Reference property(this, node);
+ property.GetValueAndSpill(typeof_state());
+ }
+ ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitCall(Call* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ Call");
+
+ ZoneList<Expression*>* args = node->arguments();
+
+ CodeForStatementPosition(node);
+ // Standard function call.
+
+ // Check if the function is a variable or a property.
+ Expression* function = node->expression();
+ Variable* var = function->AsVariableProxy()->AsVariable();
+ Property* property = function->AsProperty();
+
+ // ------------------------------------------------------------------------
+ // Fast-case: Use inline caching.
+ // ---
+ // According to ECMA-262, section 11.2.3, page 44, the function to call
+ // must be resolved after the arguments have been evaluated. The IC code
+ // automatically handles this by loading the arguments before the function
+ // is resolved in cache misses (this also holds for megamorphic calls).
+ // ------------------------------------------------------------------------
+
+ if (var != NULL && !var->is_this() && var->is_global()) {
+ // ----------------------------------
+ // JavaScript example: 'foo(1, 2, 3)' // foo is global
+ // ----------------------------------
+
+ // Push the name of the function and the receiver onto the stack.
+ __ mov(r0, Operand(var->name()));
+ frame_->EmitPush(r0);
+
+ // Pass the global object as the receiver and let the IC stub
+ // patch the stack to use the global proxy as 'this' in the
+ // invoked function.
+ LoadGlobal();
+
+ // Load the arguments.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ LoadAndSpill(args->at(i));
+ }
+
+    // Set up the receiver register and call the IC initialization code.
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
+ CodeForSourcePosition(node->position());
+ frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
+ arg_count + 1);
+ __ ldr(cp, frame_->Context());
+ // Remove the function from the stack.
+ frame_->Drop();
+ frame_->EmitPush(r0);
+
+ } else if (var != NULL && var->slot() != NULL &&
+ var->slot()->type() == Slot::LOOKUP) {
+ // ----------------------------------
+ // JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj
+ // ----------------------------------
+
+ // Load the function
+ frame_->EmitPush(cp);
+ __ mov(r0, Operand(var->name()));
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
+ // r0: slot value; r1: receiver
+
+ // Load the receiver.
+ frame_->EmitPush(r0); // function
+ frame_->EmitPush(r1); // receiver
+
+ // Call the function.
+ CallWithArguments(args, node->position());
+ frame_->EmitPush(r0);
+
+ } else if (property != NULL) {
+ // Check if the key is a literal string.
+ Literal* literal = property->key()->AsLiteral();
+
+ if (literal != NULL && literal->handle()->IsSymbol()) {
+ // ------------------------------------------------------------------
+ // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
+ // ------------------------------------------------------------------
+
+ // Push the name of the function and the receiver onto the stack.
+ __ mov(r0, Operand(literal->handle()));
+ frame_->EmitPush(r0);
+ LoadAndSpill(property->obj());
+
+ // Load the arguments.
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ LoadAndSpill(args->at(i));
+ }
+
+ // Set the receiver register and call the IC initialization code.
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
+ CodeForSourcePosition(node->position());
+ frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
+ __ ldr(cp, frame_->Context());
+
+ // Remove the function from the stack.
+ frame_->Drop();
+
+      frame_->EmitPush(r0); // push after the function is removed from the stack
+
+ } else {
+ // -------------------------------------------
+ // JavaScript example: 'array[index](1, 2, 3)'
+ // -------------------------------------------
+
+ // Load the function to call from the property through a reference.
+ Reference ref(this, property);
+ ref.GetValueAndSpill(NOT_INSIDE_TYPEOF); // receiver
+
+ // Pass receiver to called function.
+ if (property->is_synthetic()) {
+ LoadGlobalReceiver(r0);
+ } else {
+ __ ldr(r0, frame_->ElementAt(ref.size()));
+ frame_->EmitPush(r0);
+ }
+
+ // Call the function.
+ CallWithArguments(args, node->position());
+ frame_->EmitPush(r0);
+ }
+
+ } else {
+ // ----------------------------------
+ // JavaScript example: 'foo(1, 2, 3)' // foo is not global
+ // ----------------------------------
+
+ // Load the function.
+ LoadAndSpill(function);
+
+ // Pass the global proxy as the receiver.
+ LoadGlobalReceiver(r0);
+
+ // Call the function.
+ CallWithArguments(args, node->position());
+ frame_->EmitPush(r0);
+ }
+ ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitCallEval(CallEval* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ CallEval");
+
+ // In a call to eval, we first call %ResolvePossiblyDirectEval to resolve
+ // the function we need to call and the receiver of the call.
+ // Then we call the resolved function using the given arguments.
+
+ ZoneList<Expression*>* args = node->arguments();
+ Expression* function = node->expression();
+
+ CodeForStatementPosition(node);
+
+ // Prepare stack for call to resolved function.
+ LoadAndSpill(function);
+ __ mov(r2, Operand(Factory::undefined_value()));
+ frame_->EmitPush(r2); // Slot for receiver
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ LoadAndSpill(args->at(i));
+ }
+
+ // Prepare stack for call to ResolvePossiblyDirectEval.
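+  // The function sits below the receiver slot and the arguments, so
+  // push a copy of it and then the first argument (or, if there are
+  // no arguments, undefined) as the second runtime argument.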
+ __ ldr(r1, MemOperand(sp, arg_count * kPointerSize + kPointerSize));
+ frame_->EmitPush(r1);
+ if (arg_count > 0) {
+ __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
+ frame_->EmitPush(r1);
+ } else {
+ frame_->EmitPush(r2);
+ }
+
+ // Resolve the call.
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+
+ // Touch up stack with the right values for the function and the receiver.
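+  // The runtime call returns a fixed array holding the resolved
+  // function (element 0) and the receiver (element 1).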
+ __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize));
+ __ str(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize + kPointerSize));
+ __ str(r1, MemOperand(sp, arg_count * kPointerSize));
+
+ // Call the function.
+ CodeForSourcePosition(node->position());
+
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ CallFunctionStub call_function(arg_count, in_loop);
+ frame_->CallStub(&call_function, arg_count + 1);
+
+ __ ldr(cp, frame_->Context());
+ // Remove the function from the stack.
+ frame_->Drop();
+ frame_->EmitPush(r0);
+ ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitCallNew(CallNew* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ CallNew");
+ CodeForStatementPosition(node);
+
+ // According to ECMA-262, section 11.2.2, page 44, the function
+ // expression in new calls must be evaluated before the
+ // arguments. This is different from ordinary calls, where the
+ // actual function to call is resolved after the arguments have been
+ // evaluated.
+
+ // Compute function to call and use the global object as the
+ // receiver. There is no need to use the global proxy here because
+ // it will always be replaced with a newly allocated object.
+ LoadAndSpill(node->expression());
+ LoadGlobal();
+
+ // Push the arguments ("left-to-right") on the stack.
+ ZoneList<Expression*>* args = node->arguments();
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ LoadAndSpill(args->at(i));
+ }
+
+ // r0: the number of arguments.
+ Result num_args = allocator_->Allocate(r0);
+ ASSERT(num_args.is_valid());
+ __ mov(num_args.reg(), Operand(arg_count));
+
+ // Load the function into r1 as per calling convention.
+ Result function = allocator_->Allocate(r1);
+ ASSERT(function.is_valid());
+ __ ldr(function.reg(), frame_->ElementAt(arg_count + 1));
+
+ // Call the construct call builtin that handles allocation and
+ // constructor invocation.
+ CodeForSourcePosition(node->position());
+ Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall));
+ Result result = frame_->CallCodeObject(ic,
+ RelocInfo::CONSTRUCT_CALL,
+ &num_args,
+ &function,
+ arg_count + 1);
+
+ // Discard old TOS value and push r0 on the stack (same as Pop(), push(r0)).
+ __ str(r0, frame_->Top());
+ ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(args->length() == 1);
+ JumpTarget leave;
+ LoadAndSpill(args->at(0));
+ frame_->EmitPop(r0); // r0 contains object.
+ // if (object->IsSmi()) return the object.
+ __ tst(r0, Operand(kSmiTagMask));
+ leave.Branch(eq);
+ // It is a heap object - get map.
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
+ // if (!object->IsJSValue()) return the object.
+ __ cmp(r1, Operand(JS_VALUE_TYPE));
+ leave.Branch(ne);
+ // Load the value.
+ __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset));
+ leave.Bind();
+ frame_->EmitPush(r0);
+}
+
+
+void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(args->length() == 2);
+ JumpTarget leave;
+ LoadAndSpill(args->at(0)); // Load the object.
+ LoadAndSpill(args->at(1)); // Load the value.
+ frame_->EmitPop(r0); // r0 contains value
+ frame_->EmitPop(r1); // r1 contains object
+ // if (object->IsSmi()) return object.
+ __ tst(r1, Operand(kSmiTagMask));
+ leave.Branch(eq);
+ // It is a heap object - get map.
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ // if (!object->IsJSValue()) return object.
+ __ cmp(r2, Operand(JS_VALUE_TYPE));
+ leave.Branch(ne);
+ // Store the value.
+ __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
+ // Update the write barrier.
+ __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag));
+ __ RecordWrite(r1, r2, r3);
+ // Leave.
+ leave.Bind();
+ frame_->EmitPush(r0);
+}
+
+
+void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(args->length() == 1);
+ LoadAndSpill(args->at(0));
+ frame_->EmitPop(r0);
+ __ tst(r0, Operand(kSmiTagMask));
+ cc_reg_ = eq;
+}
+
+
+void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope;
+ // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc.
+ ASSERT_EQ(args->length(), 3);
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ if (ShouldGenerateLog(args->at(0))) {
+ LoadAndSpill(args->at(1));
+ LoadAndSpill(args->at(2));
+ __ CallRuntime(Runtime::kLog, 2);
+ }
+#endif
+ __ mov(r0, Operand(Factory::undefined_value()));
+ frame_->EmitPush(r0);
+}
+
+
+void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(args->length() == 1);
+ LoadAndSpill(args->at(0));
+ frame_->EmitPop(r0);
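+  // A non-negative smi has both the smi tag and the sign bit clear,
+  // so a single tst covers both checks.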
+ __ tst(r0, Operand(kSmiTagMask | 0x80000000));
+ cc_reg_ = eq;
+}
+
+
+// This should generate code that performs a charCodeAt() call or returns
+// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
+// It is not yet implemented on ARM, so it always goes to the slow case.
+void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(args->length() == 2);
+ __ mov(r0, Operand(Factory::undefined_value()));
+ frame_->EmitPush(r0);
+}
+
+
+void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(args->length() == 1);
+ LoadAndSpill(args->at(0));
+ JumpTarget answer;
+ // We need the CC bits to come out as not_equal in the case where the
+ // object is a smi. This can't be done with the usual test opcode so
+ // we use XOR to get the right CC bits.
+ frame_->EmitPop(r0);
+ __ and_(r1, r0, Operand(kSmiTagMask));
+ __ eor(r1, r1, Operand(kSmiTagMask), SetCC);
+ answer.Branch(ne);
+ // It is a heap object - get the map.
+ __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
+ // Check if the object is a JS array or not.
+ __ cmp(r1, Operand(JS_ARRAY_TYPE));
+ answer.Bind();
+ cc_reg_ = eq;
+}
+
+
+void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(args->length() == 0);
+
+ // Seed the result with the formal parameters count, which will be used
+ // in case no arguments adaptor frame is found below the current frame.
+ __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
+
+ // Call the shared stub to get to the arguments.length.
+ ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
+ frame_->CallStub(&stub, 0);
+ frame_->EmitPush(r0);
+}
+
+
+void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(args->length() == 1);
+
+ // Satisfy contract with ArgumentsAccessStub:
+ // Load the key into r1 and the formal parameters count into r0.
+ LoadAndSpill(args->at(0));
+ frame_->EmitPop(r1);
+ __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
+
+ // Call the shared stub to get to arguments[key].
+ ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
+ frame_->CallStub(&stub, 0);
+ frame_->EmitPush(r0);
+}
+
+
+void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope;
+ ASSERT(args->length() == 2);
+
+ // Load the two objects into registers and perform the comparison.
+ LoadAndSpill(args->at(0));
+ LoadAndSpill(args->at(1));
+ frame_->EmitPop(r0);
+ frame_->EmitPop(r1);
+ __ cmp(r0, Operand(r1));
+ cc_reg_ = eq;
+}
+
+
+void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ if (CheckForInlineRuntimeCall(node)) {
+ ASSERT((has_cc() && frame_->height() == original_height) ||
+ (!has_cc() && frame_->height() == original_height + 1));
+ return;
+ }
+
+ ZoneList<Expression*>* args = node->arguments();
+ Comment cmnt(masm_, "[ CallRuntime");
+ Runtime::Function* function = node->function();
+
+ if (function == NULL) {
+ // Prepare stack for calling JS runtime function.
+ __ mov(r0, Operand(node->name()));
+ frame_->EmitPush(r0);
+ // Push the builtins object found in the current global object.
+ __ ldr(r1, GlobalObject());
+ __ ldr(r0, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
+ frame_->EmitPush(r0);
+ }
+
+ // Push the arguments ("left-to-right").
+ int arg_count = args->length();
+ for (int i = 0; i < arg_count; i++) {
+ LoadAndSpill(args->at(i));
+ }
+
+ if (function == NULL) {
+ // Call the JS runtime function.
+ InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
+ Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
+ frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1);
+ __ ldr(cp, frame_->Context());
+ frame_->Drop();
+ frame_->EmitPush(r0);
+ } else {
+ // Call the C runtime function.
+ frame_->CallRuntime(function, arg_count);
+ frame_->EmitPush(r0);
+ }
+ ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ UnaryOperation");
+
+ Token::Value op = node->op();
+
+ if (op == Token::NOT) {
+ LoadConditionAndSpill(node->expression(),
+ NOT_INSIDE_TYPEOF,
+ false_target(),
+ true_target(),
+ true);
+ cc_reg_ = NegateCondition(cc_reg_);
+
+ } else if (op == Token::DELETE) {
+ Property* property = node->expression()->AsProperty();
+ Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
+ if (property != NULL) {
+ LoadAndSpill(property->obj());
+ LoadAndSpill(property->key());
+ Result arg_count = allocator_->Allocate(r0);
+ ASSERT(arg_count.is_valid());
+ __ mov(arg_count.reg(), Operand(1)); // not counting receiver
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+
+ } else if (variable != NULL) {
+ Slot* slot = variable->slot();
+ if (variable->is_global()) {
+ LoadGlobal();
+ __ mov(r0, Operand(variable->name()));
+ frame_->EmitPush(r0);
+ Result arg_count = allocator_->Allocate(r0);
+ ASSERT(arg_count.is_valid());
+ __ mov(arg_count.reg(), Operand(1)); // not counting receiver
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+
+ } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+        // Look up the context holding the named variable.
+ frame_->EmitPush(cp);
+ __ mov(r0, Operand(variable->name()));
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kLookupContext, 2);
+ // r0: context
+ frame_->EmitPush(r0);
+ __ mov(r0, Operand(variable->name()));
+ frame_->EmitPush(r0);
+ Result arg_count = allocator_->Allocate(r0);
+ ASSERT(arg_count.is_valid());
+ __ mov(arg_count.reg(), Operand(1)); // not counting receiver
+ frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2);
+
+ } else {
+        // Default: Deleting a non-global variable that was not
+        // dynamically introduced yields false.
+ __ mov(r0, Operand(Factory::false_value()));
+ }
+
+ } else {
+ // Default: Result of deleting expressions is true.
+ LoadAndSpill(node->expression()); // may have side-effects
+ frame_->Drop();
+ __ mov(r0, Operand(Factory::true_value()));
+ }
+ frame_->EmitPush(r0);
+
+ } else if (op == Token::TYPEOF) {
+ // Special case for loading the typeof expression; see comment on
+ // LoadTypeofExpression().
+ LoadTypeofExpression(node->expression());
+ frame_->CallRuntime(Runtime::kTypeof, 1);
+ frame_->EmitPush(r0); // r0 has result
+
+ } else {
+ LoadAndSpill(node->expression());
+ frame_->EmitPop(r0);
+ switch (op) {
+ case Token::NOT:
+ case Token::DELETE:
+ case Token::TYPEOF:
+ UNREACHABLE(); // handled above
+ break;
+
+ case Token::SUB: {
+ UnarySubStub stub;
+ frame_->CallStub(&stub, 0);
+ break;
+ }
+
+ case Token::BIT_NOT: {
+        // Smi check.
+ JumpTarget smi_label;
+ JumpTarget continue_label;
+ __ tst(r0, Operand(kSmiTagMask));
+ smi_label.Branch(eq);
+
+ frame_->EmitPush(r0);
+ Result arg_count = allocator_->Allocate(r0);
+ ASSERT(arg_count.is_valid());
+ __ mov(arg_count.reg(), Operand(0)); // not counting receiver
+ frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, &arg_count, 1);
+
+ continue_label.Jump();
+ smi_label.Bind();
+ __ mvn(r0, Operand(r0));
+ __ bic(r0, r0, Operand(kSmiTagMask)); // bit-clear inverted smi-tag
+ continue_label.Bind();
+ break;
+ }
+
+ case Token::VOID:
+        // Since the stack top is cached in r0, popping and then
+ // pushing a value can be done by just writing to r0.
+ __ mov(r0, Operand(Factory::undefined_value()));
+ break;
+
+ case Token::ADD: {
+ // Smi check.
+ JumpTarget continue_label;
+ __ tst(r0, Operand(kSmiTagMask));
+ continue_label.Branch(eq);
+ frame_->EmitPush(r0);
+ Result arg_count = allocator_->Allocate(r0);
+ ASSERT(arg_count.is_valid());
+ __ mov(arg_count.reg(), Operand(0)); // not counting receiver
+ frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
+ continue_label.Bind();
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ frame_->EmitPush(r0); // r0 has result
+ }
+ ASSERT((has_cc() && frame_->height() == original_height) ||
+ (!has_cc() && frame_->height() == original_height + 1));
+}
+
+
+void CodeGenerator::VisitCountOperation(CountOperation* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ CountOperation");
+
+ bool is_postfix = node->is_postfix();
+ bool is_increment = node->op() == Token::INC;
+
+ Variable* var = node->expression()->AsVariableProxy()->AsVariable();
+ bool is_const = (var != NULL && var->mode() == Variable::CONST);
+
+ // Postfix: Make room for the result.
+ if (is_postfix) {
+ __ mov(r0, Operand(0));
+ frame_->EmitPush(r0);
+ }
+
+ { Reference target(this, node->expression());
+ if (target.is_illegal()) {
+ // Spoof the virtual frame to have the expected height (one higher
+ // than on entry).
+ if (!is_postfix) {
+ __ mov(r0, Operand(Smi::FromInt(0)));
+ frame_->EmitPush(r0);
+ }
+ ASSERT(frame_->height() == original_height + 1);
+ return;
+ }
+ target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
+ frame_->EmitPop(r0);
+
+ JumpTarget slow;
+ JumpTarget exit;
+
+ // Load the value (1) into register r1.
+ __ mov(r1, Operand(Smi::FromInt(1)));
+
+ // Check for smi operand.
+ __ tst(r0, Operand(kSmiTagMask));
+ slow.Branch(ne);
+
+ // Postfix: Store the old value as the result.
+ if (is_postfix) {
+ __ str(r0, frame_->ElementAt(target.size()));
+ }
+
+ // Perform optimistic increment/decrement.
+ if (is_increment) {
+ __ add(r0, r0, Operand(r1), SetCC);
+ } else {
+ __ sub(r0, r0, Operand(r1), SetCC);
+ }
+
+ // If the increment/decrement didn't overflow, we're done.
+ exit.Branch(vc);
+
+ // Revert optimistic increment/decrement.
+ if (is_increment) {
+ __ sub(r0, r0, Operand(r1));
+ } else {
+ __ add(r0, r0, Operand(r1));
+ }
+
+ // Slow case: Convert to number.
+ slow.Bind();
+ {
+ // Convert the operand to a number.
+ frame_->EmitPush(r0);
+ Result arg_count = allocator_->Allocate(r0);
+ ASSERT(arg_count.is_valid());
+ __ mov(arg_count.reg(), Operand(0));
+ frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1);
+ }
+ if (is_postfix) {
+ // Postfix: store to result (on the stack).
+ __ str(r0, frame_->ElementAt(target.size()));
+ }
+
+ // Compute the new value.
+ __ mov(r1, Operand(Smi::FromInt(1)));
+ frame_->EmitPush(r0);
+ frame_->EmitPush(r1);
+ if (is_increment) {
+ frame_->CallRuntime(Runtime::kNumberAdd, 2);
+ } else {
+ frame_->CallRuntime(Runtime::kNumberSub, 2);
+ }
+
+ // Store the new value in the target if not const.
+ exit.Bind();
+ frame_->EmitPush(r0);
+ if (!is_const) target.SetValue(NOT_CONST_INIT);
+ }
+
+ // Postfix: Discard the new value and use the old.
+ if (is_postfix) frame_->EmitPop(r0);
+ ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ BinaryOperation");
+ Token::Value op = node->op();
+
+ // According to ECMA-262 section 11.11, page 58, the binary logical
+ // operators must yield the result of one of the two expressions
+ // before any ToBoolean() conversions. This means that the value
+ // produced by a && or || operator is not necessarily a boolean.
+
+ // NOTE: If the left hand side produces a materialized value (not in
+ // the CC register), we force the right hand side to do the
+ // same. This is necessary because we may have to branch to the exit
+ // after evaluating the left hand side (due to the shortcut
+ // semantics), but the compiler must (statically) know if the result
+ // of compiling the binary operation is materialized or not.
+
+ if (op == Token::AND) {
+ JumpTarget is_true;
+ LoadConditionAndSpill(node->left(),
+ NOT_INSIDE_TYPEOF,
+ &is_true,
+ false_target(),
+ false);
+ if (has_cc()) {
+ Branch(false, false_target());
+
+ // Evaluate right side expression.
+ is_true.Bind();
+ LoadConditionAndSpill(node->right(),
+ NOT_INSIDE_TYPEOF,
+ true_target(),
+ false_target(),
+ false);
+
+ } else {
+ JumpTarget pop_and_continue;
+ JumpTarget exit;
+
+ __ ldr(r0, frame_->Top()); // dup the stack top
+ frame_->EmitPush(r0);
+ // Avoid popping the result if it converts to 'false' using the
+ // standard ToBoolean() conversion as described in ECMA-262,
+ // section 9.2, page 30.
+ ToBoolean(&pop_and_continue, &exit);
+ Branch(false, &exit);
+
+ // Pop the result of evaluating the first part.
+ pop_and_continue.Bind();
+ frame_->EmitPop(r0);
+
+ // Evaluate right side expression.
+ is_true.Bind();
+ LoadAndSpill(node->right());
+
+ // Exit (always with a materialized value).
+ exit.Bind();
+ }
+
+ } else if (op == Token::OR) {
+ JumpTarget is_false;
+ LoadConditionAndSpill(node->left(),
+ NOT_INSIDE_TYPEOF,
+ true_target(),
+ &is_false,
+ false);
+ if (has_cc()) {
+ Branch(true, true_target());
+
+ // Evaluate right side expression.
+ is_false.Bind();
+ LoadConditionAndSpill(node->right(),
+ NOT_INSIDE_TYPEOF,
+ true_target(),
+ false_target(),
+ false);
+
+ } else {
+ JumpTarget pop_and_continue;
+ JumpTarget exit;
+
+ __ ldr(r0, frame_->Top());
+ frame_->EmitPush(r0);
+ // Avoid popping the result if it converts to 'true' using the
+ // standard ToBoolean() conversion as described in ECMA-262,
+ // section 9.2, page 30.
+ ToBoolean(&exit, &pop_and_continue);
+ Branch(true, &exit);
+
+ // Pop the result of evaluating the first part.
+ pop_and_continue.Bind();
+ frame_->EmitPop(r0);
+
+ // Evaluate right side expression.
+ is_false.Bind();
+ LoadAndSpill(node->right());
+
+ // Exit (always with a materialized value).
+ exit.Bind();
+ }
+
+ } else {
+ // Optimize for the case where (at least) one of the expressions
+ // is a literal small integer.
+ Literal* lliteral = node->left()->AsLiteral();
+ Literal* rliteral = node->right()->AsLiteral();
+ // NOTE: The code below assumes that the slow cases (calls to runtime)
+ // never return a constant/immutable object.
+ bool overwrite_left =
+ (node->left()->AsBinaryOperation() != NULL &&
+ node->left()->AsBinaryOperation()->ResultOverwriteAllowed());
+ bool overwrite_right =
+ (node->right()->AsBinaryOperation() != NULL &&
+ node->right()->AsBinaryOperation()->ResultOverwriteAllowed());
+
+ if (rliteral != NULL && rliteral->handle()->IsSmi()) {
+ LoadAndSpill(node->left());
+ SmiOperation(node->op(),
+ rliteral->handle(),
+ false,
+ overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
+
+ } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
+ LoadAndSpill(node->right());
+ SmiOperation(node->op(),
+ lliteral->handle(),
+ true,
+ overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
+
+ } else {
+ OverwriteMode overwrite_mode = NO_OVERWRITE;
+ if (overwrite_left) {
+ overwrite_mode = OVERWRITE_LEFT;
+ } else if (overwrite_right) {
+ overwrite_mode = OVERWRITE_RIGHT;
+ }
+ LoadAndSpill(node->left());
+ LoadAndSpill(node->right());
+ GenericBinaryOperation(node->op(), overwrite_mode);
+ }
+ frame_->EmitPush(r0);
+ }
+ ASSERT((has_cc() && frame_->height() == original_height) ||
+ (!has_cc() && frame_->height() == original_height + 1));
+}
+
+
+void CodeGenerator::VisitThisFunction(ThisFunction* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ __ ldr(r0, frame_->Function());
+ frame_->EmitPush(r0);
+ ASSERT(frame_->height() == original_height + 1);
+}
+
+
+void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ CompareOperation");
+
+ // Get the expressions from the node.
+ Expression* left = node->left();
+ Expression* right = node->right();
+ Token::Value op = node->op();
+
+ // To make null checks efficient, we check if either left or right is the
+ // literal 'null'. If so, we optimize the code by inlining a null check
+ // instead of calling the (very) general runtime routine for checking
+ // equality.
+ if (op == Token::EQ || op == Token::EQ_STRICT) {
+ bool left_is_null =
+ left->AsLiteral() != NULL && left->AsLiteral()->IsNull();
+ bool right_is_null =
+ right->AsLiteral() != NULL && right->AsLiteral()->IsNull();
+ // The 'null' value can only be equal to 'null' or 'undefined'.
+ if (left_is_null || right_is_null) {
+ LoadAndSpill(left_is_null ? right : left);
+ frame_->EmitPop(r0);
+ __ cmp(r0, Operand(Factory::null_value()));
+
+ // The 'null' value is only equal to 'undefined' if using non-strict
+ // comparisons.
+ if (op != Token::EQ_STRICT) {
+ true_target()->Branch(eq);
+
+ __ cmp(r0, Operand(Factory::undefined_value()));
+ true_target()->Branch(eq);
+
+ __ tst(r0, Operand(kSmiTagMask));
+ false_target()->Branch(eq);
+
+ // It can be an undetectable object.
+ __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r0, FieldMemOperand(r0, Map::kBitFieldOffset));
+ __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
+ __ cmp(r0, Operand(1 << Map::kIsUndetectable));
+ }
+
+ cc_reg_ = eq;
+ ASSERT(has_cc() && frame_->height() == original_height);
+ return;
+ }
+ }
+
+ // To make typeof testing for natives implemented in JavaScript really
+ // efficient, we generate special code for expressions of the form:
+ // 'typeof <expression> == <string>'.
+ UnaryOperation* operation = left->AsUnaryOperation();
+ if ((op == Token::EQ || op == Token::EQ_STRICT) &&
+ (operation != NULL && operation->op() == Token::TYPEOF) &&
+ (right->AsLiteral() != NULL &&
+ right->AsLiteral()->handle()->IsString())) {
+ Handle<String> check(String::cast(*right->AsLiteral()->handle()));
+
+ // Load the operand, move it to register r1.
+ LoadTypeofExpression(operation->expression());
+ frame_->EmitPop(r1);
+
+ if (check->Equals(Heap::number_symbol())) {
+ __ tst(r1, Operand(kSmiTagMask));
+ true_target()->Branch(eq);
+ __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ cmp(r1, Operand(Factory::heap_number_map()));
+ cc_reg_ = eq;
+
+ } else if (check->Equals(Heap::string_symbol())) {
+ __ tst(r1, Operand(kSmiTagMask));
+ false_target()->Branch(eq);
+
+ __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
+
+ // It can be an undetectable string object.
+ __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset));
+ __ and_(r2, r2, Operand(1 << Map::kIsUndetectable));
+ __ cmp(r2, Operand(1 << Map::kIsUndetectable));
+ false_target()->Branch(eq);
+
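+      // String instance types all precede FIRST_NONSTRING_TYPE.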
+ __ ldrb(r2, FieldMemOperand(r1, Map::kInstanceTypeOffset));
+ __ cmp(r2, Operand(FIRST_NONSTRING_TYPE));
+ cc_reg_ = lt;
+
+ } else if (check->Equals(Heap::boolean_symbol())) {
+ __ cmp(r1, Operand(Factory::true_value()));
+ true_target()->Branch(eq);
+ __ cmp(r1, Operand(Factory::false_value()));
+ cc_reg_ = eq;
+
+ } else if (check->Equals(Heap::undefined_symbol())) {
+ __ cmp(r1, Operand(Factory::undefined_value()));
+ true_target()->Branch(eq);
+
+ __ tst(r1, Operand(kSmiTagMask));
+ false_target()->Branch(eq);
+
+ // It can be an undetectable object.
+ __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset));
+ __ and_(r2, r2, Operand(1 << Map::kIsUndetectable));
+ __ cmp(r2, Operand(1 << Map::kIsUndetectable));
+
+ cc_reg_ = eq;
+
+ } else if (check->Equals(Heap::function_symbol())) {
+ __ tst(r1, Operand(kSmiTagMask));
+ false_target()->Branch(eq);
+ __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
+ __ cmp(r1, Operand(JS_FUNCTION_TYPE));
+ cc_reg_ = eq;
+
+ } else if (check->Equals(Heap::object_symbol())) {
+ __ tst(r1, Operand(kSmiTagMask));
+ false_target()->Branch(eq);
+
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
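+      // typeof null is 'object', so the null value compares equal here.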
+ __ cmp(r1, Operand(Factory::null_value()));
+ true_target()->Branch(eq);
+
+ // It can be an undetectable object.
+ __ ldrb(r1, FieldMemOperand(r2, Map::kBitFieldOffset));
+ __ and_(r1, r1, Operand(1 << Map::kIsUndetectable));
+ __ cmp(r1, Operand(1 << Map::kIsUndetectable));
+ false_target()->Branch(eq);
+
+ __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
+ false_target()->Branch(lt);
+ __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE));
+ cc_reg_ = le;
+
+ } else {
+ // Uncommon case: typeof testing against a string literal that is
+ // never returned from the typeof operator.
+ false_target()->Jump();
+ }
+ ASSERT(!has_valid_frame() ||
+ (has_cc() && frame_->height() == original_height));
+ return;
+ }
+
+ LoadAndSpill(left);
+ LoadAndSpill(right);
+ switch (op) {
+ case Token::EQ:
+ Comparison(eq, false);
+ break;
+
+ case Token::LT:
+ Comparison(lt);
+ break;
+
+ case Token::GT:
+ Comparison(gt);
+ break;
+
+ case Token::LTE:
+ Comparison(le);
+ break;
+
+ case Token::GTE:
+ Comparison(ge);
+ break;
+
+ case Token::EQ_STRICT:
+ Comparison(eq, true);
+ break;
+
+ case Token::IN: {
+ Result arg_count = allocator_->Allocate(r0);
+ ASSERT(arg_count.is_valid());
+ __ mov(arg_count.reg(), Operand(1)); // not counting receiver
+ Result result = frame_->InvokeBuiltin(Builtins::IN,
+ CALL_JS,
+ &arg_count,
+ 2);
+ frame_->EmitPush(result.reg());
+ break;
+ }
+
+ case Token::INSTANCEOF: {
+ Result arg_count = allocator_->Allocate(r0);
+ ASSERT(arg_count.is_valid());
+ __ mov(arg_count.reg(), Operand(1)); // not counting receiver
+ Result result = frame_->InvokeBuiltin(Builtins::INSTANCE_OF,
+ CALL_JS,
+ &arg_count,
+ 2);
+ __ tst(result.reg(), Operand(result.reg()));
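+      // The builtin is expected to leave zero in the result register exactly
+      // when the instanceof test succeeds, making eq the truth condition.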
+ cc_reg_ = eq;
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+ ASSERT((has_cc() && frame_->height() == original_height) ||
+ (!has_cc() && frame_->height() == original_height + 1));
+}
+
+
+#ifdef DEBUG
+bool CodeGenerator::HasValidEntryRegisters() { return true; }
+#endif
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+Handle<String> Reference::GetName() {
+ ASSERT(type_ == NAMED);
+ Property* property = expression_->AsProperty();
+ if (property == NULL) {
+ // Global variable reference treated as a named property reference.
+ VariableProxy* proxy = expression_->AsVariableProxy();
+ ASSERT(proxy->AsVariable() != NULL);
+ ASSERT(proxy->AsVariable()->is_global());
+ return proxy->name();
+ } else {
+ Literal* raw_name = property->key()->AsLiteral();
+ ASSERT(raw_name != NULL);
+ return Handle<String>(String::cast(*raw_name->handle()));
+ }
+}
+
+
+void Reference::GetValueAndSpill(TypeofState typeof_state) {
+ ASSERT(cgen_->in_spilled_code());
+ cgen_->set_in_spilled_code(false);
+ GetValue(typeof_state);
+ cgen_->frame()->SpillAll();
+ cgen_->set_in_spilled_code(true);
+}
+
+
+void Reference::GetValue(TypeofState typeof_state) {
+ ASSERT(!cgen_->in_spilled_code());
+ ASSERT(cgen_->HasValidEntryRegisters());
+ ASSERT(!is_illegal());
+ ASSERT(!cgen_->has_cc());
+ MacroAssembler* masm = cgen_->masm();
+ Property* property = expression_->AsProperty();
+ if (property != NULL) {
+ cgen_->CodeForSourcePosition(property->position());
+ }
+
+ switch (type_) {
+ case SLOT: {
+ Comment cmnt(masm, "[ Load from Slot");
+ Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+ ASSERT(slot != NULL);
+ cgen_->LoadFromSlot(slot, typeof_state);
+ break;
+ }
+
+ case NAMED: {
+      // TODO(1241834): Make sure that it is safe to ignore the
+ // distinction between expressions in a typeof and not in a typeof. If
+ // there is a chance that reference errors can be thrown below, we
+ // must distinguish between the two kinds of loads (typeof expression
+ // loads must not throw a reference error).
+ VirtualFrame* frame = cgen_->frame();
+ Comment cmnt(masm, "[ Load from named Property");
+ Handle<String> name(GetName());
+ Variable* var = expression_->AsVariableProxy()->AsVariable();
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ // Setup the name register.
+ Result name_reg = cgen_->allocator()->Allocate(r2);
+ ASSERT(name_reg.is_valid());
+ __ mov(name_reg.reg(), Operand(name));
+ ASSERT(var == NULL || var->is_global());
+ RelocInfo::Mode rmode = (var == NULL)
+ ? RelocInfo::CODE_TARGET
+ : RelocInfo::CODE_TARGET_CONTEXT;
+ Result answer = frame->CallCodeObject(ic, rmode, &name_reg, 0);
+ frame->EmitPush(answer.reg());
+ break;
+ }
+
+ case KEYED: {
+      // TODO(1241834): Make sure that it is safe to ignore the
+ // distinction between expressions in a typeof and not in a typeof.
+
+ // TODO(181): Implement inlined version of array indexing once
+ // loop nesting is properly tracked on ARM.
+ VirtualFrame* frame = cgen_->frame();
+ Comment cmnt(masm, "[ Load from keyed Property");
+ ASSERT(property != NULL);
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+ Variable* var = expression_->AsVariableProxy()->AsVariable();
+ ASSERT(var == NULL || var->is_global());
+ RelocInfo::Mode rmode = (var == NULL)
+ ? RelocInfo::CODE_TARGET
+ : RelocInfo::CODE_TARGET_CONTEXT;
+ Result answer = frame->CallCodeObject(ic, rmode, 0);
+ frame->EmitPush(answer.reg());
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void Reference::SetValue(InitState init_state) {
+ ASSERT(!is_illegal());
+ ASSERT(!cgen_->has_cc());
+ MacroAssembler* masm = cgen_->masm();
+ VirtualFrame* frame = cgen_->frame();
+ Property* property = expression_->AsProperty();
+ if (property != NULL) {
+ cgen_->CodeForSourcePosition(property->position());
+ }
+
+ switch (type_) {
+ case SLOT: {
+ Comment cmnt(masm, "[ Store to Slot");
+ Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+ ASSERT(slot != NULL);
+ if (slot->type() == Slot::LOOKUP) {
+ ASSERT(slot->var()->is_dynamic());
+
+ // For now, just do a runtime call.
+ frame->EmitPush(cp);
+ __ mov(r0, Operand(slot->var()->name()));
+ frame->EmitPush(r0);
+
+ if (init_state == CONST_INIT) {
+ // Same as the case for a normal store, but ignores attribute
+ // (e.g. READ_ONLY) of context slot so that we can initialize
+ // const properties (introduced via eval("const foo = (some
+ // expr);")). Also, uses the current function context instead of
+ // the top context.
+ //
+ // Note that we must declare the foo upon entry of eval(), via a
+ // context slot declaration, but we cannot initialize it at the
+ // same time, because the const declaration may be at the end of
+ // the eval code (sigh...) and the const variable may have been
+ // used before (where its value is 'undefined'). Thus, we can only
+ // do the initialization when we actually encounter the expression
+ // and when the expression operands are defined and valid, and
+ // thus we need the split into 2 operations: declaration of the
+ // context slot followed by initialization.
+ frame->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ } else {
+ frame->CallRuntime(Runtime::kStoreContextSlot, 3);
+ }
+ // Storing a variable must keep the (new) value on the expression
+ // stack. This is necessary for compiling assignment expressions.
+ frame->EmitPush(r0);
+
+ } else {
+ ASSERT(!slot->var()->is_dynamic());
+
+ JumpTarget exit;
+ if (init_state == CONST_INIT) {
+ ASSERT(slot->var()->mode() == Variable::CONST);
+ // Only the first const initialization must be executed (the slot
+ // still contains 'the hole' value). When the assignment is
+ // executed, the code is identical to a normal store (see below).
+ Comment cmnt(masm, "[ Init const");
+ __ ldr(r2, cgen_->SlotOperand(slot, r2));
+ __ cmp(r2, Operand(Factory::the_hole_value()));
+ exit.Branch(ne);
+ }
+
+ // We must execute the store. Storing a variable must keep the
+ // (new) value on the stack. This is necessary for compiling
+ // assignment expressions.
+ //
+ // Note: We will reach here even with slot->var()->mode() ==
+ // Variable::CONST because of const declarations which will
+ // initialize consts to 'the hole' value and by doing so, end up
+ // calling this code. r2 may be loaded with context; used below in
+ // RecordWrite.
+ frame->EmitPop(r0);
+ __ str(r0, cgen_->SlotOperand(slot, r2));
+ frame->EmitPush(r0);
+ if (slot->type() == Slot::CONTEXT) {
+ // Skip write barrier if the written value is a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ exit.Branch(eq);
+ // r2 is loaded with context when calling SlotOperand above.
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ __ mov(r3, Operand(offset));
+ __ RecordWrite(r2, r3, r1);
+ }
+ // If we definitely did not jump over the assignment, we do not need
+ // to bind the exit label. Doing so can defeat peephole
+ // optimization.
+ if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
+ exit.Bind();
+ }
+ }
+ break;
+ }
+
+ case NAMED: {
+ Comment cmnt(masm, "[ Store to named Property");
+ // Call the appropriate IC code.
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ Handle<String> name(GetName());
+
+ Result value = cgen_->allocator()->Allocate(r0);
+ ASSERT(value.is_valid());
+ frame->EmitPop(value.reg());
+
+ // Setup the name register.
+ Result property_name = cgen_->allocator()->Allocate(r2);
+ ASSERT(property_name.is_valid());
+ __ mov(property_name.reg(), Operand(name));
+ Result answer = frame->CallCodeObject(ic,
+ RelocInfo::CODE_TARGET,
+ &value,
+ &property_name,
+ 0);
+ frame->EmitPush(answer.reg());
+ break;
+ }
+
+ case KEYED: {
+ Comment cmnt(masm, "[ Store to keyed Property");
+ Property* property = expression_->AsProperty();
+ ASSERT(property != NULL);
+ cgen_->CodeForSourcePosition(property->position());
+
+ // Call IC code.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ // TODO(1222589): Make the IC grab the values from the stack.
+ Result value = cgen_->allocator()->Allocate(r0);
+ ASSERT(value.is_valid());
+ frame->EmitPop(value.reg()); // value
+ Result result =
+ frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, &value, 0);
+ frame->EmitPush(result.reg());
+ break;
+ }
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+static void AllocateHeapNumber(
+ MacroAssembler* masm,
+ Label* need_gc, // Jump here if young space is full.
+ Register result_reg, // The tagged address of the new heap number.
+ Register allocation_top_addr_reg, // A scratch register.
+ Register scratch2) { // Another scratch register.
+ ExternalReference allocation_top =
+ ExternalReference::new_space_allocation_top_address();
+ ExternalReference allocation_limit =
+ ExternalReference::new_space_allocation_limit_address();
+
+  // allocation_top_addr_reg := the address of the allocation top variable.
+ __ mov(allocation_top_addr_reg, Operand(allocation_top));
+ // result_reg := the old allocation top.
+ __ ldr(result_reg, MemOperand(allocation_top_addr_reg));
+ // scratch2 := the address of the allocation limit.
+ __ mov(scratch2, Operand(allocation_limit));
+ // scratch2 := the allocation limit.
+ __ ldr(scratch2, MemOperand(scratch2));
+ // result_reg := the new allocation top.
+ __ add(result_reg, result_reg, Operand(HeapNumber::kSize));
+  // Compare the new allocation top and the limit.
+ __ cmp(result_reg, Operand(scratch2));
+ // Branch if out of space in young generation.
+ __ b(hi, need_gc);
+ // Store new allocation top.
+ __ str(result_reg, MemOperand(allocation_top_addr_reg)); // store new top
+ // Tag and adjust back to start of new object.
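+  // Subtracting kSize - kHeapObjectTag moves back to the start of the object
+  // and sets the heap object tag bit in a single instruction.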
+ __ sub(result_reg, result_reg, Operand(HeapNumber::kSize - kHeapObjectTag));
+ // Get heap number map into scratch2.
+ __ mov(scratch2, Operand(Factory::heap_number_map()));
+ // Store heap number map in new object.
+ __ str(scratch2, FieldMemOperand(result_reg, HeapObject::kMapOffset));
+}
+
+
+// We fall into this code if the operands were Smis, but the result was
+// not (e.g. overflow). We branch into this code (to the not_smi label) if
+// the operands were not both Smi.
+static void HandleBinaryOpSlowCases(MacroAssembler* masm,
+ Label* not_smi,
+ const Builtins::JavaScript& builtin,
+ Token::Value operation,
+ int swi_number,
+ OverwriteMode mode) {
+ Label slow;
+ __ bind(&slow);
+ __ push(r1);
+ __ push(r0);
+ __ mov(r0, Operand(1)); // Set number of arguments.
+ __ InvokeBuiltin(builtin, JUMP_JS); // Tail call.
+
+ __ bind(not_smi);
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &slow); // We can't handle a Smi-double combination yet.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &slow); // We can't handle a Smi-double combination yet.
+ // Get map of r0 into r2.
+ __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+ // Get type of r0 into r3.
+ __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ __ cmp(r3, Operand(HEAP_NUMBER_TYPE));
+ __ b(ne, &slow);
+ // Get type of r1 into r3.
+ __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+ // Check they are both the same map (heap number map).
+ __ cmp(r2, r3);
+ __ b(ne, &slow);
+ // Both are doubles.
+ // Calling convention says that second double is in r2 and r3.
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
+
+ if (mode == NO_OVERWRITE) {
+ // Get address of new heap number into r5.
+ AllocateHeapNumber(masm, &slow, r5, r6, r7);
+ __ push(lr);
+ __ push(r5);
+ } else if (mode == OVERWRITE_LEFT) {
+ __ push(lr);
+ __ push(r1);
+ } else {
+ ASSERT(mode == OVERWRITE_RIGHT);
+ __ push(lr);
+ __ push(r0);
+ }
+ // Calling convention says that first double is in r0 and r1.
+ __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
+ __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
+ // Call C routine that may not cause GC or other trouble.
+ __ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
+#if !defined(__arm__)
+ // Notify the simulator that we are calling an add routine in C.
+ __ swi(swi_number);
+#else
+ // Actually call the add routine written in C.
+ __ Call(r5);
+#endif
+ // Store answer in the overwritable heap number.
+ __ pop(r4);
+#if !defined(__ARM_EABI__) && defined(__arm__)
+ // Double returned in fp coprocessor register 0 and 1, encoded as register
+ // cr8. Offsets must be divisible by 4 for coprocessor so we need to
+  // subtract the tag from r4.
+ __ sub(r5, r4, Operand(kHeapObjectTag));
+ __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset));
+#else
+  // Double returned in core registers r0 and r1.
+ __ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset));
+ __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + kPointerSize));
+#endif
+ __ mov(r0, Operand(r4));
+ // And we are done.
+ __ pop(pc);
+}
+
+
+void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
+ // r1 : x
+ // r0 : y
+ // result : r0
+
+ // All ops need to know whether we are dealing with two Smis. Set up r2 to
+ // tell us that.
+ __ orr(r2, r1, Operand(r0)); // r2 = x | y;
+
+ switch (op_) {
+ case Token::ADD: {
+ Label not_smi;
+ // Fast path.
+ ASSERT(kSmiTag == 0); // Adjust code below.
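+      // Because the smi tag is zero, adding two tagged values adds the
+      // untagged values and preserves the tag; the V flag catches 31-bit
+      // overflow of the untagged sum.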
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(ne, &not_smi);
+ __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically.
+ // Return if no overflow.
+ __ Ret(vc);
+ __ sub(r0, r0, Operand(r1)); // Revert optimistic add.
+
+ HandleBinaryOpSlowCases(masm,
+ &not_smi,
+ Builtins::ADD,
+ Token::ADD,
+ assembler::arm::simulator_fp_add,
+ mode_);
+ break;
+ }
+
+ case Token::SUB: {
+ Label not_smi;
+ // Fast path.
+ ASSERT(kSmiTag == 0); // Adjust code below.
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(ne, &not_smi);
+ __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically.
+ // Return if no overflow.
+ __ Ret(vc);
+ __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract.
+
+ HandleBinaryOpSlowCases(masm,
+ &not_smi,
+ Builtins::SUB,
+ Token::SUB,
+ assembler::arm::simulator_fp_sub,
+ mode_);
+ break;
+ }
+
+ case Token::MUL: {
+ Label not_smi, slow;
+ ASSERT(kSmiTag == 0); // adjust code below
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(ne, &not_smi);
+ // Remove tag from one operand (but keep sign), so that result is Smi.
+ __ mov(ip, Operand(r0, ASR, kSmiTagSize));
+ // Do multiplication
+ __ smull(r3, r2, r1, ip); // r3 = lower 32 bits of ip*r1.
+ // Go slow on overflows (overflow bit is not set).
+ __ mov(ip, Operand(r3, ASR, 31));
+ __ cmp(ip, Operand(r2)); // no overflow if higher 33 bits are identical
+ __ b(ne, &slow);
+ // Go slow on zero result to handle -0.
+ __ tst(r3, Operand(r3));
+ __ mov(r0, Operand(r3), LeaveCC, ne);
+ __ Ret(ne);
+ // Slow case.
+ __ bind(&slow);
+
+ HandleBinaryOpSlowCases(masm,
+ &not_smi,
+ Builtins::MUL,
+ Token::MUL,
+ assembler::arm::simulator_fp_mul,
+ mode_);
+ break;
+ }
+
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR: {
+ Label slow;
+ ASSERT(kSmiTag == 0); // adjust code below
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(ne, &slow);
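+      // With a zero smi tag the tag bits take part in the bitwise operation
+      // as zeros, so the result is already a correctly tagged smi and no
+      // untagging or retagging is needed.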
+ switch (op_) {
+ case Token::BIT_OR: __ orr(r0, r0, Operand(r1)); break;
+ case Token::BIT_AND: __ and_(r0, r0, Operand(r1)); break;
+ case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break;
+ default: UNREACHABLE();
+ }
+ __ Ret();
+ __ bind(&slow);
+ __ push(r1); // restore stack
+ __ push(r0);
+ __ mov(r0, Operand(1)); // 1 argument (not counting receiver).
+ switch (op_) {
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+
+ case Token::SHL:
+ case Token::SHR:
+ case Token::SAR: {
+ Label slow;
+ ASSERT(kSmiTag == 0); // adjust code below
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(ne, &slow);
+ // remove tags from operands (but keep sign)
+ __ mov(r3, Operand(r1, ASR, kSmiTagSize)); // x
+ __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // y
+ // use only the 5 least significant bits of the shift count
+ __ and_(r2, r2, Operand(0x1f));
+ // perform operation
+ switch (op_) {
+ case Token::SAR:
+ __ mov(r3, Operand(r3, ASR, r2));
+ // no checks of result necessary
+ break;
+
+ case Token::SHR:
+ __ mov(r3, Operand(r3, LSR, r2));
+ // check that the *unsigned* result fits in a smi
+ // neither of the two high-order bits can be set:
+ // - 0x80000000: high bit would be lost when smi tagging
+ // - 0x40000000: this number would convert to negative when
+ // smi tagging these two cases can only happen with shifts
+ // by 0 or 1 when handed a valid smi
+ __ and_(r2, r3, Operand(0xc0000000), SetCC);
+ __ b(ne, &slow);
+ break;
+
+ case Token::SHL:
+ __ mov(r3, Operand(r3, LSL, r2));
+ // check that the *signed* result fits in a smi
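+          // (it does iff bits 31 and 30 agree; adding 0x40000000 makes the
+          // result negative exactly when they do not)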
+ __ add(r2, r3, Operand(0x40000000), SetCC);
+ __ b(mi, &slow);
+ break;
+
+ default: UNREACHABLE();
+ }
+ // tag result and store it in r0
+ ASSERT(kSmiTag == 0); // adjust code below
+ __ mov(r0, Operand(r3, LSL, kSmiTagSize));
+ __ Ret();
+ // slow case
+ __ bind(&slow);
+ __ push(r1); // restore stack
+ __ push(r0);
+ __ mov(r0, Operand(1)); // 1 argument (not counting receiver).
+ switch (op_) {
+ case Token::SAR: __ InvokeBuiltin(Builtins::SAR, JUMP_JS); break;
+ case Token::SHR: __ InvokeBuiltin(Builtins::SHR, JUMP_JS); break;
+ case Token::SHL: __ InvokeBuiltin(Builtins::SHL, JUMP_JS); break;
+ default: UNREACHABLE();
+ }
+ break;
+ }
+
+ default: UNREACHABLE();
+ }
+ // This code should be unreachable.
+ __ stop("Unreachable");
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+ Label within_limit;
+ __ mov(ip, Operand(ExternalReference::address_of_stack_guard_limit()));
+ __ ldr(ip, MemOperand(ip));
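+  // The stack grows downwards, so sp unsigned-higher-or-same than the limit
+  // means execution is still within the guard limit.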
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &within_limit);
+ // Do tail-call to runtime routine. Runtime routines expect at least one
+ // argument, so give it a Smi.
+ __ mov(r0, Operand(Smi::FromInt(0)));
+ __ push(r0);
+ __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1);
+ __ bind(&within_limit);
+
+ __ StubReturn(1);
+}
+
+
+void UnarySubStub::Generate(MacroAssembler* masm) {
+ Label undo;
+ Label slow;
+ Label done;
+
+ // Enter runtime system if the value is not a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(ne, &slow);
+
+ // Enter runtime system if the value of the expression is zero
+ // to make sure that we switch between 0 and -0.
+ __ cmp(r0, Operand(0));
+ __ b(eq, &slow);
+
+ // The value of the expression is a smi that is not zero. Try
+ // optimistic subtraction '0 - value'.
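+  // The only smi whose negation overflows is the minimum smi value
+  // (0x80000000 as a tagged word, representing -2^30); the V flag set by
+  // the rsb below catches that case.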
+ __ rsb(r1, r0, Operand(0), SetCC);
+ __ b(vs, &slow);
+
+ // If result is a smi we are done.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ mov(r0, Operand(r1), LeaveCC, eq); // conditionally set r0 to result
+ __ b(eq, &done);
+
+ // Enter runtime system.
+ __ bind(&slow);
+ __ push(r0);
+ __ mov(r0, Operand(0)); // set number of arguments
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
+
+ __ bind(&done);
+ __ StubReturn(1);
+}
+
+
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+ // r0 holds exception
+ ASSERT(StackHandlerConstants::kSize == 6 * kPointerSize); // adjust this code
+ __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ __ ldr(sp, MemOperand(r3));
+ __ pop(r2); // pop next in chain
+ __ str(r2, MemOperand(r3));
+ // restore parameter- and frame-pointer and pop state.
+ __ ldm(ia_w, sp, r3.bit() | pp.bit() | fp.bit());
+ // Before returning we restore the context from the frame pointer if not NULL.
+ // The frame pointer is NULL in the exception handler of a JS entry frame.
+ __ cmp(fp, Operand(0));
+ // Set cp to NULL if fp is NULL.
+ __ mov(cp, Operand(0), LeaveCC, eq);
+ // Restore cp otherwise.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
+#ifdef DEBUG
+ if (FLAG_debug_code) {
+ __ mov(lr, Operand(pc));
+ }
+#endif
+ __ pop(pc);
+}
+
+
+void CEntryStub::GenerateThrowOutOfMemory(MacroAssembler* masm) {
+ // Fetch top stack handler.
+ __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ __ ldr(r3, MemOperand(r3));
+
+ // Unwind the handlers until the ENTRY handler is found.
+ Label loop, done;
+ __ bind(&loop);
+ // Load the type of the current stack handler.
+ const int kStateOffset = StackHandlerConstants::kAddressDisplacement +
+ StackHandlerConstants::kStateOffset;
+ __ ldr(r2, MemOperand(r3, kStateOffset));
+ __ cmp(r2, Operand(StackHandler::ENTRY));
+ __ b(eq, &done);
+ // Fetch the next handler in the list.
+ const int kNextOffset = StackHandlerConstants::kAddressDisplacement +
+ StackHandlerConstants::kNextOffset;
+ __ ldr(r3, MemOperand(r3, kNextOffset));
+ __ jmp(&loop);
+ __ bind(&done);
+
+ // Set the top handler address to next handler past the current ENTRY handler.
+ __ ldr(r0, MemOperand(r3, kNextOffset));
+ __ mov(r2, Operand(ExternalReference(Top::k_handler_address)));
+ __ str(r0, MemOperand(r2));
+
+ // Set external caught exception to false.
+ __ mov(r0, Operand(false));
+ ExternalReference external_caught(Top::k_external_caught_exception_address);
+ __ mov(r2, Operand(external_caught));
+ __ str(r0, MemOperand(r2));
+
+ // Set pending exception and r0 to out of memory exception.
+ Failure* out_of_memory = Failure::OutOfMemoryException();
+ __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
+ __ str(r0, MemOperand(r2));
+
+ // Restore the stack to the address of the ENTRY handler
+ __ mov(sp, Operand(r3));
+
+ // Stack layout at this point. See also PushTryHandler
+ // r3, sp -> next handler
+ // state (ENTRY)
+ // pp
+ // fp
+ // lr
+
+ // Discard ENTRY state (r2 is not used), and restore parameter-
+ // and frame-pointer and pop state.
+ __ ldm(ia_w, sp, r2.bit() | r3.bit() | pp.bit() | fp.bit());
+ // Before returning we restore the context from the frame pointer if not NULL.
+ // The frame pointer is NULL in the exception handler of a JS entry frame.
+ __ cmp(fp, Operand(0));
+ // Set cp to NULL if fp is NULL.
+ __ mov(cp, Operand(0), LeaveCC, eq);
+ // Restore cp otherwise.
+ __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
+#ifdef DEBUG
+ if (FLAG_debug_code) {
+ __ mov(lr, Operand(pc));
+ }
+#endif
+ __ pop(pc);
+}
+
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+ Label* throw_normal_exception,
+ Label* throw_out_of_memory_exception,
+ StackFrame::Type frame_type,
+ bool do_gc,
+ bool always_allocate) {
+ // r0: result parameter for PerformGC, if any
+ // r4: number of arguments including receiver (C callee-saved)
+ // r5: pointer to builtin function (C callee-saved)
+ // r6: pointer to the first argument (C callee-saved)
+
+ if (do_gc) {
+ // Passing r0.
+ __ Call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
+ }
+
+ ExternalReference scope_depth =
+ ExternalReference::heap_always_allocate_scope_depth();
+ if (always_allocate) {
+ __ mov(r0, Operand(scope_depth));
+ __ ldr(r1, MemOperand(r0));
+ __ add(r1, r1, Operand(1));
+ __ str(r1, MemOperand(r0));
+ }
+
+ // Call C built-in.
+ // r0 = argc, r1 = argv
+ __ mov(r0, Operand(r4));
+ __ mov(r1, Operand(r6));
+
+ // TODO(1242173): To let the GC traverse the return address of the exit
+ // frames, we need to know where the return address is. Right now,
+ // we push it on the stack to be able to find it again, but we never
+ // restore from it in case of changes, which makes it impossible to
+ // support moving the C entry code stub. This should be fixed, but currently
+ // this is OK because the CEntryStub gets generated so early in the V8 boot
+  // sequence that it is never moved.
+ __ add(lr, pc, Operand(4)); // compute return address: (pc + 8) + 4
+ __ push(lr);
+#if !defined(__arm__)
+ // Notify the simulator of the transition to C code.
+ __ swi(assembler::arm::call_rt_r5);
+#else /* !defined(__arm__) */
+ __ Jump(r5);
+#endif /* !defined(__arm__) */
+
+ if (always_allocate) {
+ // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
+ // though (contain the result).
+ __ mov(r2, Operand(scope_depth));
+ __ ldr(r3, MemOperand(r2));
+ __ sub(r3, r3, Operand(1));
+ __ str(r3, MemOperand(r2));
+ }
+
+ // check for failure result
+ Label failure_returned;
+ ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+ // Lower 2 bits of r2 are 0 iff r0 has failure tag.
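+  // Failure objects have both low tag bits set (kFailureTag is 3), while
+  // smis and heap objects do not, so adding 1 clears the two low bits only
+  // for a failure.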
+ __ add(r2, r0, Operand(1));
+ __ tst(r2, Operand(kFailureTagMask));
+ __ b(eq, &failure_returned);
+
+ // Exit C frame and return.
+ // r0:r1: result
+ // sp: stack pointer
+ // fp: frame pointer
+ // pp: caller's parameter pointer pp (restored as C callee-saved)
+ __ LeaveExitFrame(frame_type);
+
+ // check if we should retry or throw exception
+ Label retry;
+ __ bind(&failure_returned);
+ ASSERT(Failure::RETRY_AFTER_GC == 0);
+ __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
+ __ b(eq, &retry);
+
+ Label continue_exception;
+ // If the returned failure is EXCEPTION then promote Top::pending_exception().
+ __ cmp(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
+ __ b(ne, &continue_exception);
+
+ // Retrieve the pending exception and clear the variable.
+ __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
+ __ ldr(r3, MemOperand(ip));
+ __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
+ __ ldr(r0, MemOperand(ip));
+ __ str(r3, MemOperand(ip));
+
+ __ bind(&continue_exception);
+ // Special handling of out of memory exception.
+ Failure* out_of_memory = Failure::OutOfMemoryException();
+ __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
+ __ b(eq, throw_out_of_memory_exception);
+
+ // Handle normal exception.
+ __ jmp(throw_normal_exception);
+
+ __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying
+}
+
+
+void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) {
+ // Called from JavaScript; parameters are on stack as if calling JS function
+ // r0: number of arguments including receiver
+ // r1: pointer to builtin function
+ // fp: frame pointer (restored after C call)
+ // sp: stack pointer (restored as callee's pp after C call)
+ // cp: current context (C callee-saved)
+ // pp: caller's parameter pointer pp (C callee-saved)
+
+ // NOTE: Invocations of builtins may return failure objects
+ // instead of a proper result. The builtin entry handles
+ // this by performing a garbage collection and retrying the
+ // builtin once.
+
+ StackFrame::Type frame_type = is_debug_break
+ ? StackFrame::EXIT_DEBUG
+ : StackFrame::EXIT;
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ __ EnterExitFrame(frame_type);
+
+ // r4: number of arguments (C callee-saved)
+ // r5: pointer to builtin function (C callee-saved)
+ // r6: pointer to first argument (C callee-saved)
+
+ Label throw_out_of_memory_exception;
+ Label throw_normal_exception;
+
+ // Call into the runtime system. Collect garbage before the call if
+ // running with --gc-greedy set.
+ if (FLAG_gc_greedy) {
+ Failure* failure = Failure::RetryAfterGC(0);
+ __ mov(r0, Operand(reinterpret_cast<intptr_t>(failure)));
+ }
+ GenerateCore(masm, &throw_normal_exception,
+ &throw_out_of_memory_exception,
+ frame_type,
+ FLAG_gc_greedy,
+ false);
+
+ // Do space-specific GC and retry runtime call.
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_out_of_memory_exception,
+ frame_type,
+ true,
+ false);
+
+ // Do full GC and retry runtime call one final time.
+ Failure* failure = Failure::InternalError();
+ __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
+ GenerateCore(masm,
+ &throw_normal_exception,
+ &throw_out_of_memory_exception,
+ frame_type,
+ true,
+ true);
+
+ __ bind(&throw_out_of_memory_exception);
+ GenerateThrowOutOfMemory(masm);
+  // Control flow from the code generated above will not return.
+
+ __ bind(&throw_normal_exception);
+ GenerateThrowTOS(masm);
+}
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ // r0: code entry
+ // r1: function
+ // r2: receiver
+ // r3: argc
+ // [sp+0]: argv
+
+ Label invoke, exit;
+
+ // Called from C, so do not pop argc and args on exit (preserve sp)
+ // No need to save register-passed args
+ // Save callee-saved registers (incl. cp, pp, and fp), sp, and lr
+ __ stm(db_w, sp, kCalleeSaved | lr.bit());
+
+ // Get address of argv, see stm above.
+ // r0: code entry
+ // r1: function
+ // r2: receiver
+ // r3: argc
+ __ add(r4, sp, Operand((kNumCalleeSaved + 1)*kPointerSize));
+ __ ldr(r4, MemOperand(r4)); // argv
+
+ // Push a frame with special values setup to mark it as an entry frame.
+ // r0: code entry
+ // r1: function
+ // r2: receiver
+ // r3: argc
+ // r4: argv
+ int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+ __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used.
+ __ mov(r7, Operand(~ArgumentsAdaptorFrame::SENTINEL));
+ __ mov(r6, Operand(Smi::FromInt(marker)));
+ __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+ __ ldr(r5, MemOperand(r5));
+ __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | r8.bit());
+
+ // Setup frame pointer for the frame to be pushed.
+ __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
+
+ // Call a faked try-block that does the invoke.
+ __ bl(&invoke);
+
+ // Caught exception: Store result (exception) in the pending
+ // exception field in the JSEnv and return a failure sentinel.
+ // Coming in here the fp will be invalid because the PushTryHandler below
+ // sets it to 0 to signal the existence of the JSEntry frame.
+ __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
+ __ str(r0, MemOperand(ip));
+ __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
+ __ b(&exit);
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ // Must preserve r0-r4, r5-r7 are available.
+ __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
+ // If an exception not caught by another handler occurs, this handler returns
+ // control to the code after the bl(&invoke) above, which restores all
+ // kCalleeSaved registers (including cp, pp and fp) to their saved values
+ // before returning a failure to C.
+
+ // Clear any pending exceptions.
+ __ mov(ip, Operand(ExternalReference::the_hole_value_location()));
+ __ ldr(r5, MemOperand(ip));
+ __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address)));
+ __ str(r5, MemOperand(ip));
+
+ // Invoke the function by calling through JS entry trampoline builtin.
+ // Notice that we cannot store a reference to the trampoline code directly in
+ // this stub, because runtime stubs are not traversed when doing GC.
+
+ // Expected registers by Builtins::JSEntryTrampoline
+ // r0: code entry
+ // r1: function
+ // r2: receiver
+ // r3: argc
+ // r4: argv
+ if (is_construct) {
+ ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
+ __ mov(ip, Operand(construct_entry));
+ } else {
+ ExternalReference entry(Builtins::JSEntryTrampoline);
+ __ mov(ip, Operand(entry));
+ }
+ __ ldr(ip, MemOperand(ip)); // deref address
+
+ // Branch and link to JSEntryTrampoline. We don't use the double underscore
+ // macro for the add instruction because we don't want the coverage tool
+ // inserting instructions here after we read the pc.
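+  // Reading pc yields the address of the current instruction plus 8, so lr
+  // ends up holding the address of the instruction following the add --
+  // the return address for the trampoline call.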
+ __ mov(lr, Operand(pc));
+ masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // Unlink this frame from the handler chain. When reading the
+ // address of the next handler, there is no need to use the address
+ // displacement since the current stack pointer (sp) points directly
+ // to the stack handler.
+ __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset));
+ __ mov(ip, Operand(ExternalReference(Top::k_handler_address)));
+ __ str(r3, MemOperand(ip));
+ // No need to restore registers
+ __ add(sp, sp, Operand(StackHandlerConstants::kSize));
+
+
+ __ bind(&exit); // r0 holds result
+ // Restore the top frame descriptors from the stack.
+ __ pop(r3);
+ __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+ __ str(r3, MemOperand(ip));
+
+ // Reset the stack to the callee saved registers.
+ __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
+
+ // Restore callee-saved registers and return.
+#ifdef DEBUG
+ if (FLAG_debug_code) {
+ __ mov(lr, Operand(pc));
+ }
+#endif
+ __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
+}
+
+
+void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor;
+ __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
+ __ cmp(r3, Operand(ArgumentsAdaptorFrame::SENTINEL));
+ __ b(eq, &adaptor);
+
+ // Nothing to do: The formal number of parameters has already been
+  // passed in register r0 by the calling function. Just return it.
+ __ Jump(lr);
+
+ // Arguments adaptor case: Read the arguments length from the
+ // adaptor frame and return it.
+ __ bind(&adaptor);
+ __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Jump(lr);
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ // The displacement is the offset of the last parameter (if any)
+ // relative to the frame pointer.
+ static const int kDisplacement =
+ StandardFrameConstants::kCallerSPOffset - kPointerSize;
+
+ // Check that the key is a smi.
+ Label slow;
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(ne, &slow);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor;
+ __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
+ __ cmp(r3, Operand(ArgumentsAdaptorFrame::SENTINEL));
+ __ b(eq, &adaptor);
+
+ // Check index against formal parameters count limit passed in
+  // through register r0. Use unsigned comparison to get negative
+ // check for free.
+ __ cmp(r1, r0);
+ __ b(cs, &slow);
+
+ // Read the argument from the stack and return it.
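+  // r0 and r1 hold smis, so shifting their difference left by
+  // kPointerSizeLog2 - kSmiTagSize converts the tagged index delta into a
+  // byte offset from the frame pointer.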
+ __ sub(r3, r0, r1);
+ __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(r0, MemOperand(r3, kDisplacement));
+ __ Jump(lr);
+
+ // Arguments adaptor case: Check index against actual arguments
+ // limit found in the arguments adaptor frame. Use unsigned
+ // comparison to get negative check for free.
+ __ bind(&adaptor);
+ __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ cmp(r1, r0);
+ __ b(cs, &slow);
+
+ // Read the argument from the adaptor frame and return it.
+ __ sub(r3, r0, r1);
+ __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(r0, MemOperand(r3, kDisplacement));
+ __ Jump(lr);
+
+ // Slow-case: Handle non-smi or out-of-bounds access to arguments
+ // by calling the runtime system.
+ __ bind(&slow);
+ __ push(r1);
+ __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
+ __ cmp(r3, Operand(ArgumentsAdaptorFrame::SENTINEL));
+ __ b(ne, &runtime);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ str(r0, MemOperand(sp, 0 * kPointerSize));
+ __ add(r3, r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ str(r3, MemOperand(sp, 1 * kPointerSize));
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3);
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ Label slow;
+ // Get the function to call from the stack.
+ // function, receiver [, arguments]
+ __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
+
+ // Check that the function is really a JavaScript function.
+ // r1: pushed function (to be verified)
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &slow);
+ // Get the map of the function object.
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ __ cmp(r2, Operand(JS_FUNCTION_TYPE));
+ __ b(ne, &slow);
+
+ // Fast-case: Invoke the function now.
+ // r1: pushed function
+ ParameterCount actual(argc_);
+ __ InvokeFunction(r1, actual, JUMP_FUNCTION);
+
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ __ mov(r0, Operand(argc_)); // Setup the number of arguments.
+ __ mov(r2, Operand(0));
+ __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
+ __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)),
+ RelocInfo::CODE_TARGET);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
diff --git a/V8Binding/v8/src/arm/codegen-arm.h b/V8Binding/v8/src/arm/codegen-arm.h
new file mode 100644
index 0000000..a8cb777
--- /dev/null
+++ b/V8Binding/v8/src/arm/codegen-arm.h
@@ -0,0 +1,459 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_CODEGEN_ARM_H_
+#define V8_ARM_CODEGEN_ARM_H_
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations
+class DeferredCode;
+class RegisterAllocator;
+class RegisterFile;
+
+enum InitState { CONST_INIT, NOT_CONST_INIT };
+enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+
+
+// -------------------------------------------------------------------------
+// Reference support
+
+// A reference is a C++ stack-allocated object that keeps an ECMA
+// reference on the execution stack while in scope. For variables
+// the reference is empty, indicating that it isn't necessary to
+// store state on the stack for keeping track of references to those.
+// For properties, we keep either one (named) or two (indexed) values
+// on the execution stack to represent the reference.
+
+class Reference BASE_EMBEDDED {
+ public:
+  // The values of the types are important, see size().
+ enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+ Reference(CodeGenerator* cgen, Expression* expression);
+ ~Reference();
+
+ Expression* expression() const { return expression_; }
+ Type type() const { return type_; }
+ void set_type(Type value) {
+ ASSERT(type_ == ILLEGAL);
+ type_ = value;
+ }
+
+ // The size the reference takes up on the stack.
+ int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
+
+ bool is_illegal() const { return type_ == ILLEGAL; }
+ bool is_slot() const { return type_ == SLOT; }
+ bool is_property() const { return type_ == NAMED || type_ == KEYED; }
+
+ // Return the name. Only valid for named property references.
+ Handle<String> GetName();
+
+ // Generate code to push the value of the reference on top of the
+ // expression stack. The reference is expected to be already on top of
+ // the expression stack, and it is left in place with its value above it.
+ void GetValue(TypeofState typeof_state);
+
+ // Generate code to push the value of a reference on top of the expression
+ // stack and then spill the stack frame. This function is used temporarily
+ // while the code generator is being transformed.
+ inline void GetValueAndSpill(TypeofState typeof_state);
+
+ // Generate code to store the value on top of the expression stack in the
+ // reference. The reference is expected to be immediately below the value
+ // on the expression stack. The stored value is left in place (with the
+ // reference intact below it) to support chained assignments.
+ void SetValue(InitState init_state);
+
+ private:
+ CodeGenerator* cgen_;
+ Expression* expression_;
+ Type type_;
+};
+
+
+// -------------------------------------------------------------------------
+// Code generation state
+
+// The state is passed down the AST by the code generator (and back up, in
+// the form of the state of the label pair). It is threaded through the
+// call stack. Constructing a state implicitly pushes it on the owning code
+// generator's stack of states, and destroying one implicitly pops it.
+
+class CodeGenState BASE_EMBEDDED {
+ public:
+ // Create an initial code generator state. Destroying the initial state
+ // leaves the code generator with a NULL state.
+ explicit CodeGenState(CodeGenerator* owner);
+
+ // Create a code generator state based on a code generator's current
+ // state. The new state has its own typeof state and pair of branch
+ // labels.
+ CodeGenState(CodeGenerator* owner,
+ TypeofState typeof_state,
+ JumpTarget* true_target,
+ JumpTarget* false_target);
+
+ // Destroy a code generator state and restore the owning code generator's
+ // previous state.
+ ~CodeGenState();
+
+ TypeofState typeof_state() const { return typeof_state_; }
+ JumpTarget* true_target() const { return true_target_; }
+ JumpTarget* false_target() const { return false_target_; }
+
+ private:
+ CodeGenerator* owner_;
+ TypeofState typeof_state_;
+ JumpTarget* true_target_;
+ JumpTarget* false_target_;
+ CodeGenState* previous_;
+};
+
+
+// -------------------------------------------------------------------------
+// CodeGenerator
+
+class CodeGenerator: public AstVisitor {
+ public:
+ // Takes a function literal, generates code for it. This function should only
+ // be called by compiler.cc.
+ static Handle<Code> MakeCode(FunctionLiteral* fun,
+ Handle<Script> script,
+ bool is_eval);
+
+#ifdef ENABLE_LOGGING_AND_PROFILING
+ static bool ShouldGenerateLog(Expression* type);
+#endif
+
+ static void SetFunctionInfo(Handle<JSFunction> fun,
+ int length,
+ int function_token_position,
+ int start_position,
+ int end_position,
+ bool is_expression,
+ bool is_toplevel,
+ Handle<Script> script,
+ Handle<String> inferred_name);
+
+ // Accessors
+ MacroAssembler* masm() { return masm_; }
+
+ VirtualFrame* frame() const { return frame_; }
+
+ bool has_valid_frame() const { return frame_ != NULL; }
+
+ // Set the virtual frame to be new_frame, with non-frame register
+ // reference counts given by non_frame_registers. The non-frame
+ // register reference counts of the old frame are returned in
+ // non_frame_registers.
+ void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
+
+ void DeleteFrame();
+
+ RegisterAllocator* allocator() const { return allocator_; }
+
+ CodeGenState* state() { return state_; }
+ void set_state(CodeGenState* state) { state_ = state; }
+
+ void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
+
+ bool in_spilled_code() const { return in_spilled_code_; }
+ void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
+
+ private:
+ // Construction/Destruction
+ CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
+ virtual ~CodeGenerator() { delete masm_; }
+
+ // Accessors
+ Scope* scope() const { return scope_; }
+
+ // Generating deferred code.
+ void ProcessDeferred();
+
+ bool is_eval() { return is_eval_; }
+
+ // State
+ bool has_cc() const { return cc_reg_ != al; }
+ TypeofState typeof_state() const { return state_->typeof_state(); }
+ JumpTarget* true_target() const { return state_->true_target(); }
+ JumpTarget* false_target() const { return state_->false_target(); }
+
+ // We don't track loop nesting level on ARM yet.
+ int loop_nesting() const { return 0; }
+
+ // Node visitors.
+ void VisitStatements(ZoneList<Statement*>* statements);
+
+#define DEF_VISIT(type) \
+ void Visit##type(type* node);
+ NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+ // Visit a statement and then spill the virtual frame if control flow can
+ // reach the end of the statement (ie, it does not exit via break,
+ // continue, return, or throw). This function is used temporarily while
+ // the code generator is being transformed.
+ void VisitAndSpill(Statement* statement);
+
+ // Visit a list of statements and then spill the virtual frame if control
+ // flow can reach the end of the list.
+ void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
+
+ // Main code generation function
+ void GenCode(FunctionLiteral* fun);
+
+ // The following are used by class Reference.
+ void LoadReference(Reference* ref);
+ void UnloadReference(Reference* ref);
+
+ MemOperand ContextOperand(Register context, int index) const {
+ return MemOperand(context, Context::SlotOffset(index));
+ }
+
+ MemOperand SlotOperand(Slot* slot, Register tmp);
+
+ MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
+ Register tmp,
+ Register tmp2,
+ JumpTarget* slow);
+
+ // Expressions
+ MemOperand GlobalObject() const {
+ return ContextOperand(cp, Context::GLOBAL_INDEX);
+ }
+
+ void LoadCondition(Expression* x,
+ TypeofState typeof_state,
+ JumpTarget* true_target,
+ JumpTarget* false_target,
+ bool force_cc);
+ void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+ void LoadGlobal();
+ void LoadGlobalReceiver(Register scratch);
+
+ // Generate code to push the value of an expression on top of the frame
+ // and then spill the frame fully to memory. This function is used
+ // temporarily while the code generator is being transformed.
+ void LoadAndSpill(Expression* expression,
+ TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+
+ // Call LoadCondition and then spill the virtual frame unless control flow
+ // cannot reach the end of the expression (ie, by emitting only
+ // unconditional jumps to the control targets).
+ void LoadConditionAndSpill(Expression* expression,
+ TypeofState typeof_state,
+ JumpTarget* true_target,
+ JumpTarget* false_target,
+ bool force_control);
+
+ // Read a value from a slot and leave it on top of the expression stack.
+ void LoadFromSlot(Slot* slot, TypeofState typeof_state);
+ void LoadFromGlobalSlotCheckExtensions(Slot* slot,
+ TypeofState typeof_state,
+ Register tmp,
+ Register tmp2,
+ JumpTarget* slow);
+
+ // Special code for typeof expressions: Unfortunately, we must
+ // be careful when loading the expression in 'typeof'
+ // expressions. We are not allowed to throw reference errors for
+ // non-existing properties of the global object, so we must make it
+ // look like an explicit property access, instead of an access
+ // through the context chain.
+ void LoadTypeofExpression(Expression* x);
+
+ void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
+
+ void GenericBinaryOperation(Token::Value op, OverwriteMode overwrite_mode);
+ void Comparison(Condition cc, bool strict = false);
+
+ void SmiOperation(Token::Value op,
+ Handle<Object> value,
+ bool reversed,
+ OverwriteMode mode);
+
+ void CallWithArguments(ZoneList<Expression*>* arguments, int position);
+
+ // Control flow
+ void Branch(bool if_true, JumpTarget* target);
+ void CheckStack();
+
+ struct InlineRuntimeLUT {
+ void (CodeGenerator::*method)(ZoneList<Expression*>*);
+ const char* name;
+ };
+
+ static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
+ bool CheckForInlineRuntimeCall(CallRuntime* node);
+ static bool PatchInlineRuntimeEntry(Handle<String> name,
+ const InlineRuntimeLUT& new_entry,
+ InlineRuntimeLUT* old_entry);
+
+ Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
+ void ProcessDeclarations(ZoneList<Declaration*>* declarations);
+
+ Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
+
+ // Declare global variables and functions in the given array of
+ // name/value pairs.
+ void DeclareGlobals(Handle<FixedArray> pairs);
+
+ // Instantiate the function boilerplate.
+ void InstantiateBoilerplate(Handle<JSFunction> boilerplate);
+
+ // Support for type checks.
+ void GenerateIsSmi(ZoneList<Expression*>* args);
+ void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
+ void GenerateIsArray(ZoneList<Expression*>* args);
+
+ // Support for arguments.length and arguments[?].
+ void GenerateArgumentsLength(ZoneList<Expression*>* args);
+ void GenerateArgumentsAccess(ZoneList<Expression*>* args);
+
+ // Support for accessing the value field of an object (used by Date).
+ void GenerateValueOf(ZoneList<Expression*>* args);
+ void GenerateSetValueOf(ZoneList<Expression*>* args);
+
+ // Fast support for charCodeAt(n).
+ void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
+
+ // Fast support for object equality testing.
+ void GenerateObjectEquals(ZoneList<Expression*>* args);
+
+ void GenerateLog(ZoneList<Expression*>* args);
+
+ // Methods and constants for fast case switch statement support.
+ //
+ // Only allow fast-case switch if the range of labels is at most
+ // this factor times the number of case labels.
+ // Value is derived from comparing the size of code generated by the normal
+ // switch code for Smi-labels to the size of a single pointer. If code
+ // quality increases this number should be decreased to match.
+ static const int kFastSwitchMaxOverheadFactor = 10;
+
+ // Minimal number of switch cases required before we allow jump-table
+ // optimization.
+ static const int kFastSwitchMinCaseCount = 5;
+
+ // The limit of the range of a fast-case switch, as a factor of the number
+ // of cases of the switch. Each platform should return a value that
+ // is optimal compared to the default code generated for a switch statement
+ // on that platform.
+ int FastCaseSwitchMaxOverheadFactor();
+
+ // The minimal number of cases in a switch before the fast-case switch
+ // optimization is enabled. Each platform should return a value that
+ // is optimal compared to the default code generated for a switch statement
+ // on that platform.
+ int FastCaseSwitchMinCaseCount();
+
+ // Allocate a jump table and create code to jump through it.
+ // Should call GenerateFastCaseSwitchCases to generate the code for
+ // all the cases at the appropriate point.
+ void GenerateFastCaseSwitchJumpTable(SwitchStatement* node,
+ int min_index,
+ int range,
+ Label* default_label,
+ Vector<Label*> case_targets,
+ Vector<Label> case_labels);
+
+ // Generate the code for cases for the fast case switch.
+ // Called by GenerateFastCaseSwitchJumpTable.
+ void GenerateFastCaseSwitchCases(SwitchStatement* node,
+ Vector<Label> case_labels,
+ VirtualFrame* start_frame);
+
+ // Fast support for constant-Smi switches.
+ void GenerateFastCaseSwitchStatement(SwitchStatement* node,
+ int min_index,
+ int range,
+ int default_index);
+
+ // Fast support for constant-Smi switches. Tests whether switch statement
+ // permits optimization and calls GenerateFastCaseSwitch if it does.
+ // Returns true if the fast-case switch was generated, and false if not.
+ bool TryGenerateFastCaseSwitchStatement(SwitchStatement* node);
+
+
+ // Methods used to indicate which source code is generated for. Source
+ // positions are collected by the assembler and emitted with the relocation
+ // information.
+ void CodeForFunctionPosition(FunctionLiteral* fun);
+ void CodeForReturnPosition(FunctionLiteral* fun);
+ void CodeForStatementPosition(Node* node);
+ void CodeForSourcePosition(int pos);
+
+#ifdef DEBUG
+ // True if the registers are valid for entry to a block.
+ bool HasValidEntryRegisters();
+#endif
+
+ bool is_eval_; // Tells whether code is generated for eval.
+
+ Handle<Script> script_;
+ List<DeferredCode*> deferred_;
+
+ // Assembler
+ MacroAssembler* masm_; // to generate code
+
+ // Code generation state
+ Scope* scope_;
+ VirtualFrame* frame_;
+ RegisterAllocator* allocator_;
+ Condition cc_reg_;
+ CodeGenState* state_;
+
+ // Jump targets
+ BreakTarget function_return_;
+
+ // True if the function return is shadowed (ie, jumping to the target
+ // function_return_ does not jump to the true function return, but rather
+ // to some unlinking code).
+ bool function_return_is_shadowed_;
+
+ // True when we are in code that expects the virtual frame to be fully
+  // spilled. Some virtual frame functions are disabled in DEBUG builds when
+ // called from spilled code, because they do not leave the virtual frame
+ // in a spilled state.
+ bool in_spilled_code_;
+
+ static InlineRuntimeLUT kInlineRuntimeLUT[];
+
+ friend class VirtualFrame;
+ friend class JumpTarget;
+ friend class Reference;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_CODEGEN_ARM_H_
diff --git a/V8Binding/v8/src/arm/constants-arm.h b/V8Binding/v8/src/arm/constants-arm.h
new file mode 100644
index 0000000..99eab23
--- /dev/null
+++ b/V8Binding/v8/src/arm/constants-arm.h
@@ -0,0 +1,241 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_CONSTANTS_ARM_H_
+#define V8_ARM_CONSTANTS_ARM_H_
+
+namespace assembler {
+namespace arm {
+
+// Defines constants and accessor classes to assemble, disassemble and
+// simulate ARM instructions.
+//
+// Section references in the code refer to the "ARM Architecture Reference
+// Manual" from July 2005 (available at http://www.arm.com/miscPDFs/14128.pdf)
+//
+// Constants for specific fields are defined in their respective named enums.
+// General constants are in an anonymous enum in class Instr.
+
+typedef unsigned char byte;
+
+// Values for the condition field as defined in section A3.2
+enum Condition {
+ no_condition = -1,
+ EQ = 0, // equal
+ NE = 1, // not equal
+ CS = 2, // carry set/unsigned higher or same
+ CC = 3, // carry clear/unsigned lower
+ MI = 4, // minus/negative
+ PL = 5, // plus/positive or zero
+ VS = 6, // overflow
+ VC = 7, // no overflow
+ HI = 8, // unsigned higher
+ LS = 9, // unsigned lower or same
+ GE = 10, // signed greater than or equal
+ LT = 11, // signed less than
+ GT = 12, // signed greater than
+ LE = 13, // signed less than or equal
+ AL = 14, // always (unconditional)
+ special_condition = 15, // special condition (refer to section A3.2.1)
+ max_condition = 16
+};
+
+
+// Opcodes for Data-processing instructions (instructions with type 0 or 1)
+// as defined in section A3.4
+enum Opcode {
+ no_operand = -1,
+ AND = 0, // Logical AND
+ EOR = 1, // Logical Exclusive OR
+ SUB = 2, // Subtract
+ RSB = 3, // Reverse Subtract
+ ADD = 4, // Add
+ ADC = 5, // Add with Carry
+ SBC = 6, // Subtract with Carry
+ RSC = 7, // Reverse Subtract with Carry
+ TST = 8, // Test
+ TEQ = 9, // Test Equivalence
+ CMP = 10, // Compare
+ CMN = 11, // Compare Negated
+ ORR = 12, // Logical (inclusive) OR
+ MOV = 13, // Move
+ BIC = 14, // Bit Clear
+ MVN = 15, // Move Not
+ max_operand = 16
+};
+
+
+// Shifter types for Data-processing operands as defined in section A5.1.2.
+enum Shift {
+ no_shift = -1,
+ LSL = 0, // Logical shift left
+ LSR = 1, // Logical shift right
+ ASR = 2, // Arithmetic shift right
+ ROR = 3, // Rotate right
+ max_shift = 4
+};
+
+
+// Special Software Interrupt codes used when running in the presence of the
+// ARM simulator.
+enum SoftwareInterruptCodes {
+ // transition to C code
+ call_rt_r5 = 0x10,
+ call_rt_r2 = 0x11,
+ // break point
+ break_point = 0x20,
+ // FP operations. These simulate calling into C for a moment to do fp ops.
+ // They should trash all caller-save registers.
+ simulator_fp_add = 0x21,
+ simulator_fp_sub = 0x22,
+ simulator_fp_mul = 0x23
+};
+
+
+typedef int32_t instr_t;
+
+
+// The class Instr enables access to individual fields defined in the ARM
+// architecture instruction set encoding as described in figure A3-1.
+//
+// Example: Test whether the instruction at ptr sets the condition code
+// bits.
+//
+// bool InstructionSetsConditionCodes(byte* ptr) {
+// Instr* instr = Instr::At(ptr);
+// int type = instr->TypeField();
+// return ((type == 0) || (type == 1)) && instr->HasS();
+// }
+//
+class Instr {
+ public:
+ enum {
+ kInstrSize = 4,
+ kInstrSizeLog2 = 2,
+ kPCReadOffset = 8
+ };
+
+ // Get the raw instruction bits.
+ inline instr_t InstructionBits() const {
+ return *reinterpret_cast<const instr_t*>(this);
+ }
+
+ // Set the raw instruction bits to value.
+ inline void SetInstructionBits(instr_t value) {
+ *reinterpret_cast<instr_t*>(this) = value;
+ }
+
+ // Read one particular bit out of the instruction bits.
+ inline int Bit(int nr) const {
+ return (InstructionBits() >> nr) & 1;
+ }
+
+ // Read a bit field out of the instruction bits.
+ inline int Bits(int hi, int lo) const {
+ return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+ }
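+
+  // For example (illustrative): for the encoding 0xE3A00001 ("mov r0, #1"),
+  // Bits(31, 28) yields 0xE (the AL condition) and Bits(27, 25) yields 1
+  // (the instruction type).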
+
+
+ // Accessors for the different named fields used in the ARM encoding.
+  // The naming of these accessors corresponds to figure A3-1.
+ // Generally applicable fields
+ inline Condition ConditionField() const {
+ return static_cast<Condition>(Bits(31, 28));
+ }
+ inline int TypeField() const { return Bits(27, 25); }
+
+ inline int RnField() const { return Bits(19, 16); }
+ inline int RdField() const { return Bits(15, 12); }
+
+ // Fields used in Data processing instructions
+ inline Opcode OpcodeField() const {
+ return static_cast<Opcode>(Bits(24, 21));
+ }
+ inline int SField() const { return Bit(20); }
+ // with register
+ inline int RmField() const { return Bits(3, 0); }
+ inline Shift ShiftField() const { return static_cast<Shift>(Bits(6, 5)); }
+ inline int RegShiftField() const { return Bit(4); }
+ inline int RsField() const { return Bits(11, 8); }
+ inline int ShiftAmountField() const { return Bits(11, 7); }
+ // with immediate
+ inline int RotateField() const { return Bits(11, 8); }
+ inline int Immed8Field() const { return Bits(7, 0); }
+
+ // Fields used in Load/Store instructions
+ inline int PUField() const { return Bits(24, 23); }
+ inline int BField() const { return Bit(22); }
+ inline int WField() const { return Bit(21); }
+ inline int LField() const { return Bit(20); }
+ // with register uses same fields as Data processing instructions above
+ // with immediate
+ inline int Offset12Field() const { return Bits(11, 0); }
+ // multiple
+ inline int RlistField() const { return Bits(15, 0); }
+ // extra loads and stores
+ inline int SignField() const { return Bit(6); }
+ inline int HField() const { return Bit(5); }
+ inline int ImmedHField() const { return Bits(11, 8); }
+ inline int ImmedLField() const { return Bits(3, 0); }
+
+ // Fields used in Branch instructions
+ inline int LinkField() const { return Bit(24); }
+ inline int SImmed24Field() const { return ((InstructionBits() << 8) >> 8); }
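+  // (The shift pair above sign-extends the 24-bit immediate, given the
+  // arithmetic right shift of the signed instr_t; e.g. an immediate field
+  // of 0xffffff reads back as -1.)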
+
+ // Fields used in Software interrupt instructions
+ inline SoftwareInterruptCodes SwiField() const {
+ return static_cast<SoftwareInterruptCodes>(Bits(23, 0));
+ }
+
+ // Test for special encodings of type 0 instructions (extra loads and stores,
+ // as well as multiplications).
+ inline bool IsSpecialType0() const { return (Bit(7) == 1) && (Bit(4) == 1); }
+
+ // Special accessors that test for existence of a value.
+ inline bool HasS() const { return SField() == 1; }
+ inline bool HasB() const { return BField() == 1; }
+ inline bool HasW() const { return WField() == 1; }
+ inline bool HasL() const { return LField() == 1; }
+ inline bool HasSign() const { return SignField() == 1; }
+ inline bool HasH() const { return HField() == 1; }
+ inline bool HasLink() const { return LinkField() == 1; }
+
+  // Instructions are read out of a code stream. The only way to get a
+ // reference to an instruction is to convert a pointer. There is no way
+ // to allocate or create instances of class Instr.
+ // Use the At(pc) function to create references to Instr.
+ static Instr* At(byte* pc) { return reinterpret_cast<Instr*>(pc); }
+
+ private:
+ // We need to prevent the creation of instances of class Instr.
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Instr);
+};
+
+
+} } // namespace assembler::arm
+
+#endif // V8_ARM_CONSTANTS_ARM_H_
diff --git a/V8Binding/v8/src/arm/cpu-arm.cc b/V8Binding/v8/src/arm/cpu-arm.cc
new file mode 100644
index 0000000..71da1ec
--- /dev/null
+++ b/V8Binding/v8/src/arm/cpu-arm.cc
@@ -0,0 +1,125 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// CPU-specific code for ARM, independent of the OS, goes here.
+#if defined(__arm__)
+#include <sys/syscall.h> // for cache flushing.
+#endif
+
+#include "v8.h"
+
+#include "cpu.h"
+
+namespace v8 {
+namespace internal {
+
+void CPU::Setup() {
+ // Nothing to do.
+}
+
+
+void CPU::FlushICache(void* start, size_t size) {
+#if !defined (__arm__)
+  // Not generating ARM instructions for C code. This means that we are
+  // building an ARM emulator-based target. No I$ flushes are necessary.
+#else
+ // Ideally, we would call
+ // syscall(__ARM_NR_cacheflush, start,
+ // reinterpret_cast<intptr_t>(start) + size, 0);
+ // however, syscall(int, ...) is not supported on all platforms, especially
+ // not when using EABI, so we call the __ARM_NR_cacheflush syscall directly.
+
+ register uint32_t beg asm("a1") = reinterpret_cast<uint32_t>(start);
+ register uint32_t end asm("a2") =
+ reinterpret_cast<uint32_t>(start) + size;
+ register uint32_t flg asm("a3") = 0;
+ #ifdef __ARM_EABI__
+ register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
+ #if defined (__arm__) && !defined(__thumb__)
+ // __arm__ may be defined in thumb mode.
+ asm volatile(
+ "swi 0x0"
+ : "=r" (beg)
+ : "0" (beg), "r" (end), "r" (flg), "r" (scno));
+ #else
+ asm volatile(
+ "@ Enter ARM Mode \n\t"
+ "adr r3, 1f \n\t"
+ "bx r3 \n\t"
+ ".ALIGN 4 \n\t"
+ ".ARM \n"
+ "1: swi 0x0 \n\t"
+ "@ Enter THUMB Mode\n\t"
+ "adr r3, 2f+1 \n\t"
+ "bx r3 \n\t"
+ ".THUMB \n"
+ "2: \n\t"
+ : "=r" (beg)
+ : "0" (beg), "r" (end), "r" (flg), "r" (scno)
+ : "r3");
+ #endif
+ #else
+ #if defined (__arm__) && !defined(__thumb__)
+ // __arm__ may be defined in thumb mode.
+ asm volatile(
+ "swi %1"
+ : "=r" (beg)
+ : "i" (__ARM_NR_cacheflush), "0" (beg), "r" (end), "r" (flg));
+ #else
+    // Do not use the value of __ARM_NR_cacheflush in the inline assembly
+    // below, because the thumb mode value would be used, which would be
+    // wrong, since we switch to ARM mode before executing the swi instruction.
+ asm volatile(
+ "@ Enter ARM Mode \n\t"
+ "adr r3, 1f \n\t"
+ "bx r3 \n\t"
+ ".ALIGN 4 \n\t"
+ ".ARM \n"
+ "1: swi 0x9f0002 \n"
+ "@ Enter THUMB Mode\n\t"
+ "adr r3, 2f+1 \n\t"
+ "bx r3 \n\t"
+ ".THUMB \n"
+ "2: \n\t"
+ : "=r" (beg)
+ : "0" (beg), "r" (end), "r" (flg)
+ : "r3");
+ #endif
+ #endif
+#endif
+}
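+
+
+// An illustrative call site (names are hypothetical, not from this file):
+// after patching generated code in place, the instruction cache must be
+// flushed before the new code is executed:
+//
+//   CPU::FlushICache(code_start, code_size);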
+
+
+void CPU::DebugBreak() {
+#if !defined (__arm__)
+ UNIMPLEMENTED(); // when building ARM emulator target
+#else
+ asm volatile("bkpt 0");
+#endif
+}
+
+} } // namespace v8::internal
diff --git a/V8Binding/v8/src/arm/debug-arm.cc b/V8Binding/v8/src/arm/debug-arm.cc
new file mode 100644
index 0000000..bcfab6c
--- /dev/null
+++ b/V8Binding/v8/src/arm/debug-arm.cc
@@ -0,0 +1,197 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "debug.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+// Currently debug break is not supported in frame exit code on ARM.
+bool BreakLocationIterator::IsDebugBreakAtReturn() {
+ return false;
+}
+
+
+// Currently debug break is not supported in frame exit code on ARM.
+void BreakLocationIterator::SetDebugBreakAtReturn() {
+ UNIMPLEMENTED();
+}
+
+
+// Currently debug break is not supported in frame exit code on ARM.
+void BreakLocationIterator::ClearDebugBreakAtReturn() {
+ UNIMPLEMENTED();
+}
+
+
+bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
+ ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+ // Currently debug break is not supported in frame exit code on ARM.
+ return false;
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
+ RegList pointer_regs) {
+  // Save the content of all general purpose registers in memory. This copy
+  // is later pushed onto the JS expression stack of the generated fake JS
+  // frame, and also onto the C frame generated on top of it. In the JS
+  // frame ONLY the registers containing pointers will be pushed on the
+  // expression stack. This causes the GC to update these pointers so that
+  // they will have the correct value when returning from the debugger.
+ __ SaveRegistersToMemory(kJSCallerSaved);
+
+ __ EnterInternalFrame();
+
+ // Store the registers containing object pointers on the expression stack to
+ // make sure that these are correctly updated during GC.
+ // Use sp as base to push.
+ __ CopyRegistersFromMemoryToStack(sp, pointer_regs);
+
+#ifdef DEBUG
+ __ RecordComment("// Calling from debug break to runtime - come in - over");
+#endif
+ __ mov(r0, Operand(0)); // no arguments
+ __ mov(r1, Operand(ExternalReference::debug_break()));
+
+ CEntryDebugBreakStub ceb;
+ __ CallStub(&ceb);
+
+  // Restore the register values containing object pointers from the
+  // expression stack, in the reverse order to that in which they were pushed.
+ // Use sp as base to pop.
+ __ CopyRegistersFromStackToMemory(sp, r3, pointer_regs);
+
+ __ LeaveInternalFrame();
+
+ // Inlined ExitJSFrame ends here.
+
+ // Finally restore all registers.
+ __ RestoreRegistersFromMemory(kJSCallerSaved);
+
+ // Now that the break point has been handled, resume normal execution by
+ // jumping to the target address intended by the caller and that was
+ // overwritten by the address of DebugBreakXXX.
+ __ mov(ip, Operand(ExternalReference(Debug_Address::AfterBreakTarget())));
+ __ ldr(ip, MemOperand(ip));
+ __ Jump(ip);
+}
+
+
+void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+ // Calling convention for IC load (from ic-arm.cc).
+ // ----------- S t a t e -------------
+ // -- r0 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+  // Registers r0 and r2 contain objects that need to be pushed on the
+ // expression stack of the fake JS frame.
+ Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit());
+}
+
+
+void Debug::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+ // Calling convention for IC store (from ic-arm.cc).
+ // ----------- S t a t e -------------
+ // -- r0 : receiver
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+  // Registers r0 and r2 contain objects that need to be pushed on the
+ // expression stack of the fake JS frame.
+ Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit());
+}
+
+
+void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+ // Keyed load IC not implemented on ARM.
+}
+
+
+void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+ // Keyed store IC not implemented on ARM.
+}
+
+
+void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
+ // Calling convention for IC call (from ic-arm.cc)
+ // ----------- S t a t e -------------
+ // -- r0: number of arguments
+ // -- r1: receiver
+ // -- lr: return address
+ // -----------------------------------
+ // Register r1 contains an object that needs to be pushed on the expression
+  // stack of the fake JS frame. r0 holds the actual number of arguments, not
+  // encoded as a smi; it therefore cannot go on the expression stack of the
+  // fake JS frame, as it can easily be an invalid pointer (e.g. 1). r0 is
+  // instead pushed on the stack of the C frame and restored from there.
+ Generate_DebugBreakCallHelper(masm, r1.bit());
+}
+
+
+void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
+ // In places other than IC call sites it is expected that r0 is TOS which
+ // is an object - this is not generally the case so this should be used with
+ // care.
+ Generate_DebugBreakCallHelper(masm, r0.bit());
+}
+
+
+void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
+ // In places other than IC call sites it is expected that r0 is TOS which
+ // is an object - this is not generally the case so this should be used with
+ // care.
+ Generate_DebugBreakCallHelper(masm, r0.bit());
+}
+
+
+void Debug::GenerateReturnDebugBreakEntry(MacroAssembler* masm) {
+  // Generate nothing, as debug break return is not handled this way on
+  // ARM yet.
+}
+
+
+void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
+  // Generate nothing, as the CodeStub CallFunction is not used on ARM.
+}
+
+
+#undef __
+
+#endif // ENABLE_DEBUGGER_SUPPORT
+
+} } // namespace v8::internal
diff --git a/V8Binding/v8/src/arm/disasm-arm.cc b/V8Binding/v8/src/arm/disasm-arm.cc
new file mode 100644
index 0000000..f56a599
--- /dev/null
+++ b/V8Binding/v8/src/arm/disasm-arm.cc
@@ -0,0 +1,901 @@
+// Copyright 2007-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// A Disassembler object is used to disassemble a block of code instruction by
+// instruction. The default implementation of the NameConverter object can be
+// overridden to modify register names or to do symbol lookup on addresses.
+//
+// The example below will disassemble a block of code and print it to stdout.
+//
+// NameConverter converter;
+// Disassembler d(converter);
+// for (byte* pc = begin; pc < end;) {
+// char buffer[128];
+// buffer[0] = '\0';
+// byte* prev_pc = pc;
+// pc += d.InstructionDecode(buffer, sizeof buffer, pc);
+// printf("%p %08x %s\n",
+// prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
+// }
+//
+// The Disassembler class also has a convenience method to disassemble a block
+// of code into a FILE*, meaning that the above functionality could also be
+// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
+
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#ifndef WIN32
+#include <stdint.h>
+#endif
+
+#include "v8.h"
+
+#include "disasm.h"
+#include "macro-assembler.h"
+#include "platform.h"
+
+
+namespace assembler {
+namespace arm {
+
+namespace v8i = v8::internal;
+
+
+//------------------------------------------------------------------------------
+
+// Decoder decodes and disassembles instructions into an output buffer.
+// It uses the converter to convert register names and call destinations into
+// more informative descriptions.
+class Decoder {
+ public:
+ Decoder(const disasm::NameConverter& converter,
+ v8::internal::Vector<char> out_buffer)
+ : converter_(converter),
+ out_buffer_(out_buffer),
+ out_buffer_pos_(0) {
+ out_buffer_[out_buffer_pos_] = '\0';
+ }
+
+ ~Decoder() {}
+
+ // Writes one disassembled instruction into 'buffer' (0-terminated).
+ // Returns the length of the disassembled machine instruction in bytes.
+ int InstructionDecode(byte* instruction);
+
+ private:
+ // Bottleneck functions to print into the out_buffer.
+ void PrintChar(const char ch);
+ void Print(const char* str);
+
+ // Printing of common values.
+ void PrintRegister(int reg);
+ void PrintCondition(Instr* instr);
+ void PrintShiftRm(Instr* instr);
+ void PrintShiftImm(Instr* instr);
+ void PrintPU(Instr* instr);
+ void PrintSoftwareInterrupt(SoftwareInterruptCodes swi);
+
+ // Handle formatting of instructions and their options.
+ int FormatRegister(Instr* instr, const char* option);
+ int FormatOption(Instr* instr, const char* option);
+ void Format(Instr* instr, const char* format);
+ void Unknown(Instr* instr);
+
+  // Each of these functions decodes one particular instruction type, as
+  // identified by a 3-bit field in the instruction encoding.
+ // Types 0 and 1 are combined as they are largely the same except for the way
+ // they interpret the shifter operand.
+ void DecodeType01(Instr* instr);
+ void DecodeType2(Instr* instr);
+ void DecodeType3(Instr* instr);
+ void DecodeType4(Instr* instr);
+ void DecodeType5(Instr* instr);
+ void DecodeType6(Instr* instr);
+ void DecodeType7(Instr* instr);
+
+ const disasm::NameConverter& converter_;
+ v8::internal::Vector<char> out_buffer_;
+ int out_buffer_pos_;
+
+ DISALLOW_COPY_AND_ASSIGN(Decoder);
+};
+
+
+// Support for assertions in the Decoder formatting functions.
+#define STRING_STARTS_WITH(string, compare_string) \
+ (strncmp(string, compare_string, strlen(compare_string)) == 0)
+
+
+// Append the character ch to the output buffer.
+void Decoder::PrintChar(const char ch) {
+ out_buffer_[out_buffer_pos_++] = ch;
+}
+
+
+// Append the string str to the output buffer.
+void Decoder::Print(const char* str) {
+ char cur = *str++;
+ while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+ PrintChar(cur);
+ cur = *str++;
+ }
+ out_buffer_[out_buffer_pos_] = 0;
+}
+
+
+// These condition names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+static const char* cond_names[max_condition] = {
+ "eq", "ne", "cs" , "cc" , "mi" , "pl" , "vs" , "vc" ,
+ "hi", "ls", "ge", "lt", "gt", "le", "", "invalid",
+};
+
+
+// Print the condition guarding the instruction.
+void Decoder::PrintCondition(Instr* instr) {
+ Print(cond_names[instr->ConditionField()]);
+}
+
+
+// Print the register name according to the active name converter.
+void Decoder::PrintRegister(int reg) {
+ Print(converter_.NameOfCPURegister(reg));
+}
+
+
+// These shift names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+static const char* shift_names[max_shift] = {
+ "lsl", "lsr", "asr", "ror"
+};
+
+
+// Print the register shift operands for the instruction. Generally used for
+// data processing instructions.
+void Decoder::PrintShiftRm(Instr* instr) {
+ Shift shift = instr->ShiftField();
+ int shift_amount = instr->ShiftAmountField();
+ int rm = instr->RmField();
+
+ PrintRegister(rm);
+
+ if ((instr->RegShiftField() == 0) && (shift == LSL) && (shift_amount == 0)) {
+ // Special case for using rm only.
+ return;
+ }
+ if (instr->RegShiftField() == 0) {
+ // by immediate
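+    // Per ARM ARM section A5.1.2: an immediate ROR by 0 encodes RRX, and an
+    // immediate LSR or ASR by 0 encodes a shift by 32.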
+ if ((shift == ROR) && (shift_amount == 0)) {
+ Print(", RRX");
+ return;
+ } else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
+ shift_amount = 32;
+ }
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", %s #%d",
+ shift_names[shift], shift_amount);
+ } else {
+ // by register
+ int rs = instr->RsField();
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ ", %s ", shift_names[shift]);
+ PrintRegister(rs);
+ }
+}
+
+
+// Print the immediate operand for the instruction. Generally used for data
+// processing instructions.
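+// For example (illustrative): a rotate field of 15 and an immed8 of 0x3f
+// decode as ror(0x3f, 30) == 0xfc, printed as "#252".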
+void Decoder::PrintShiftImm(Instr* instr) {
+ int rotate = instr->RotateField() * 2;
+ int immed8 = instr->Immed8Field();
+ int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "#%d", imm);
+}
+
+
+// Print PU formatting to reduce complexity of FormatOption.
+void Decoder::PrintPU(Instr* instr) {
+ switch (instr->PUField()) {
+ case 0: {
+ Print("da");
+ break;
+ }
+ case 1: {
+ Print("ia");
+ break;
+ }
+ case 2: {
+ Print("db");
+ break;
+ }
+ case 3: {
+ Print("ib");
+ break;
+ }
+ default: {
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+// Print SoftwareInterrupt codes. Factoring this out reduces the complexity of
+// the FormatOption method.
+void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes swi) {
+ switch (swi) {
+ case call_rt_r5:
+ Print("call_rt_r5");
+ return;
+ case call_rt_r2:
+ Print("call_rt_r2");
+ return;
+ case break_point:
+ Print("break_point");
+ return;
+ case simulator_fp_add:
+ Print("simulator_fp_add");
+ return;
+ case simulator_fp_mul:
+ Print("simulator_fp_mul");
+ return;
+ case simulator_fp_sub:
+ Print("simulator_fp_sub");
+ return;
+ default:
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d",
+ swi);
+ return;
+ }
+}
+
+
+// Handle all register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatRegister(Instr* instr, const char* format) {
+ ASSERT(format[0] == 'r');
+ if (format[1] == 'n') { // 'rn: Rn register
+ int reg = instr->RnField();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 'd') { // 'rd: Rd register
+ int reg = instr->RdField();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 's') { // 'rs: Rs register
+ int reg = instr->RsField();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 'm') { // 'rm: Rm register
+ int reg = instr->RmField();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 'l') {
+ // 'rlist: register list for load and store multiple instructions
+ ASSERT(STRING_STARTS_WITH(format, "rlist"));
+ int rlist = instr->RlistField();
+ int reg = 0;
+ Print("{");
+ // Print register list in ascending order, by scanning the bit mask.
+ while (rlist != 0) {
+ if ((rlist & 1) != 0) {
+ PrintRegister(reg);
+ if ((rlist >> 1) != 0) {
+ Print(", ");
+ }
+ }
+ reg++;
+ rlist >>= 1;
+ }
+ Print("}");
+ return 5;
+ }
+ UNREACHABLE();
+ return -1;
+}
+
+
+// FormatOption takes a formatting string and interprets it based on
+// the current instruction. The format string points to the first
+// character of the option string (the option escape has already been
+// consumed by the caller.) FormatOption returns the number of
+// characters that were consumed from the formatting string.
+int Decoder::FormatOption(Instr* instr, const char* format) {
+ switch (format[0]) {
+ case 'a': { // 'a: accumulate multiplies
+ if (instr->Bit(21) == 0) {
+ Print("ul");
+ } else {
+ Print("la");
+ }
+ return 1;
+ }
+ case 'b': { // 'b: byte loads or stores
+ if (instr->HasB()) {
+ Print("b");
+ }
+ return 1;
+ }
+ case 'c': { // 'cond: conditional execution
+ ASSERT(STRING_STARTS_WITH(format, "cond"));
+ PrintCondition(instr);
+ return 4;
+ }
+ case 'h': { // 'h: halfword operation for extra loads and stores
+ if (instr->HasH()) {
+ Print("h");
+ } else {
+ Print("b");
+ }
+ return 1;
+ }
+ case 'l': { // 'l: branch and link
+ if (instr->HasLink()) {
+ Print("l");
+ }
+ return 1;
+ }
+ case 'm': {
+ if (format[1] == 'e') { // 'memop: load/store instructions
+ ASSERT(STRING_STARTS_WITH(format, "memop"));
+ if (instr->HasL()) {
+ Print("ldr");
+ } else {
+ Print("str");
+ }
+ return 5;
+ }
+ // 'msg: for simulator break instructions
+ ASSERT(STRING_STARTS_WITH(format, "msg"));
+ byte* str =
+ reinterpret_cast<byte*>(instr->InstructionBits() & 0x0fffffff);
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%s", converter_.NameInCode(str));
+ return 3;
+ }
+ case 'o': {
+ if (format[3] == '1') {
+ // 'off12: 12-bit offset for load and store instructions
+ ASSERT(STRING_STARTS_WITH(format, "off12"));
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d", instr->Offset12Field());
+ return 5;
+ }
+ // 'off8: 8-bit offset for extra load and store instructions
+ ASSERT(STRING_STARTS_WITH(format, "off8"));
+ int offs8 = (instr->ImmedHField() << 4) | instr->ImmedLField();
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%d", offs8);
+ return 4;
+ }
+ case 'p': { // 'pu: P and U bits for load and store instructions
+ ASSERT(STRING_STARTS_WITH(format, "pu"));
+ PrintPU(instr);
+ return 2;
+ }
+ case 'r': {
+ return FormatRegister(instr, format);
+ }
+ case 's': {
+ if (format[1] == 'h') { // 'shift_op or 'shift_rm
+ if (format[6] == 'o') { // 'shift_op
+ ASSERT(STRING_STARTS_WITH(format, "shift_op"));
+ if (instr->TypeField() == 0) {
+ PrintShiftRm(instr);
+ } else {
+ ASSERT(instr->TypeField() == 1);
+ PrintShiftImm(instr);
+ }
+ return 8;
+ } else { // 'shift_rm
+ ASSERT(STRING_STARTS_WITH(format, "shift_rm"));
+ PrintShiftRm(instr);
+ return 8;
+ }
+ } else if (format[1] == 'w') { // 'swi
+ ASSERT(STRING_STARTS_WITH(format, "swi"));
+ PrintSoftwareInterrupt(instr->SwiField());
+ return 3;
+ } else if (format[1] == 'i') { // 'sign: signed extra loads and stores
+ ASSERT(STRING_STARTS_WITH(format, "sign"));
+ if (instr->HasSign()) {
+ Print("s");
+ }
+ return 4;
+ }
+ // 's: S field of data processing instructions
+ if (instr->HasS()) {
+ Print("s");
+ }
+ return 1;
+ }
+ case 't': { // 'target: target of branch instructions
+ ASSERT(STRING_STARTS_WITH(format, "target"));
+ int off = (instr->SImmed24Field() << 2) + 8;
+ out_buffer_pos_ += v8i::OS::SNPrintF(
+ out_buffer_ + out_buffer_pos_,
+ "%+d -> %s",
+ off,
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off));
+ return 6;
+ }
+ case 'u': { // 'u: signed or unsigned multiplies
+ if (instr->Bit(22) == 0) {
+ Print("u");
+ } else {
+ Print("s");
+ }
+ return 1;
+ }
+ case 'w': { // 'w: W field of load and store instructions
+ if (instr->HasW()) {
+ Print("!");
+ }
+ return 1;
+ }
+ default: {
+ UNREACHABLE();
+ break;
+ }
+ }
+ UNREACHABLE();
+ return -1;
+}
+
+
+// Format takes a formatting string for a whole instruction and prints it into
+// the output buffer. All escaped options are handed to FormatOption to be
+// parsed further.
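+// For example, Format(instr, "mov'cond's 'rd, 'shift_op") copies "mov"
+// verbatim and expands 'cond, 's, 'rd and 'shift_op via FormatOption.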
+void Decoder::Format(Instr* instr, const char* format) {
+ char cur = *format++;
+ while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+ if (cur == '\'') { // Single quote is used as the formatting escape.
+ format += FormatOption(instr, format);
+ } else {
+ out_buffer_[out_buffer_pos_++] = cur;
+ }
+ cur = *format++;
+ }
+ out_buffer_[out_buffer_pos_] = '\0';
+}
+
+
+// For currently unimplemented decodings the disassembler calls Unknown(instr),
+// which just prints "unknown" instead of a disassembly of the instruction
+// bits.
+void Decoder::Unknown(Instr* instr) {
+ Format(instr, "unknown");
+}
+
+
+void Decoder::DecodeType01(Instr* instr) {
+ int type = instr->TypeField();
+ if ((type == 0) && instr->IsSpecialType0()) {
+ // multiply instruction or extra loads and stores
+ if (instr->Bits(7, 4) == 9) {
+ if (instr->Bit(24) == 0) {
+ // multiply instructions
+ if (instr->Bit(23) == 0) {
+ if (instr->Bit(21) == 0) {
+ Format(instr, "mul'cond's 'rd, 'rm, 'rs");
+ } else {
+ Format(instr, "mla'cond's 'rd, 'rm, 'rs, 'rn");
+ }
+ } else {
+ Format(instr, "'um'al'cond's 'rn, 'rd, 'rs, 'rm");
+ }
+ } else {
+ Unknown(instr); // not used by V8
+ }
+ } else {
+ // extra load/store instructions
+ switch (instr->PUField()) {
+ case 0: {
+ if (instr->Bit(22) == 0) {
+ Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
+ } else {
+ Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8");
+ }
+ break;
+ }
+ case 1: {
+ if (instr->Bit(22) == 0) {
+ Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
+ } else {
+ Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8");
+ }
+ break;
+ }
+ case 2: {
+ if (instr->Bit(22) == 0) {
+ Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w");
+ } else {
+ Format(instr, "'memop'cond'sign'h 'rd, ['rn, #-'off8]'w");
+ }
+ break;
+ }
+ case 3: {
+ if (instr->Bit(22) == 0) {
+ Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w");
+ } else {
+ Format(instr, "'memop'cond'sign'h 'rd, ['rn, #+'off8]'w");
+ }
+ break;
+ }
+ default: {
+ // The PU field is a 2-bit field.
+ UNREACHABLE();
+ break;
+ }
+ }
+ return;
+ }
+ } else {
+ switch (instr->OpcodeField()) {
+ case AND: {
+ Format(instr, "and'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case EOR: {
+ Format(instr, "eor'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case SUB: {
+ Format(instr, "sub'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case RSB: {
+ Format(instr, "rsb'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case ADD: {
+ Format(instr, "add'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case ADC: {
+ Format(instr, "adc'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case SBC: {
+ Format(instr, "sbc'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case RSC: {
+ Format(instr, "rsc'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case TST: {
+ if (instr->HasS()) {
+ Format(instr, "tst'cond 'rn, 'shift_op");
+ } else {
+ Unknown(instr); // not used by V8
+ }
+ break;
+ }
+ case TEQ: {
+ if (instr->HasS()) {
+ Format(instr, "teq'cond 'rn, 'shift_op");
+ } else {
+ Unknown(instr); // not used by V8
+ }
+ break;
+ }
+ case CMP: {
+ if (instr->HasS()) {
+ Format(instr, "cmp'cond 'rn, 'shift_op");
+ } else {
+ Unknown(instr); // not used by V8
+ }
+ break;
+ }
+ case CMN: {
+ if (instr->HasS()) {
+ Format(instr, "cmn'cond 'rn, 'shift_op");
+ } else {
+ Unknown(instr); // not used by V8
+ }
+ break;
+ }
+ case ORR: {
+ Format(instr, "orr'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case MOV: {
+ Format(instr, "mov'cond's 'rd, 'shift_op");
+ break;
+ }
+ case BIC: {
+ Format(instr, "bic'cond's 'rd, 'rn, 'shift_op");
+ break;
+ }
+ case MVN: {
+ Format(instr, "mvn'cond's 'rd, 'shift_op");
+ break;
+ }
+ default: {
+ // The Opcode field is a 4-bit field.
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+}
+
+
+void Decoder::DecodeType2(Instr* instr) {
+ switch (instr->PUField()) {
+ case 0: {
+ if (instr->HasW()) {
+ Unknown(instr); // not used in V8
+ }
+ Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
+ break;
+ }
+ case 1: {
+ if (instr->HasW()) {
+ Unknown(instr); // not used in V8
+ }
+ Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
+ break;
+ }
+ case 2: {
+ Format(instr, "'memop'cond'b 'rd, ['rn, #-'off12]'w");
+ break;
+ }
+ case 3: {
+ Format(instr, "'memop'cond'b 'rd, ['rn, #+'off12]'w");
+ break;
+ }
+ default: {
+ // The PU field is a 2-bit field.
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void Decoder::DecodeType3(Instr* instr) {
+ switch (instr->PUField()) {
+ case 0: {
+ ASSERT(!instr->HasW());
+ Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
+ break;
+ }
+ case 1: {
+ ASSERT(!instr->HasW());
+ Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
+ break;
+ }
+ case 2: {
+ Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
+ break;
+ }
+ case 3: {
+ Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
+ break;
+ }
+ default: {
+ // The PU field is a 2-bit field.
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+void Decoder::DecodeType4(Instr* instr) {
+ ASSERT(instr->Bit(22) == 0); // Privileged mode currently not supported.
+ if (instr->HasL()) {
+ Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
+ } else {
+ Format(instr, "stm'cond'pu 'rn'w, 'rlist");
+ }
+}
+
+
+void Decoder::DecodeType5(Instr* instr) {
+ Format(instr, "b'l'cond 'target");
+}
+
+
+void Decoder::DecodeType6(Instr* instr) {
+ // Coprocessor instructions currently not supported.
+ Unknown(instr);
+}
+
+
+void Decoder::DecodeType7(Instr* instr) {
+ if (instr->Bit(24) == 1) {
+ Format(instr, "swi'cond 'swi");
+ } else {
+ // Coprocessor instructions currently not supported.
+ Unknown(instr);
+ }
+}
+
+
+// Disassemble the instruction at *instr_ptr into the output buffer.
+int Decoder::InstructionDecode(byte* instr_ptr) {
+ Instr* instr = Instr::At(instr_ptr);
+ // Print raw instruction bytes.
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%08x ",
+ instr->InstructionBits());
+ if (instr->ConditionField() == special_condition) {
+ Format(instr, "break 'msg");
+ return Instr::kInstrSize;
+ }
+ switch (instr->TypeField()) {
+ case 0:
+ case 1: {
+ DecodeType01(instr);
+ break;
+ }
+ case 2: {
+ DecodeType2(instr);
+ break;
+ }
+ case 3: {
+ DecodeType3(instr);
+ break;
+ }
+ case 4: {
+ DecodeType4(instr);
+ break;
+ }
+ case 5: {
+ DecodeType5(instr);
+ break;
+ }
+ case 6: {
+ DecodeType6(instr);
+ break;
+ }
+ case 7: {
+ DecodeType7(instr);
+ break;
+ }
+ default: {
+ // The type field is 3-bits in the ARM encoding.
+ UNREACHABLE();
+ break;
+ }
+ }
+ return Instr::kInstrSize;
+}
+
+
+} } // namespace assembler::arm
+
+
+
+//------------------------------------------------------------------------------
+
+namespace disasm {
+
+namespace v8i = v8::internal;
+
+
+static const int kMaxRegisters = 16;
+
+// These register names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+static const char* reg_names[kMaxRegisters] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "sl", "fp", "ip", "sp", "lr", "pc",
+};
+
+
+const char* NameConverter::NameOfAddress(byte* addr) const {
+ static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
+ v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr);
+ return tmp_buffer.start();
+}
+
+
+const char* NameConverter::NameOfConstant(byte* addr) const {
+ return NameOfAddress(addr);
+}
+
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+ const char* result;
+ if ((0 <= reg) && (reg < kMaxRegisters)) {
+ result = reg_names[reg];
+ } else {
+ result = "noreg";
+ }
+ return result;
+}
+
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+ UNREACHABLE(); // ARM does not have the concept of a byte register
+ return "nobytereg";
+}
+
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+ UNREACHABLE(); // ARM does not have any XMM registers
+ return "noxmmreg";
+}
+
+
+const char* NameConverter::NameInCode(byte* addr) const {
+  // The default name converter is called for unknown code, so we do not try
+  // to access any memory.
+ return "";
+}
+
+
+//------------------------------------------------------------------------------
+
+Disassembler::Disassembler(const NameConverter& converter)
+ : converter_(converter) {}
+
+
+Disassembler::~Disassembler() {}
+
+
+int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
+ byte* instruction) {
+ assembler::arm::Decoder d(converter_, buffer);
+ return d.InstructionDecode(instruction);
+}
+
+
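+// The assembler emits a marker instruction in front of each constant pool;
+// matching that marker here (an encoding not produced for ordinary code)
+// lets the disassembler report the pool size, held in the marker's low
+// 16 bits, so callers can skip the pool data.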
+int Disassembler::ConstantPoolSizeAt(byte* instruction) {
+ int instruction_bits = *(reinterpret_cast<int*>(instruction));
+ if ((instruction_bits & 0xfff00000) == 0x03000000) {
+ return instruction_bits & 0x0000ffff;
+ } else {
+ return -1;
+ }
+}
+
+
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
+ NameConverter converter;
+ Disassembler d(converter);
+ for (byte* pc = begin; pc < end;) {
+ v8::internal::EmbeddedVector<char, 128> buffer;
+ buffer[0] = '\0';
+ byte* prev_pc = pc;
+ pc += d.InstructionDecode(buffer, pc);
+ fprintf(f, "%p %08x %s\n",
+ prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+ }
+}
+
+
+} // namespace disasm
diff --git a/V8Binding/v8/src/arm/frames-arm.cc b/V8Binding/v8/src/arm/frames-arm.cc
new file mode 100644
index 0000000..6fde4b7
--- /dev/null
+++ b/V8Binding/v8/src/arm/frames-arm.cc
@@ -0,0 +1,118 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "frames-inl.h"
+#include "arm/assembler-arm-inl.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+StackFrame::Type StackFrame::ComputeType(State* state) {
+ ASSERT(state->fp != NULL);
+ if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
+ return ARGUMENTS_ADAPTOR;
+ }
+ // The marker and function offsets overlap. If the marker isn't a
+ // smi then the frame is a JavaScript frame -- and the marker is
+ // really the function.
+ const int offset = StandardFrameConstants::kMarkerOffset;
+ Object* marker = Memory::Object_at(state->fp + offset);
+ if (!marker->IsSmi()) return JAVA_SCRIPT;
+ return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
+}
+
+
+StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
+ if (fp == 0) return NONE;
+ // Compute frame type and stack pointer.
+ Address sp = fp + ExitFrameConstants::kSPDisplacement;
+ Type type;
+ if (Memory::Address_at(fp + ExitFrameConstants::kDebugMarkOffset) != 0) {
+ type = EXIT_DEBUG;
+ sp -= kNumJSCallerSaved * kPointerSize;
+ } else {
+ type = EXIT;
+ }
+ // Fill in the state.
+ state->sp = sp;
+ state->fp = fp;
+ state->pc_address = reinterpret_cast<Address*>(sp - 1 * kPointerSize);
+ return type;
+}
+
+
+void ExitFrame::Iterate(ObjectVisitor* v) const {
+ // Do nothing
+}
+
+
+int JavaScriptFrame::GetProvidedParametersCount() const {
+ return ComputeParametersCount();
+}
+
+
+Address JavaScriptFrame::GetCallerStackPointer() const {
+ int arguments;
+ if (Heap::gc_state() != Heap::NOT_IN_GC || disable_heap_access_) {
+ // The arguments for cooked frames are traversed as if they were
+ // expression stack elements of the calling frame. The reason for
+ // this rather strange decision is that we cannot access the
+ // function during mark-compact GCs when the stack is cooked.
+ // In fact accessing heap objects (like function->shared() below)
+ // at all during GC is problematic.
+ arguments = 0;
+ } else {
+ // Compute the number of arguments by getting the number of formal
+ // parameters of the function. We must remember to take the
+ // receiver into account (+1).
+ JSFunction* function = JSFunction::cast(this->function());
+ arguments = function->shared()->formal_parameter_count() + 1;
+ }
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ return fp() + offset + (arguments * kPointerSize);
+}
+
+
+Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
+ const int arguments = Smi::cast(GetExpression(0))->value();
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ return fp() + offset + (arguments + 1) * kPointerSize;
+}
+
+
+Address InternalFrame::GetCallerStackPointer() const {
+ // Internal frames have no arguments. The stack pointer of the
+ // caller is at a fixed offset from the frame pointer.
+ return fp() + StandardFrameConstants::kCallerSPOffset;
+}
+
+
+} } // namespace v8::internal
diff --git a/V8Binding/v8/src/arm/frames-arm.h b/V8Binding/v8/src/arm/frames-arm.h
new file mode 100644
index 0000000..a67b18a
--- /dev/null
+++ b/V8Binding/v8/src/arm/frames-arm.h
@@ -0,0 +1,380 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_FRAMES_ARM_H_
+#define V8_ARM_FRAMES_ARM_H_
+
+namespace v8 {
+namespace internal {
+
+
+// The ARM ABI does not specify the usage of register r9, which may be reserved
+// as the static base or thread register on some platforms, in which case we
+// leave it alone. Adjust the value of kR9Available accordingly:
+static const int kR9Available = 1; // 1 if available to us, 0 if reserved
+
+
+// Register list in load/store instructions.
+// Note that the bit values must match those used in the actual instruction
+// encoding.
+static const int kNumRegs = 16;
+
+
+// Caller-saved/arguments registers
+static const RegList kJSCallerSaved =
+ 1 << 0 | // r0 a1
+ 1 << 1 | // r1 a2
+ 1 << 2 | // r2 a3
+ 1 << 3; // r3 a4
+
+static const int kNumJSCallerSaved = 4;
+
+typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
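+
+// For example (illustrative), the mask can be scanned lowest register first:
+//
+//   for (int i = 0; i < kNumRegs; i++) {
+//     if ((kJSCallerSaved & (1 << i)) != 0) {
+//       // register i is a JS caller-saved register
+//     }
+//   }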
+
+// Return the code of the n-th caller-saved register available to JavaScript,
+// e.g. JSCallerSavedCode(0) returns r0.code() == 0.
+int JSCallerSavedCode(int n);
+
+
+// Callee-saved registers preserved when switching from C to JavaScript
+static const RegList kCalleeSaved =
+ 1 << 4 | // r4 v1
+ 1 << 5 | // r5 v2
+ 1 << 6 | // r6 v3
+ 1 << 7 | // r7 v4
+ 1 << 8 | // r8 v5 (cp in JavaScript code)
+ kR9Available
+ << 9 | // r9 v6
+ 1 << 10 | // r10 v7 (pp in JavaScript code)
+ 1 << 11; // r11 v8 (fp in JavaScript code)
+
+static const int kNumCalleeSaved = 7 + kR9Available;
+
+
+// ----------------------------------------------------
+
+
+class StackHandlerConstants : public AllStatic {
+ public:
+ // TODO(1233780): Get rid of the code slot in stack handlers.
+ static const int kCodeOffset = 0 * kPointerSize;
+ static const int kNextOffset = 1 * kPointerSize;
+ static const int kStateOffset = 2 * kPointerSize;
+ static const int kPPOffset = 3 * kPointerSize;
+ static const int kFPOffset = 4 * kPointerSize;
+ static const int kPCOffset = 5 * kPointerSize;
+
+ static const int kAddressDisplacement = -1 * kPointerSize;
+ static const int kSize = kPCOffset + kPointerSize;
+};
+
+
+class EntryFrameConstants : public AllStatic {
+ public:
+ static const int kCallerFPOffset = -3 * kPointerSize;
+};
+
+
+class ExitFrameConstants : public AllStatic {
+ public:
+ // Exit frames have a debug marker on the stack.
+ static const int kSPDisplacement = -1 * kPointerSize;
+
+ // The debug marker is just above the frame pointer.
+ static const int kDebugMarkOffset = -1 * kPointerSize;
+
+ static const int kSavedRegistersOffset = 0 * kPointerSize;
+
+ // Let the parameters pointer for exit frames point just below the
+ // frame structure on the stack.
+ static const int kPPDisplacement = 3 * kPointerSize;
+
+ // The caller fields are below the frame pointer on the stack.
+ static const int kCallerFPOffset = +0 * kPointerSize;
+ static const int kCallerPPOffset = +1 * kPointerSize;
+ static const int kCallerPCOffset = +2 * kPointerSize;
+};
+
+
+class StandardFrameConstants : public AllStatic {
+ public:
+ static const int kExpressionsOffset = -3 * kPointerSize;
+ static const int kMarkerOffset = -2 * kPointerSize;
+ static const int kContextOffset = -1 * kPointerSize;
+ static const int kCallerFPOffset = 0 * kPointerSize;
+ static const int kCallerPCOffset = +1 * kPointerSize;
+ static const int kCallerSPOffset = +2 * kPointerSize;
+};
+
+
+class JavaScriptFrameConstants : public AllStatic {
+ public:
+ // FP-relative.
+ static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+ static const int kSavedRegistersOffset = +2 * kPointerSize;
+ static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+
+ // PP-relative.
+ static const int kParam0Offset = -2 * kPointerSize;
+ static const int kReceiverOffset = -1 * kPointerSize;
+};
+
+
+class ArgumentsAdaptorFrameConstants : public AllStatic {
+ public:
+ static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+class InternalFrameConstants : public AllStatic {
+ public:
+ static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+inline Object* JavaScriptFrame::function_slot_object() const {
+ const int offset = JavaScriptFrameConstants::kFunctionOffset;
+ return Memory::Object_at(fp() + offset);
+}
+
+
+// ----------------------------------------------------
+
+
+
+
+ // lower | Stack |
+ // addresses | ^ |
+ // | | |
+ // | |
+ // | JS frame |
+ // | |
+ // | |
+ // ----------- +=============+ <--- sp (stack pointer)
+ // | function |
+ // +-------------+
+ // +-------------+
+ // | |
+ // | expressions |
+ // | |
+ // +-------------+
+ // | |
+ // a | locals |
+ // c | |
+ // t +- - - - - - -+ <---
+ // i -4 | local0 | ^
+ // v +-------------+ |
+ // a -3 | code | |
+ // t +-------------+ | kLocal0Offset
+ // i -2 | context | |
+ // o +-------------+ |
+ // n -1 | args_length | v
+ // +-------------+ <--- fp (frame pointer)
+ // 0 | caller_pp |
+ // f +-------------+
+ // r 1 | caller_fp |
+ // a +-------------+
+ // m 2 | sp_on_exit | (pp if return, caller_sp if no return)
+ // e +-------------+
+ // 3 | caller_pc |
+ // +-------------+ <--- caller_sp (incl. parameters)
+ // | |
+ // | parameters |
+ // | |
+ // +- - - - - - -+ <---
+ // -2 | parameter0 | ^
+ // +-------------+ | kParam0Offset
+ // -1 | receiver | v
+ // ----------- +=============+ <--- pp (parameter pointer, r10)
+ // 0 | function |
+ // +-------------+
+ // | |
+ // |caller-saved | (must be valid JS values, traversed during GC)
+ // | regs |
+ // | |
+ // +-------------+
+ // | |
+ // | caller |
+ // higher | expressions |
+ // addresses | |
+ // | |
+ // | JS frame |
+
+
+
+ // Handler frames (part of expressions of JS frames):
+
+ // lower | Stack |
+ // addresses | ^ |
+ // | | |
+ // | |
+ // h | expressions |
+ // a | |
+ // n +-------------+
+ // d -1 | code |
+ // l +-------------+ <--- handler sp
+ // e 0 | next_sp | link to next handler (next handler's sp)
+ // r +-------------+
+ // 1 | state |
+ // f +-------------+
+ // r 2 | pp |
+ // a +-------------+
+ // m 3 | fp |
+ // e +-------------+
+ // 4 | pc |
+ // +-------------+
+ // | |
+ // higher | expressions |
+ // addresses | |
+
+
+
+ // JS entry frames: When calling from C to JS, we construct two extra
+ // frames: An entry frame (C) and a trampoline frame (JS). The
+ // following pictures shows the two frames:
+
+ // lower | Stack |
+ // addresses | ^ |
+ // | | |
+ // | |
+ // | JS frame |
+ // | |
+ // | |
+ // ----------- +=============+ <--- sp (stack pointer)
+ // | |
+ // | parameters |
+ // t | |
+ // r +- - - - - - -+
+ // a | parameter0 |
+ // m +-------------+
+ // p | receiver |
+ // o +-------------+
+ // l | function |
+ // i +-------------+
+ // n -3 | code |
+ // e +-------------+
+ // -2 | NULL | context is always NULL
+ // +-------------+
+ // f -1 | 0 | args_length is always zero
+ // r +-------------+ <--- fp (frame pointer)
+ // a 0 | NULL | caller pp is always NULL for entries
+ // m +-------------+
+ // e 1 | caller_fp |
+ // +-------------+
+ // 2 | sp_on_exit | (caller_sp)
+ // +-------------+
+ // 3 | caller_pc |
+ // ----------- +=============+ <--- caller_sp == pp
+ // . ^
+ // . | try-handler, fake, not GC'ed
+ // . v
+ // +-------------+ <---
+ // -2 | next top pp |
+ // +-------------+
+ // -1 | next top fp |
+ // +-------------+ <--- fp
+ // | r4 | r4-r9 holding non-JS values must be preserved
+ // +-------------+
+ // J | r5 | before being initialized not to confuse GC
+ // S +-------------+
+ // | r6 |
+ // +-------------+
+ // e | r7 |
+ // n +-------------+
+ // t | r8 |
+ // r +-------------+
+ // y [ | r9 | ] only if r9 available
+ // +-------------+
+ // | r10 |
+ // f +-------------+
+ // r | r11 |
+ // a +-------------+
+ // m | caller_sp |
+ // e +-------------+
+ // | caller_pc |
+ // +-------------+ <--- caller_sp
+ // | argv | passed on stack from C code
+ // +-------------+
+ // | |
+ // higher | |
+ // addresses | C frame |
+
+
+ // The first 4 args are passed from C in r0-r3 and are not spilled on entry:
+ // r0: code entry
+ // r1: function
+ // r2: receiver
+ // r3: argc
+ // [sp+0]: argv
+
+
+ // C entry frames: When calling from JS to C, we construct one extra
+ // frame:
+
+ // lower | Stack |
+ // addresses | ^ |
+ // | | |
+ // | |
+ // | C frame |
+ // | |
+ // | |
+ // ----------- +=============+ <--- sp (stack pointer)
+ // | |
+ // | parameters | (first 4 args are passed in r0-r3)
+ // | |
+ // +-------------+ <--- fp (frame pointer)
+ // f 4/5 | caller_fp |
+ // r +-------------+
+ // a 5/6 | sp_on_exit | (pp)
+ // m +-------------+
+ // e 6/7 | caller_pc |
+ // +-------------+ <--- caller_sp (incl. parameters)
+ // 7/8 | |
+ // | parameters |
+ // | |
+ // +- - - - - - -+ <---
+ // -2 | parameter0 | ^
+ // +-------------+ | kParam0Offset
+ // -1 | receiver | v
+ // ----------- +=============+ <--- pp (parameter pointer, r10)
+ // 0 | function |
+ // +-------------+
+ // | |
+ // |caller-saved |
+ // | regs |
+ // | |
+ // +-------------+
+ // | |
+ // | caller |
+ // | expressions |
+ // | |
+ // higher | |
+ // addresses | JS frame |
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_FRAMES_ARM_H_
diff --git a/V8Binding/v8/src/arm/ic-arm.cc b/V8Binding/v8/src/arm/ic-arm.cc
new file mode 100644
index 0000000..9b45c46
--- /dev/null
+++ b/V8Binding/v8/src/arm/ic-arm.cc
@@ -0,0 +1,807 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "ic-inl.h"
+#include "runtime.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
+
+// Helper function used from LoadIC/CallIC GenerateNormal.
+static void GenerateDictionaryLoad(MacroAssembler* masm,
+ Label* miss,
+ Register t0,
+ Register t1) {
+ // Register use:
+ //
+ // t0 - used to hold the property dictionary.
+ //
+ // t1 - initially the receiver
+ // - used for the index into the property dictionary
+ // - holds the result on exit.
+ //
+ // r3 - used as a temporary and to hold the capacity of the property
+ // dictionary.
+ //
+ // r2 - holds the name of the property and is unchanged.
+
+ Label done;
+
+ // Check for the absence of an interceptor.
+ // Load the map into t0.
+ __ ldr(t0, FieldMemOperand(t1, JSObject::kMapOffset));
+ // Test the has_named_interceptor bit in the map.
+ __ ldr(t0, FieldMemOperand(t1, Map::kInstanceAttributesOffset));
+ __ tst(t0, Operand(1 << (Map::kHasNamedInterceptor + (3 * 8))));
+ // Jump to miss if the interceptor bit is set.
+ __ b(ne, miss);
+
+
+ // Check that the properties array is a dictionary.
+ __ ldr(t0, FieldMemOperand(t1, JSObject::kPropertiesOffset));
+ __ ldr(r3, FieldMemOperand(t0, HeapObject::kMapOffset));
+ __ cmp(r3, Operand(Factory::hash_table_map()));
+ __ b(ne, miss);
+
+ // Compute the capacity mask.
+ const int kCapacityOffset =
+ Array::kHeaderSize + Dictionary::kCapacityIndex * kPointerSize;
+ __ ldr(r3, FieldMemOperand(t0, kCapacityOffset));
+ __ mov(r3, Operand(r3, ASR, kSmiTagSize)); // convert smi to int
+ __ sub(r3, r3, Operand(1));
+
+ const int kElementsStartOffset =
+ Array::kHeaderSize + Dictionary::kElementsStartIndex * kPointerSize;
+
+ // Generate an unrolled loop that performs a few probes before
+ // giving up. Measurements done on Gmail indicate that 2 probes
+ // cover ~93% of loads from dictionaries.
+ static const int kProbes = 4;
+ for (int i = 0; i < kProbes; i++) {
+ // Compute the masked index: (hash + i + i * i) & mask.
+ __ ldr(t1, FieldMemOperand(r2, String::kLengthOffset));
+ __ mov(t1, Operand(t1, LSR, String::kHashShift));
+ if (i > 0) {
+ __ add(t1, t1, Operand(Dictionary::GetProbeOffset(i)));
+ }
+ __ and_(t1, t1, Operand(r3));
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(Dictionary::kElementSize == 3);
+ __ add(t1, t1, Operand(t1, LSL, 1)); // t1 = t1 * 3
+
+ // Check if the key is identical to the name.
+ __ add(t1, t0, Operand(t1, LSL, 2));
+ __ ldr(ip, FieldMemOperand(t1, kElementsStartOffset));
+ __ cmp(r2, Operand(ip));
+ if (i != kProbes - 1) {
+ __ b(eq, &done);
+ } else {
+ __ b(ne, miss);
+ }
+ }
+
+ // Check that the value is a normal property.
+ __ bind(&done); // t1 == t0 + 4*index
+ __ ldr(r3, FieldMemOperand(t1, kElementsStartOffset + 2 * kPointerSize));
+ __ tst(r3, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
+ __ b(ne, miss);
+
+ // Get the value at the masked, scaled index and return.
+ __ ldr(t1, FieldMemOperand(t1, kElementsStartOffset + 1 * kPointerSize));
+}
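+
+// A minimal sketch of the probe loop above in C++ terms, assuming
+// Dictionary::GetProbeOffset(i) grows quadratically in i and
+// Dictionary::kElementSize == 3 (key, value, details); the variable
+// names here are illustrative:
+//
+//   int entry = (hash + GetProbeOffset(i)) & capacity_mask;
+//   int index = entry * Dictionary::kElementSize; // the t1 = t1 * 3 above
+//   if (elements[index] == name) {
+//     // elements[index + 2] holds the details, checked to be a normal
+//     // property; elements[index + 1] holds the result value.
+//   }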
+
+
+// Helper function used to check that a value is either not an object
+// or is loaded if it is an object.
+static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm,
+ Label* miss,
+ Register value,
+ Register scratch) {
+ Label done;
+ // Check if the value is a Smi.
+ __ tst(value, Operand(kSmiTagMask));
+ __ b(eq, &done);
+ // Check if the object has been loaded.
+ __ ldr(scratch, FieldMemOperand(value, JSObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
+ __ tst(scratch, Operand(1 << Map::kNeedsLoading));
+ __ b(ne, miss);
+ __ bind(&done);
+}
+
+
+void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+
+ Label miss;
+
+ __ ldr(r0, MemOperand(sp, 0));
+
+ StubCompiler::GenerateLoadArrayLength(masm, r0, r3, &miss);
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+void LoadIC::GenerateStringLength(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ ldr(r0, MemOperand(sp, 0));
+
+ StubCompiler::GenerateLoadStringLength2(masm, r0, r1, r3, &miss);
+ // Cache miss: Jump to runtime.
+ __ bind(&miss);
+ StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
+}
+
+
+void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+
+ // NOTE: Right now, this code always misses on ARM, which is
+ // sub-optimal. We should port the fast case code from IA-32.
+
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Miss));
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+}
+
+
+// Defined in ic.cc.
+Object* CallIC_Miss(Arguments args);
+
+void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- lr: return address
+ // -----------------------------------
+ Label number, non_number, non_string, boolean, probe, miss;
+
+ // Get the receiver of the function from the stack into r1.
+ __ ldr(r1, MemOperand(sp, argc * kPointerSize));
+ // Get the name of the function from the stack; 1 ~ receiver.
+ __ ldr(r2, MemOperand(sp, (argc + 1) * kPointerSize));
+
+ // Probe the stub cache.
+ Code::Flags flags =
+ Code::ComputeFlags(Code::CALL_IC, NOT_IN_LOOP, MONOMORPHIC, NORMAL, argc);
+ StubCache::GenerateProbe(masm, flags, r1, r2, r3);
+
+ // If the stub cache probing failed, the receiver might be a value.
+ // For value objects, the cache is keyed on the map of the prototype
+ // object of the corresponding JSValue, so that is the map we need to
+ // probe with.
+ //
+ // Check for number.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &number);
+ __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
+ __ cmp(r3, Operand(HEAP_NUMBER_TYPE));
+ __ b(ne, &non_number);
+ __ bind(&number);
+ StubCompiler::GenerateLoadGlobalFunctionPrototype(
+ masm, Context::NUMBER_FUNCTION_INDEX, r1);
+ __ b(&probe);
+
+ // Check for string.
+ __ bind(&non_number);
+ __ cmp(r3, Operand(FIRST_NONSTRING_TYPE));
+ __ b(hs, &non_string);
+ StubCompiler::GenerateLoadGlobalFunctionPrototype(
+ masm, Context::STRING_FUNCTION_INDEX, r1);
+ __ b(&probe);
+
+ // Check for boolean.
+ __ bind(&non_string);
+ __ cmp(r1, Operand(Factory::true_value()));
+ __ b(eq, &boolean);
+ __ cmp(r1, Operand(Factory::false_value()));
+ __ b(ne, &miss);
+ __ bind(&boolean);
+ StubCompiler::GenerateLoadGlobalFunctionPrototype(
+ masm, Context::BOOLEAN_FUNCTION_INDEX, r1);
+
+ // Probe the stub cache for the value object.
+ __ bind(&probe);
+ StubCache::GenerateProbe(masm, flags, r1, r2, r3);
+
+ // Cache miss: Jump to runtime.
+ __ bind(&miss);
+ Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+}
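+
+// In JavaScript terms, the value-receiver path above covers calls such
+// as the following (a rough illustration, not an exhaustive list):
+//
+//   (5).toString() // probes with the map of the Number prototype
+//   "a".charAt(0) // probes with the map of the String prototype
+//   true.valueOf() // probes with the map of the Boolean prototype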
+
+
+static void GenerateNormalHelper(MacroAssembler* masm,
+ int argc,
+ bool is_global_object,
+ Label* miss) {
+ // Search dictionary - put result in register r1.
+ GenerateDictionaryLoad(masm, miss, r0, r1);
+
+ // Check that the value isn't a smi.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, miss);
+
+ // Check that the value is a JSFunction.
+ __ ldr(r0, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
+ __ cmp(r0, Operand(JS_FUNCTION_TYPE));
+ __ b(ne, miss);
+
+ // Check that the function has been loaded.
+ __ ldr(r0, FieldMemOperand(r1, JSObject::kMapOffset));
+ __ ldrb(r0, FieldMemOperand(r0, Map::kBitField2Offset));
+ __ tst(r0, Operand(1 << Map::kNeedsLoading));
+ __ b(ne, miss);
+
+ // Patch the receiver with the global proxy if necessary.
+ if (is_global_object) {
+ __ ldr(r2, MemOperand(sp, argc * kPointerSize));
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+ __ str(r2, MemOperand(sp, argc * kPointerSize));
+ }
+
+ // Invoke the function.
+ ParameterCount actual(argc);
+ __ InvokeFunction(r1, actual, JUMP_FUNCTION);
+}
+
+
+void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+ // ----------- S t a t e -------------
+ // -- lr: return address
+ // -----------------------------------
+
+ Label miss, global_object, non_global_object;
+
+ // Get the receiver of the function from the stack into r1.
+ __ ldr(r1, MemOperand(sp, argc * kPointerSize));
+ // Get the name of the function from the stack; 1 ~ receiver.
+ __ ldr(r2, MemOperand(sp, (argc + 1) * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ // Check that the receiver is a valid JS object.
+ __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r0, FieldMemOperand(r3, Map::kInstanceTypeOffset));
+ __ cmp(r0, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(lt, &miss);
+
+ // If this assert fails, we have to check upper bound too.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+ // Check for access to global object.
+ __ cmp(r0, Operand(JS_GLOBAL_OBJECT_TYPE));
+ __ b(eq, &global_object);
+ __ cmp(r0, Operand(JS_BUILTINS_OBJECT_TYPE));
+ __ b(ne, &non_global_object);
+
+ // Accessing global object: Load and invoke.
+ __ bind(&global_object);
+ // Check that the global object does not require access checks.
+ __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
+ __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
+ __ b(ne, &miss);
+ GenerateNormalHelper(masm, argc, true, &miss);
+
+ // Accessing non-global object: Check for access to global proxy.
+ Label global_proxy, invoke;
+ __ bind(&non_global_object);
+ __ cmp(r0, Operand(JS_GLOBAL_PROXY_TYPE));
+ __ b(eq, &global_proxy);
+ // Check that the non-global, non-global-proxy object does not
+ // require access checks.
+ __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
+ __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
+ __ b(ne, &miss);
+ __ bind(&invoke);
+ GenerateNormalHelper(masm, argc, false, &miss);
+
+ // Global object access: Check access rights.
+ __ bind(&global_proxy);
+ __ CheckAccessGlobalProxy(r1, r0, &miss);
+ __ b(&invoke);
+
+ // Cache miss: Jump to runtime.
+ __ bind(&miss);
+ Generate(masm, argc, ExternalReference(IC_Utility(kCallIC_Miss)));
+}
+
+
+void CallIC::Generate(MacroAssembler* masm,
+ int argc,
+ const ExternalReference& f) {
+ // ----------- S t a t e -------------
+ // -- lr: return address
+ // -----------------------------------
+
+ // Get the receiver of the function from the stack.
+ __ ldr(r2, MemOperand(sp, argc * kPointerSize));
+ // Get the name of the function to call from the stack.
+ __ ldr(r1, MemOperand(sp, (argc + 1) * kPointerSize));
+
+ __ EnterInternalFrame();
+
+ // Push the receiver and the name of the function.
+ __ stm(db_w, sp, r1.bit() | r2.bit());
+
+ // Call the entry.
+ __ mov(r0, Operand(2));
+ __ mov(r1, Operand(f));
+
+ CEntryStub stub;
+ __ CallStub(&stub);
+
+ // Move result to r1 and leave the internal frame.
+ __ mov(r1, Operand(r0));
+ __ LeaveInternalFrame();
+
+ // Check if the receiver is a global object of some sort.
+ Label invoke, global;
+ __ ldr(r2, MemOperand(sp, argc * kPointerSize)); // receiver
+ __ tst(r2, Operand(kSmiTagMask));
+ __ b(eq, &invoke);
+ __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
+ __ cmp(r3, Operand(JS_GLOBAL_OBJECT_TYPE));
+ __ b(eq, &global);
+ __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
+ __ b(ne, &invoke);
+
+ // Patch the receiver on the stack.
+ __ bind(&global);
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+ __ str(r2, MemOperand(sp, argc * kPointerSize));
+
+ // Invoke the function.
+ ParameterCount actual(argc);
+ __ bind(&invoke);
+ __ InvokeFunction(r1, actual, JUMP_FUNCTION);
+}
+
+
+// Defined in ic.cc.
+Object* LoadIC_Miss(Arguments args);
+
+void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+
+ __ ldr(r0, MemOperand(sp, 0));
+ // Probe the stub cache.
+ Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
+ NOT_IN_LOOP,
+ MONOMORPHIC);
+ StubCache::GenerateProbe(masm, flags, r0, r2, r3);
+
+ // Cache miss: Jump to runtime.
+ Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+
+ Label miss, probe, global;
+
+ __ ldr(r0, MemOperand(sp, 0));
+ // Check that the receiver isn't a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ // Check that the receiver is a valid JS object.
+ __ ldr(r3, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldrb(r1, FieldMemOperand(r3, Map::kInstanceTypeOffset));
+ __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(lt, &miss);
+ // If this assert fails, we have to check upper bound too.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+
+ // Check for access to global object (unlikely).
+ __ cmp(r1, Operand(JS_GLOBAL_PROXY_TYPE));
+ __ b(eq, &global);
+
+ // Check for non-global object that requires access check.
+ __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
+ __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
+ __ b(ne, &miss);
+
+ __ bind(&probe);
+ GenerateDictionaryLoad(masm, &miss, r1, r0);
+ GenerateCheckNonObjectOrLoaded(masm, &miss, r0, r1);
+ __ Ret();
+
+ // Global object access: Check access rights.
+ __ bind(&global);
+ __ CheckAccessGlobalProxy(r0, r1, &miss);
+ __ b(&probe);
+
+ // Cache miss: Restore receiver from stack and jump to runtime.
+ __ bind(&miss);
+ Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+ Generate(masm, ExternalReference(IC_Utility(kLoadIC_Miss)));
+}
+
+
+void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+
+ __ ldr(r3, MemOperand(sp, 0));
+ __ stm(db_w, sp, r2.bit() | r3.bit());
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(f, 2);
+}
+
+
+// TODO(181): Implement map patching once loop nesting is tracked on the
+// ARM platform so we can generate inlined fast-case code loads in
+// loops.
+void LoadIC::ClearInlinedVersion(Address address) {}
+bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+ return false;
+}
+
+void KeyedLoadIC::ClearInlinedVersion(Address address) {}
+bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
+ return false;
+}
+
+
+Object* KeyedLoadIC_Miss(Arguments args);
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+ Generate(masm, ExternalReference(IC_Utility(kKeyedLoadIC_Miss)));
+}
+
+
+void KeyedLoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- sp[0] : key
+ // -- sp[4] : receiver
+ __ ldm(ia, sp, r2.bit() | r3.bit());
+ __ stm(db_w, sp, r2.bit() | r3.bit());
+
+ __ TailCallRuntime(f, 2);
+}
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- sp[0] : key
+ // -- sp[4] : receiver
+ Label slow, fast;
+
+ // Get the key and receiver object from the stack.
+ __ ldm(ia, sp, r0.bit() | r1.bit());
+ // Check that the key is a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(ne, &slow);
+ __ mov(r0, Operand(r0, ASR, kSmiTagSize));
+ // Check that the object isn't a smi.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &slow);
+
+ // Get the map of the receiver.
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks. We need
+ // to check this explicitly since this generic stub does not perform
+ // map checks.
+ __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
+ __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
+ __ b(ne, &slow);
+ // Check that the object is some kind of JS object EXCEPT JS Value type.
+ // In the case that the object is a value-wrapper object,
+ // we enter the runtime system to make sure that indexing into string
+ // objects works as intended.
+ ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+ __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ __ cmp(r2, Operand(JS_OBJECT_TYPE));
+ __ b(lt, &slow);
+
+ // Get the elements array of the object.
+ __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
+ // Check that the object is in fast mode (not dictionary).
+ __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ cmp(r3, Operand(Factory::hash_table_map()));
+ __ b(eq, &slow);
+ // Check that the key (index) is within bounds.
+ __ ldr(r3, FieldMemOperand(r1, Array::kLengthOffset));
+ __ cmp(r0, Operand(r3));
+ __ b(lo, &fast);
+
+ // Slow case: Push extra copies of the arguments (2).
+ __ bind(&slow);
+ __ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r0, r1);
+ __ ldm(ia, sp, r0.bit() | r1.bit());
+ __ stm(db_w, sp, r0.bit() | r1.bit());
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(ExternalReference(Runtime::kGetProperty), 2);
+
+ // Fast case: Do the load.
+ __ bind(&fast);
+ __ add(r3, r1, Operand(Array::kHeaderSize - kHeapObjectTag));
+ __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2));
+ __ cmp(r0, Operand(Factory::the_hole_value()));
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ b(eq, &slow);
+
+ __ Ret();
+}
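+
+// In C++ terms, the fast case above computes, for an untagged integer
+// key 'index' and a tagged 'elements' pointer:
+//
+//   Object* value = *reinterpret_cast<Object**>(
+//       elements + Array::kHeaderSize - kHeapObjectTag
+//       + (index << kPointerSizeLog2));
+//
+// followed by the the_hole check that falls back to the runtime.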
+
+
+void KeyedStoreIC::Generate(MacroAssembler* masm,
+ const ExternalReference& f) {
+ // ---------- S t a t e --------------
+ // -- r0 : value
+ // -- lr : return address
+ // -- sp[0] : key
+ // -- sp[1] : receiver
+
+ __ ldm(ia, sp, r2.bit() | r3.bit());
+ __ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
+
+ __ TailCallRuntime(f, 3);
+}
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- r0 : value
+ // -- lr : return address
+ // -- sp[0] : key
+ // -- sp[1] : receiver
+ Label slow, fast, array, extra, exit;
+ // Get the key and the object from the stack.
+ __ ldm(ia, sp, r1.bit() | r3.bit()); // r1 = key, r3 = receiver
+ // Check that the key is a smi.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(ne, &slow);
+ // Check that the object isn't a smi.
+ __ tst(r3, Operand(kSmiTagMask));
+ __ b(eq, &slow);
+ // Get the map of the object.
+ __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+ // Check that the receiver does not require access checks. We need
+ // to do this because this generic stub does not perform map checks.
+ __ ldrb(ip, FieldMemOperand(r2, Map::kBitFieldOffset));
+ __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
+ __ b(ne, &slow);
+ // Check if the object is a JS array or not.
+ __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ __ cmp(r2, Operand(JS_ARRAY_TYPE));
+ // r1 == key.
+ __ b(eq, &array);
+ // Check that the object is some kind of JS object.
+ __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(lt, &slow);
+
+
+ // Object case: Check key against length in the elements array.
+ __ ldr(r3, FieldMemOperand(r3, JSObject::kElementsOffset));
+ // Check that the object is in fast mode (not dictionary).
+ __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ cmp(r2, Operand(Factory::hash_table_map()));
+ __ b(eq, &slow);
+ // Untag the key (for checking against untagged length in the fixed array).
+ __ mov(r1, Operand(r1, ASR, kSmiTagSize));
+ // Compute address to store into and check array bounds.
+ __ add(r2, r3, Operand(Array::kHeaderSize - kHeapObjectTag));
+ __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2));
+ __ ldr(ip, FieldMemOperand(r3, Array::kLengthOffset));
+ __ cmp(r1, Operand(ip));
+ __ b(lo, &fast);
+
+
+ // Slow case: Push extra copies of the arguments (3).
+ __ bind(&slow);
+ __ ldm(ia, sp, r1.bit() | r3.bit()); // r0 == value, r1 == key, r3 == object
+ __ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit());
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3);
+
+ // Extra capacity case: Check if there is extra capacity to
+ // perform the store and update the length. Used for adding one
+ // element to the array by writing to array[array.length].
+ // r0 == value, r1 == key, r2 == elements, r3 == object
+ __ bind(&extra);
+ __ b(ne, &slow); // do not leave holes in the array
+ __ mov(r1, Operand(r1, ASR, kSmiTagSize)); // untag
+ __ ldr(ip, FieldMemOperand(r2, Array::kLengthOffset));
+ __ cmp(r1, Operand(ip));
+ __ b(hs, &slow);
+ __ mov(r1, Operand(r1, LSL, kSmiTagSize)); // restore tag
+ __ add(r1, r1, Operand(1 << kSmiTagSize)); // and increment
+ __ str(r1, FieldMemOperand(r3, JSArray::kLengthOffset));
+ __ mov(r3, Operand(r2));
+ // NOTE: Computing the address to store into must account for the
+ // fact that the key has already been incremented.
+ int displacement = Array::kHeaderSize - kHeapObjectTag -
+ ((1 << kSmiTagSize) * 2);
+ __ add(r2, r2, Operand(displacement));
+ __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ b(&fast);
+
+
+ // Array case: Get the length and the elements array from the JS
+ // array. Check that the array is in fast mode; if it is the
+ // length is always a smi.
+ // r0 == value, r3 == object
+ __ bind(&array);
+ __ ldr(r2, FieldMemOperand(r3, JSObject::kElementsOffset));
+ __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ cmp(r1, Operand(Factory::hash_table_map()));
+ __ b(eq, &slow);
+
+ // Check the key against the length in the array, compute the
+ // address to store into and fall through to fast case.
+ __ ldr(r1, MemOperand(sp)); // restore key
+ // r0 == value, r1 == key, r2 == elements, r3 == object.
+ __ ldr(ip, FieldMemOperand(r3, JSArray::kLengthOffset));
+ __ cmp(r1, Operand(ip));
+ __ b(hs, &extra);
+ __ mov(r3, Operand(r2));
+ __ add(r2, r2, Operand(Array::kHeaderSize - kHeapObjectTag));
+ __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+
+
+ // Fast case: Do the store.
+ // r0 == value, r2 == address to store into, r3 == elements
+ __ bind(&fast);
+ __ str(r0, MemOperand(r2));
+ // Skip write barrier if the written value is a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &exit);
+ // Update write barrier for the elements array address.
+ __ sub(r1, r2, Operand(r3));
+ __ RecordWrite(r3, r1, r2);
+
+ __ bind(&exit);
+ __ Ret();
+}
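+
+// Worked example for the displacement in the extra-capacity case above,
+// assuming kSmiTagSize == 1 and kPointerSizeLog2 == 2: when storing at
+// index k, the key register holds the smi of k + 1, i.e. the value
+// 2 * (k + 1). Shifting by (kPointerSizeLog2 - kSmiTagSize) yields
+// 4 * (k + 1), which overshoots the wanted element offset 4 * k by
+// (1 << kSmiTagSize) * 2 bytes; the displacement subtracts exactly that
+// overshoot from the header offset.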
+
+
+void KeyedStoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- r0 : value
+ // -- lr : return address
+ // -- sp[0] : key
+ // -- sp[1] : receiver
+ // -----------------------------------
+
+ __ ldm(ia, sp, r2.bit() | r3.bit());
+ __ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(
+ ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3);
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+
+ // Get the receiver from the stack and probe the stub cache.
+ __ ldr(r1, MemOperand(sp));
+ Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
+ NOT_IN_LOOP,
+ MONOMORPHIC);
+ StubCache::GenerateProbe(masm, flags, r1, r2, r3);
+
+ // Cache miss: Jump to runtime.
+ Generate(masm, ExternalReference(IC_Utility(kStoreIC_Miss)));
+}
+
+
+void StoreIC::GenerateExtendStorage(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+
+ __ ldr(r3, MemOperand(sp)); // copy receiver
+ __ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(
+ ExternalReference(IC_Utility(kSharedStoreIC_ExtendStorage)), 3);
+}
+
+
+void StoreIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+
+ __ ldr(r3, MemOperand(sp)); // copy receiver
+ __ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
+
+ // Perform tail call to the entry.
+ __ TailCallRuntime(f, 3);
+}
+
+
+#undef __
+
+
+} } // namespace v8::internal
diff --git a/V8Binding/v8/src/arm/jump-target-arm.cc b/V8Binding/v8/src/arm/jump-target-arm.cc
new file mode 100644
index 0000000..65e7eaf
--- /dev/null
+++ b/V8Binding/v8/src/arm/jump-target-arm.cc
@@ -0,0 +1,324 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "jump-target-inl.h"
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// JumpTarget implementation.
+
+#define __ ACCESS_MASM(cgen()->masm())
+
+void JumpTarget::DoJump() {
+ ASSERT(cgen()->has_valid_frame());
+ // Live non-frame registers are not allowed at unconditional jumps
+ // because we have no way of invalidating the corresponding results
+ // which are still live in the C++ code.
+ ASSERT(cgen()->HasValidEntryRegisters());
+
+ if (is_bound()) {
+ // Backward jump. There is an expected frame to merge to.
+ ASSERT(direction_ == BIDIRECTIONAL);
+ cgen()->frame()->PrepareMergeTo(entry_frame_);
+ cgen()->frame()->MergeTo(entry_frame_);
+ cgen()->DeleteFrame();
+ __ jmp(&entry_label_);
+ } else {
+ // Preconfigured entry frame is not used on ARM.
+ ASSERT(entry_frame_ == NULL);
+ // Forward jump. The current frame is added to the end of the list
+ // of frames reaching the target block and a jump to the merge code
+ // is emitted.
+ AddReachingFrame(cgen()->frame());
+ RegisterFile empty;
+ cgen()->SetFrame(NULL, &empty);
+ __ jmp(&merge_labels_.last());
+ }
+}
+
+
+void JumpTarget::DoBranch(Condition cc, Hint ignored) {
+ ASSERT(cgen()->has_valid_frame());
+
+ if (is_bound()) {
+ ASSERT(direction_ == BIDIRECTIONAL);
+ // Backward branch. We have an expected frame to merge to on the
+ // backward edge.
+
+ // Swap the current frame for a copy to use for the branch; the swap
+ // keeps the off-frame registers with the fall-through path.
+ VirtualFrame* fall_through_frame = cgen()->frame();
+ VirtualFrame* branch_frame = new VirtualFrame(fall_through_frame);
+ RegisterFile non_frame_registers;
+ cgen()->SetFrame(branch_frame, &non_frame_registers);
+
+ // Check if we can avoid merge code.
+ cgen()->frame()->PrepareMergeTo(entry_frame_);
+ if (cgen()->frame()->Equals(entry_frame_)) {
+ // Branch right in to the block.
+ cgen()->DeleteFrame();
+ __ b(cc, &entry_label_);
+ cgen()->SetFrame(fall_through_frame, &non_frame_registers);
+ return;
+ }
+
+ // Check if we can reuse existing merge code.
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ if (reaching_frames_[i] != NULL &&
+ cgen()->frame()->Equals(reaching_frames_[i])) {
+ // Branch to the merge code.
+ cgen()->DeleteFrame();
+ __ b(cc, &merge_labels_[i]);
+ cgen()->SetFrame(fall_through_frame, &non_frame_registers);
+ return;
+ }
+ }
+
+ // To emit the merge code here, we negate the condition and branch
+ // around the merge code on the fall through path.
+ Label original_fall_through;
+ __ b(NegateCondition(cc), &original_fall_through);
+ cgen()->frame()->MergeTo(entry_frame_);
+ cgen()->DeleteFrame();
+ __ b(&entry_label_);
+ cgen()->SetFrame(fall_through_frame, &non_frame_registers);
+ __ bind(&original_fall_through);
+
+ } else {
+ // Preconfigured entry frame is not used on ARM.
+ ASSERT(entry_frame_ == NULL);
+ // Forward branch. A copy of the current frame is added to the end
+ // of the list of frames reaching the target block and a branch to
+ // the merge code is emitted.
+ AddReachingFrame(new VirtualFrame(cgen()->frame()));
+ __ b(cc, &merge_labels_.last());
+ }
+}
+
+
+void JumpTarget::Call() {
+ // Call is used to push the address of the catch block on the stack as
+ // a return address when compiling try/catch and try/finally. We
+ // fully spill the frame before making the call. The expected frame
+ // at the label (which should be the only one) is the spilled current
+ // frame plus an in-memory return address. The "fall-through" frame
+ // at the return site is the spilled current frame.
+ ASSERT(cgen()->has_valid_frame());
+ // There are no non-frame references across the call.
+ ASSERT(cgen()->HasValidEntryRegisters());
+ ASSERT(!is_linked());
+
+ cgen()->frame()->SpillAll();
+ VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
+ target_frame->Adjust(1);
+ // We do not expect a call with a preconfigured entry frame.
+ ASSERT(entry_frame_ == NULL);
+ AddReachingFrame(target_frame);
+ __ bl(&merge_labels_.last());
+}
+
+
+void JumpTarget::DoBind(int mergable_elements) {
+ ASSERT(!is_bound());
+
+ // Live non-frame registers are not allowed at the start of a basic
+ // block.
+ ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
+
+ if (direction_ == FORWARD_ONLY) {
+ // A simple case: no forward jumps and no possible backward jumps.
+ if (!is_linked()) {
+ // The stack pointer can be floating above the top of the
+ // virtual frame before the bind. Afterward, it should not.
+ ASSERT(cgen()->has_valid_frame());
+ VirtualFrame* frame = cgen()->frame();
+ int difference = frame->stack_pointer_ - (frame->element_count() - 1);
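+ // Worked example: with stack_pointer_ == 7 and element_count() == 6,
+ // the frame top is element 5, so difference == 2 and sp is moved up
+ // by two words to land exactly on the frame top.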
+ if (difference > 0) {
+ frame->stack_pointer_ -= difference;
+ __ add(sp, sp, Operand(difference * kPointerSize));
+ }
+ __ bind(&entry_label_);
+ return;
+ }
+
+ // Another simple case: no fall through, a single forward jump,
+ // and no possible backward jumps.
+ if (!cgen()->has_valid_frame() && reaching_frames_.length() == 1) {
+ // Pick up the only reaching frame, take ownership of it, and
+ // use it for the block about to be emitted.
+ VirtualFrame* frame = reaching_frames_[0];
+ RegisterFile empty;
+ cgen()->SetFrame(frame, &empty);
+ reaching_frames_[0] = NULL;
+ __ bind(&merge_labels_[0]);
+
+ // The stack pointer can be floating above the top of the
+ // virtual frame before the bind. Afterward, it should not.
+ int difference = frame->stack_pointer_ - (frame->element_count() - 1);
+ if (difference > 0) {
+ frame->stack_pointer_ -= difference;
+ __ add(sp, sp, Operand(difference * kPointerSize));
+ }
+ __ bind(&entry_label_);
+ return;
+ }
+ }
+
+ // If there is a current frame, record it as the fall-through. It
+ // is owned by the reaching frames for now.
+ bool had_fall_through = false;
+ if (cgen()->has_valid_frame()) {
+ had_fall_through = true;
+ AddReachingFrame(cgen()->frame()); // Return value ignored.
+ RegisterFile empty;
+ cgen()->SetFrame(NULL, &empty);
+ }
+
+ // Compute the frame to use for entry to the block.
+ if (entry_frame_ == NULL) {
+ ComputeEntryFrame(mergable_elements);
+ }
+
+ // Some moves required to merge to an expected frame require purely
+ // frame state changes, and do not require any code generation.
+ // Perform those first to increase the possibility of finding equal
+ // frames below.
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ if (reaching_frames_[i] != NULL) {
+ reaching_frames_[i]->PrepareMergeTo(entry_frame_);
+ }
+ }
+
+ if (is_linked()) {
+ // There were forward jumps. Handle merging the reaching frames
+ // and possible fall through to the entry frame.
+
+ // Loop over the (non-null) reaching frames and process any that
+ // need merge code. Iterate backwards through the list to handle
+ // the fall-through frame first. Set frames that will be
+ // processed after 'i' to NULL if we want to avoid processing
+ // them.
+ for (int i = reaching_frames_.length() - 1; i >= 0; i--) {
+ VirtualFrame* frame = reaching_frames_[i];
+
+ if (frame != NULL) {
+ // Does the frame (probably) need merge code?
+ if (!frame->Equals(entry_frame_)) {
+ // We could have a valid frame as the fall through to the
+ // binding site or as the fall through from a previous merge
+ // code block. Jump around the code we are about to
+ // generate.
+ if (cgen()->has_valid_frame()) {
+ cgen()->DeleteFrame();
+ __ b(&entry_label_);
+ }
+ // Pick up the frame for this block. Assume ownership if
+ // there cannot be backward jumps.
+ RegisterFile empty;
+ if (direction_ == BIDIRECTIONAL) {
+ cgen()->SetFrame(new VirtualFrame(frame), &empty);
+ } else {
+ cgen()->SetFrame(frame, &empty);
+ reaching_frames_[i] = NULL;
+ }
+ __ bind(&merge_labels_[i]);
+
+ // Loop over the remaining (non-null) reaching frames,
+ // looking for any that can share merge code with this one.
+ for (int j = 0; j < i; j++) {
+ VirtualFrame* other = reaching_frames_[j];
+ if (other != NULL && other->Equals(cgen()->frame())) {
+ // Set the reaching frame element to null to avoid
+ // processing it later, and then bind its entry label.
+ reaching_frames_[j] = NULL;
+ __ bind(&merge_labels_[j]);
+ }
+ }
+
+ // Emit the merge code.
+ cgen()->frame()->MergeTo(entry_frame_);
+ } else if (i == reaching_frames_.length() - 1 && had_fall_through) {
+ // If this is the fall through, and it didn't need merge
+ // code, we need to pick up the frame so we can jump around
+ // subsequent merge blocks if necessary.
+ RegisterFile empty;
+ cgen()->SetFrame(frame, &empty);
+ reaching_frames_[i] = NULL;
+ }
+ }
+ }
+
+ // The code generator may not have a current frame if there was no
+ // fall through and none of the reaching frames needed merging.
+ // In that case, clone the entry frame as the current frame.
+ if (!cgen()->has_valid_frame()) {
+ RegisterFile empty;
+ cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
+ }
+
+ // There may be unprocessed reaching frames that did not need
+ // merge code. They will have unbound merge labels. Bind their
+ // merge labels to be the same as the entry label and deallocate
+ // them.
+ for (int i = 0; i < reaching_frames_.length(); i++) {
+ if (!merge_labels_[i].is_bound()) {
+ reaching_frames_[i] = NULL;
+ __ bind(&merge_labels_[i]);
+ }
+ }
+
+ // There are non-NULL reaching frames with bound labels for each
+ // merge block, but only on backward targets.
+ } else {
+ // There were no forward jumps. There must be a current frame and
+ // this must be a bidirectional target.
+ ASSERT(reaching_frames_.length() == 1);
+ ASSERT(reaching_frames_[0] != NULL);
+ ASSERT(direction_ == BIDIRECTIONAL);
+
+ // Use a copy of the reaching frame so the original can be saved
+ // for possible reuse as a backward merge block.
+ RegisterFile empty;
+ cgen()->SetFrame(new VirtualFrame(reaching_frames_[0]), &empty);
+ __ bind(&merge_labels_[0]);
+ cgen()->frame()->MergeTo(entry_frame_);
+ }
+
+ __ bind(&entry_label_);
+}
+
+#undef __
+
+
+} } // namespace v8::internal
diff --git a/V8Binding/v8/src/arm/macro-assembler-arm.cc b/V8Binding/v8/src/arm/macro-assembler-arm.cc
new file mode 100644
index 0000000..4e24063
--- /dev/null
+++ b/V8Binding/v8/src/arm/macro-assembler-arm.cc
@@ -0,0 +1,959 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "bootstrapper.h"
+#include "codegen-inl.h"
+#include "debug.h"
+#include "runtime.h"
+
+namespace v8 {
+namespace internal {
+
+// Give alias names to registers
+Register cp = { 8 }; // JavaScript context pointer
+Register pp = { 10 }; // parameter pointer
+
+
+MacroAssembler::MacroAssembler(void* buffer, int size)
+ : Assembler(buffer, size),
+ unresolved_(0),
+ generating_stub_(false),
+ allow_stub_calls_(true),
+ code_object_(Heap::undefined_value()) {
+}
+
+
+// We always generate ARM code, never thumb code, even if V8 is compiled to
+// thumb, so we require inter-working support.
+#if defined(__thumb__) && !defined(__THUMB_INTERWORK__)
+#error "flag -mthumb-interwork missing"
+#endif
+
+
+// We do not support thumb inter-working with an ARM architecture that does
+// not support the blx instruction (below v5t).
+#if defined(__THUMB_INTERWORK__)
+#if !defined(__ARM_ARCH_5T__) && \
+ !defined(__ARM_ARCH_5TE__) && \
+ !defined(__ARM_ARCH_7A__) && \
+ !defined(__ARM_ARCH_7__)
+// add tests for other versions above v5t as required
+#error "for thumb inter-working we require architecture v5t or above"
+#endif
+#endif
+
+
+// Using blx may yield better code, so use it when required or when available
+#if defined(__THUMB_INTERWORK__) || defined(__ARM_ARCH_5__)
+#define USE_BLX 1
+#endif
+
+// Using bx does not yield better code, so use it only when required
+#if defined(__THUMB_INTERWORK__)
+#define USE_BX 1
+#endif
+
+
+void MacroAssembler::Jump(Register target, Condition cond) {
+#if USE_BX
+ bx(target, cond);
+#else
+ mov(pc, Operand(target), LeaveCC, cond);
+#endif
+}
+
+
+void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+ Condition cond) {
+#if USE_BX
+ mov(ip, Operand(target, rmode), LeaveCC, cond);
+ bx(ip, cond);
+#else
+ mov(pc, Operand(target, rmode), LeaveCC, cond);
+#endif
+}
+
+
+void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
+ Condition cond) {
+ ASSERT(!RelocInfo::IsCodeTarget(rmode));
+ Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
+}
+
+
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond) {
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
+ // 'code' is always generated ARM code, never THUMB code
+ Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+}
+
+
+void MacroAssembler::Call(Register target, Condition cond) {
+#if USE_BLX
+ blx(target, cond);
+#else
+ // set lr for return at current pc + 8
+ mov(lr, Operand(pc), LeaveCC, cond);
+ mov(pc, Operand(target), LeaveCC, cond);
+#endif
+}
+
+
+void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
+ Condition cond) {
+#if !defined(__arm__)
+ if (rmode == RelocInfo::RUNTIME_ENTRY) {
+ mov(r2, Operand(target, rmode), LeaveCC, cond);
+ // Set lr for return at current pc + 8.
+ mov(lr, Operand(pc), LeaveCC, cond);
+ // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
+ // Notify the simulator of the transition to C code.
+ swi(assembler::arm::call_rt_r2);
+ } else {
+ // set lr for return at current pc + 8
+ mov(lr, Operand(pc), LeaveCC, cond);
+ // emit a ldr<cond> pc, [pc + offset of target in constant pool]
+ mov(pc, Operand(target, rmode), LeaveCC, cond);
+ }
+#else
+ // Set lr for return at current pc + 8.
+ mov(lr, Operand(pc), LeaveCC, cond);
+ // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
+ mov(pc, Operand(target, rmode), LeaveCC, cond);
+#endif // !defined(__arm__)
+ // If USE_BLX is defined, we could emit a 'mov ip, target', followed by a
+ // 'blx ip'; however, the code would not be shorter than the above sequence
+ // and the target address of the call would be referenced by the first
+ // instruction rather than the second one, which would make it harder to patch
+ // (two instructions before the return address, instead of one).
+ ASSERT(kTargetAddrToReturnAddrDist == sizeof(Instr));
+}
+
+
+void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
+ Condition cond) {
+ ASSERT(!RelocInfo::IsCodeTarget(rmode));
+ Call(reinterpret_cast<intptr_t>(target), rmode, cond);
+}
+
+
+void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond) {
+ ASSERT(RelocInfo::IsCodeTarget(rmode));
+ // 'code' is always generated ARM code, never THUMB code
+ Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+}
+
+
+void MacroAssembler::Ret(Condition cond) {
+#if USE_BX
+ bx(lr, cond);
+#else
+ mov(pc, Operand(lr), LeaveCC, cond);
+#endif
+}
+
+
+void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
+ // Empty the const pool.
+ CheckConstPool(true, true);
+ add(pc, pc, Operand(index,
+ LSL,
+ assembler::arm::Instr::kInstrSizeLog2 - kSmiTagSize));
+ BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * sizeof(Instr));
+ nop(); // Jump table alignment.
+ for (int i = 0; i < targets.length(); i++) {
+ b(targets[i]);
+ }
+}
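+
+// A short sketch of the pc arithmetic above: reading pc on ARM yields
+// the address of the current instruction plus 8, i.e. the first table
+// entry just past the alignment nop, so with kSmiTagSize == 1 the add
+// effectively branches to
+//
+//   table_start + index_value * sizeof(Instr)
+//
+// which is the b(targets[index_value]) entry.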
+
+
+// Will clobber 4 registers: object, offset, scratch, ip. The
+// register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object, Register offset,
+ Register scratch) {
+ // This is how much we shift the remembered set bit offset to get the
+ // offset of the word in the remembered set. We divide by kBitsPerInt (32,
+ // shift right 5) and then multiply by kIntSize (4, shift left 2).
+ const int kRSetWordShift = 3;
+
+ Label fast, done;
+
+ // First, test that the object is not in the new space. We cannot set
+ // remembered set bits in the new space.
+ // object: heap object pointer (with tag)
+ // offset: offset to store location from the object
+ and_(scratch, object, Operand(Heap::NewSpaceMask()));
+ cmp(scratch, Operand(ExternalReference::new_space_start()));
+ b(eq, &done);
+
+ // Compute the bit offset in the remembered set.
+ // object: heap object pointer (with tag)
+ // offset: offset to store location from the object
+ mov(ip, Operand(Page::kPageAlignmentMask)); // load mask only once
+ and_(scratch, object, Operand(ip)); // offset into page of the object
+ add(offset, scratch, Operand(offset)); // add offset into the object
+ mov(offset, Operand(offset, LSR, kObjectAlignmentBits));
+
+ // Compute the page address from the heap object pointer.
+ // object: heap object pointer (with tag)
+ // offset: bit offset of store position in the remembered set
+ bic(object, object, Operand(ip));
+
+ // If the bit offset lies beyond the normal remembered set range, it is in
+ // the extra remembered set area of a large object.
+ // object: page start
+ // offset: bit offset of store position in the remembered set
+ cmp(offset, Operand(Page::kPageSize / kPointerSize));
+ b(lt, &fast);
+
+ // Adjust the bit offset to be relative to the start of the extra
+ // remembered set and the start address to be the address of the extra
+ // remembered set.
+ sub(offset, offset, Operand(Page::kPageSize / kPointerSize));
+ // Load the array length into 'scratch' and multiply by four to get the
+ // size in bytes of the elements.
+ ldr(scratch, MemOperand(object, Page::kObjectStartOffset
+ + FixedArray::kLengthOffset));
+ mov(scratch, Operand(scratch, LSL, kObjectAlignmentBits));
+ // Add the page header (including remembered set), array header, and array
+ // body size to the page address.
+ add(object, object, Operand(Page::kObjectStartOffset
+ + Array::kHeaderSize));
+ add(object, object, Operand(scratch));
+
+ bind(&fast);
+ // Get address of the rset word.
+ // object: start of the remembered set (page start for the fast case)
+ // offset: bit offset of store position in the remembered set
+ bic(scratch, offset, Operand(kBitsPerInt - 1)); // clear the bit offset
+ add(object, object, Operand(scratch, LSR, kRSetWordShift));
+ // Get bit offset in the rset word.
+ // object: address of remembered set word
+ // offset: bit offset of store position
+ and_(offset, offset, Operand(kBitsPerInt - 1));
+
+ ldr(scratch, MemOperand(object));
+ mov(ip, Operand(1));
+ orr(scratch, scratch, Operand(ip, LSL, offset));
+ str(scratch, MemOperand(object));
+
+ bind(&done);
+}
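+
+// A minimal sketch of the fast-case addressing above in C++ terms
+// (ignoring the large-object adjustment; variable names illustrative):
+//
+//   uintptr_t page = object & ~Page::kPageAlignmentMask; // page start
+//   int bit = ((object & Page::kPageAlignmentMask) + offset)
+//             >> kObjectAlignmentBits; // bit index within the rset
+//   uint32_t* word = reinterpret_cast<uint32_t*>(
+//       page + ((bit & ~(kBitsPerInt - 1)) >> kRSetWordShift));
+//   *word |= 1 << (bit & (kBitsPerInt - 1));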
+
+
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
+ // r0-r3: preserved
+ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ mov(ip, Operand(Smi::FromInt(type)));
+ push(ip);
+ mov(ip, Operand(CodeObject()));
+ push(ip);
+ add(fp, sp, Operand(3 * kPointerSize)); // Adjust FP to point to saved FP.
+}
+
+
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+ // r0: preserved
+ // r1: preserved
+ // r2: preserved
+
+ // Drop the execution stack down to the frame pointer and restore
+ // the caller frame pointer and return address.
+ mov(sp, fp);
+ ldm(ia_w, sp, fp.bit() | lr.bit());
+}
+
+
+void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
+ ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);
+
+ // Compute the argv pointer and keep it in a callee-saved register.
+ // r0 is argc.
+ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
+ sub(r6, r6, Operand(kPointerSize));
+
+ // Compute the parameter pointer before making changes and save it in
+ // the ip register so that it is restored as the sp register on exit,
+ // thereby popping the args.
+
+ // ip = sp + kPointerSize * #args;
+ add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
+
+ // Align the stack at this point. After this point we have 5 pushes,
+ // so in fact we have to unalign here! See also the assert on the
+ // alignment immediately below.
+ if (OS::ActivationFrameAlignment() != kPointerSize) {
+ // This code needs to be made more general if this assert doesn't hold.
+ ASSERT(OS::ActivationFrameAlignment() == 2 * kPointerSize);
+ mov(r7, Operand(Smi::FromInt(0)));
+ tst(sp, Operand(OS::ActivationFrameAlignment() - 1));
+ push(r7, eq); // Conditional push instruction.
+ }
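+ // In C++ terms the conditional push above is
+ //
+ //   if ((sp & (OS::ActivationFrameAlignment() - 1)) == 0) push(filler);
+ //
+ // deliberately un-aligning sp here so that the pushes below leave it
+ // aligned again by the time C code takes over.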
+
+ // Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
+ stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
+ mov(fp, Operand(sp)); // setup new frame pointer
+
+ // Push debug marker.
+ mov(ip, Operand(type == StackFrame::EXIT_DEBUG ? 1 : 0));
+ push(ip);
+
+ // Save the frame pointer and the context in top.
+ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+ str(fp, MemOperand(ip));
+ mov(ip, Operand(ExternalReference(Top::k_context_address)));
+ str(cp, MemOperand(ip));
+
+ // Setup argc and the builtin function in callee-saved registers.
+ mov(r4, Operand(r0));
+ mov(r5, Operand(r1));
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Save the state of all registers to the stack from the memory
+ // location. This is needed to allow nested break points.
+ if (type == StackFrame::EXIT_DEBUG) {
+ // Use sp as base to push.
+ CopyRegistersFromMemoryToStack(sp, kJSCallerSaved);
+ }
+#endif
+}
+
+
+void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // Restore the memory copy of the registers by digging them out from
+ // the stack. This is needed to allow nested break points.
+ if (type == StackFrame::EXIT_DEBUG) {
+ // This code intentionally clobbers r2 and r3.
+ const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
+ const int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
+ add(r3, fp, Operand(kOffset));
+ CopyRegistersFromStackToMemory(r3, r2, kJSCallerSaved);
+ }
+#endif
+
+ // Clear top frame.
+ mov(r3, Operand(0));
+ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
+ str(r3, MemOperand(ip));
+
+ // Restore current context from top and clear it in debug mode.
+ mov(ip, Operand(ExternalReference(Top::k_context_address)));
+ ldr(cp, MemOperand(ip));
+#ifdef DEBUG
+ str(r3, MemOperand(ip));
+#endif
+
+ // Pop the arguments, restore registers, and return.
+ mov(sp, Operand(fp)); // respect ABI stack constraint
+ ldm(ia, sp, fp.bit() | sp.bit() | pc.bit());
+}
+
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ Register code_reg,
+ Label* done,
+ InvokeFlag flag) {
+ bool definitely_matches = false;
+ Label regular_invoke;
+
+ // Check whether the expected and actual arguments count match. If not,
+ // set up registers according to the contract with
+ // ArgumentsAdaptorTrampoline:
+ // r0: actual arguments count
+ // r1: function (passed through to callee)
+ // r2: expected arguments count
+ // r3: callee code entry
+
+ // The code below is made a lot easier because the calling code already sets
+ // up actual and expected registers according to the contract if values are
+ // passed in registers.
+ ASSERT(actual.is_immediate() || actual.reg().is(r0));
+ ASSERT(expected.is_immediate() || expected.reg().is(r2));
+ ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
+
+ if (expected.is_immediate()) {
+ ASSERT(actual.is_immediate());
+ if (expected.immediate() == actual.immediate()) {
+ definitely_matches = true;
+ } else {
+ mov(r0, Operand(actual.immediate()));
+ const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+ if (expected.immediate() == sentinel) {
+ // Don't worry about adapting arguments for builtins that
+ // don't want that done. Skip adaptation code by making it look
+ // like we have a match between expected and actual number of
+ // arguments.
+ definitely_matches = true;
+ } else {
+ mov(r2, Operand(expected.immediate()));
+ }
+ }
+ } else {
+ if (actual.is_immediate()) {
+ cmp(expected.reg(), Operand(actual.immediate()));
+ b(eq, &regular_invoke);
+ mov(r0, Operand(actual.immediate()));
+ } else {
+ cmp(expected.reg(), Operand(actual.reg()));
+ b(eq, &regular_invoke);
+ }
+ }
+
+ if (!definitely_matches) {
+ if (!code_constant.is_null()) {
+ mov(r3, Operand(code_constant));
+ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+ }
+
+ Handle<Code> adaptor =
+ Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ if (flag == CALL_FUNCTION) {
+ Call(adaptor, RelocInfo::CODE_TARGET);
+ b(done);
+ } else {
+ Jump(adaptor, RelocInfo::CODE_TARGET);
+ }
+ bind(&regular_invoke);
+ }
+}
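+
+// For the immediate case, the adaptation decision above boils down to:
+//
+//   if (expected == actual ||
+//       expected == SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+//     // invoke the callee code directly
+//   } else {
+//     // r0 = actual, r2 = expected; route the call through the
+//     // ArgumentsAdaptorTrampoline
+//   }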
+
+
+void MacroAssembler::InvokeCode(Register code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag) {
+ Label done;
+
+ InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
+ if (flag == CALL_FUNCTION) {
+ Call(code);
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ Jump(code);
+ }
+
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ bind(&done);
+}
+
+
+void MacroAssembler::InvokeCode(Handle<Code> code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ RelocInfo::Mode rmode,
+ InvokeFlag flag) {
+ Label done;
+
+ InvokePrologue(expected, actual, code, no_reg, &done, flag);
+ if (flag == CALL_FUNCTION) {
+ Call(code, rmode);
+ } else {
+ Jump(code, rmode);
+ }
+
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ bind(&done);
+}
+
+
+void MacroAssembler::InvokeFunction(Register fun,
+ const ParameterCount& actual,
+ InvokeFlag flag) {
+ // Contract with called JS functions requires that function is passed in r1.
+ ASSERT(fun.is(r1));
+
+ Register expected_reg = r2;
+ Register code_reg = r3;
+
+ ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ ldr(expected_reg,
+ FieldMemOperand(code_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ ldr(code_reg,
+ MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
+ add(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ ParameterCount expected(expected_reg);
+ InvokeCode(code_reg, expected, actual, flag);
+}
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+void MacroAssembler::SaveRegistersToMemory(RegList regs) {
+ ASSERT((regs & ~kJSCallerSaved) == 0);
+ // Copy the content of registers to memory location.
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ if ((regs & (1 << r)) != 0) {
+ Register reg = { r };
+ mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
+ str(reg, MemOperand(ip));
+ }
+ }
+}
+
+
+void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
+ ASSERT((regs & ~kJSCallerSaved) == 0);
+ // Copy the content of memory location to registers.
+ for (int i = kNumJSCallerSaved; --i >= 0;) {
+ int r = JSCallerSavedCode(i);
+ if ((regs & (1 << r)) != 0) {
+ Register reg = { r };
+ mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
+ ldr(reg, MemOperand(ip));
+ }
+ }
+}
+
+
+void MacroAssembler::CopyRegistersFromMemoryToStack(Register base,
+ RegList regs) {
+ ASSERT((regs & ~kJSCallerSaved) == 0);
+ // Copy the content of the memory location to the stack and adjust base.
+ for (int i = kNumJSCallerSaved; --i >= 0;) {
+ int r = JSCallerSavedCode(i);
+ if ((regs & (1 << r)) != 0) {
+ mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
+ ldr(ip, MemOperand(ip));
+ str(ip, MemOperand(base, 4, NegPreIndex));
+ }
+ }
+}
+
+
+void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
+ Register scratch,
+ RegList regs) {
+ ASSERT((regs & ~kJSCallerSaved) == 0);
+ // Copy the content of the stack to the memory location and adjust base.
+ for (int i = 0; i < kNumJSCallerSaved; i++) {
+ int r = JSCallerSavedCode(i);
+ if ((regs & (1 << r)) != 0) {
+ mov(ip, Operand(ExternalReference(Debug_Address::Register(i))));
+ ldr(scratch, MemOperand(base, 4, PostIndex));
+ str(scratch, MemOperand(ip));
+ }
+ }
+}
+#endif
+
+void MacroAssembler::PushTryHandler(CodeLocation try_location,
+ HandlerType type) {
+ ASSERT(StackHandlerConstants::kSize == 6 * kPointerSize); // adjust this code
+ // The pc (return address) is passed in register lr.
+ if (try_location == IN_JAVASCRIPT) {
+ stm(db_w, sp, pp.bit() | fp.bit() | lr.bit());
+ if (type == TRY_CATCH_HANDLER) {
+ mov(r3, Operand(StackHandler::TRY_CATCH));
+ } else {
+ mov(r3, Operand(StackHandler::TRY_FINALLY));
+ }
+ push(r3); // state
+ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
+ ldr(r1, MemOperand(r3));
+ push(r1); // next sp
+ str(sp, MemOperand(r3)); // chain handler
+ mov(r0, Operand(Smi::FromInt(StackHandler::kCodeNotPresent))); // new TOS
+ push(r0);
+ } else {
+ // Must preserve r0-r4, r5-r7 are available.
+ ASSERT(try_location == IN_JS_ENTRY);
+ // The parameter pointer is meaningless here and fp does not point to a JS
+ // frame. So we save NULL for both pp and fp. We expect the code throwing an
+ // exception to check fp before dereferencing it to restore the context.
+ mov(pp, Operand(0)); // set pp to NULL
+ mov(ip, Operand(0)); // to save a NULL fp
+ stm(db_w, sp, pp.bit() | ip.bit() | lr.bit());
+ mov(r6, Operand(StackHandler::ENTRY));
+ push(r6); // state
+ mov(r7, Operand(ExternalReference(Top::k_handler_address)));
+ ldr(r6, MemOperand(r7));
+ push(r6); // next sp
+ str(sp, MemOperand(r7)); // chain handler
+ mov(r5, Operand(Smi::FromInt(StackHandler::kCodeNotPresent))); // new TOS
+ push(r5); // flush TOS
+ }
+}
+
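+// Layout sketch (illustrative, not in the original source), assuming
+// STMDB stores lower-numbered registers at lower addresses: after
+// PushTryHandler the six-word handler frame is, from sp upwards,
+// code slot, next handler address, state, pp, fp, lr.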
+
+Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
+ JSObject* holder, Register holder_reg,
+ Register scratch,
+ Label* miss) {
+ // Make sure there's no overlap between scratch and the other
+ // registers.
+ ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));
+
+ // Keep track of the current object in register reg.
+ Register reg = object_reg;
+ int depth = 1;
+
+  // Traverse the prototype chain from the object and check each map.
+ while (object != holder) {
+ depth++;
+
+ // Only global objects and objects that do not require access
+ // checks are allowed in stubs.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ // Get the map of the current object.
+ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
+ cmp(scratch, Operand(Handle<Map>(object->map())));
+
+ // Branch on the result of the map check.
+ b(ne, miss);
+
+ // Check access rights to the global object. This has to happen
+ // after the map check so that we know that the object is
+ // actually a global object.
+ if (object->IsJSGlobalProxy()) {
+ CheckAccessGlobalProxy(reg, scratch, miss);
+ // Restore scratch register to be the map of the object. In the
+ // new space case below, we load the prototype from the map in
+ // the scratch register.
+ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
+ }
+
+    reg = holder_reg;  // From now on the object is in holder_reg.
+ JSObject* prototype = JSObject::cast(object->GetPrototype());
+ if (Heap::InNewSpace(prototype)) {
+ // The prototype is in new space; we cannot store a reference
+ // to it in the code. Load it from the map.
+ ldr(reg, FieldMemOperand(scratch, Map::kPrototypeOffset));
+ } else {
+ // The prototype is in old space; load it directly.
+ mov(reg, Operand(Handle<JSObject>(prototype)));
+ }
+
+ // Go to the next object in the prototype chain.
+ object = prototype;
+ }
+
+ // Check the holder map.
+ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
+ cmp(scratch, Operand(Handle<Map>(object->map())));
+ b(ne, miss);
+
+ // Log the check depth.
+ LOG(IntEvent("check-maps-depth", depth));
+
+ // Perform security check for access to the global object and return
+ // the holder register.
+ ASSERT(object == holder);
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+ if (object->IsJSGlobalProxy()) {
+ CheckAccessGlobalProxy(reg, scratch, miss);
+ }
+ return reg;
+}
+
+
+void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch,
+ Label* miss) {
+ Label same_contexts;
+
+ ASSERT(!holder_reg.is(scratch));
+ ASSERT(!holder_reg.is(ip));
+ ASSERT(!scratch.is(ip));
+
+ // Load current lexical context from the stack frame.
+ ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // In debug mode, make sure the lexical context is set.
+#ifdef DEBUG
+ cmp(scratch, Operand(0));
+ Check(ne, "we should not have an empty lexical context");
+#endif
+
+ // Load the global context of the current context.
+ int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ ldr(scratch, FieldMemOperand(scratch, offset));
+ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+
+ // Check the context is a global context.
+ if (FLAG_debug_code) {
+ // TODO(119): avoid push(holder_reg)/pop(holder_reg)
+    // Cannot use ip as a temporary in this verification code because
+    // ip is clobbered as part of cmp with an object Operand.
+ push(holder_reg); // Temporarily save holder on the stack.
+ // Read the first word and compare to the global_context_map.
+ ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
+ cmp(holder_reg, Operand(Factory::global_context_map()));
+ Check(eq, "JSGlobalObject::global_context should be a global context.");
+ pop(holder_reg); // Restore holder.
+ }
+
+ // Check if both contexts are the same.
+ ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ cmp(scratch, Operand(ip));
+ b(eq, &same_contexts);
+
+ // Check the context is a global context.
+ if (FLAG_debug_code) {
+ // TODO(119): avoid push(holder_reg)/pop(holder_reg)
+    // Cannot use ip as a temporary in this verification code because
+    // ip is clobbered as part of cmp with an object Operand.
+ push(holder_reg); // Temporarily save holder on the stack.
+ mov(holder_reg, ip); // Move ip to its holding place.
+ cmp(holder_reg, Operand(Factory::null_value()));
+ Check(ne, "JSGlobalProxy::context() should not be null.");
+
+ ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
+ cmp(holder_reg, Operand(Factory::global_context_map()));
+ Check(eq, "JSGlobalObject::global_context should be a global context.");
+    // Restoring ip is not needed; ip is reloaded below.
+ pop(holder_reg); // Restore holder.
+ // Restore ip to holder's context.
+ ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ }
+
+ // Check that the security token in the calling global object is
+ // compatible with the security token in the receiving global
+ // object.
+ int token_offset = Context::kHeaderSize +
+ Context::SECURITY_TOKEN_INDEX * kPointerSize;
+
+ ldr(scratch, FieldMemOperand(scratch, token_offset));
+ ldr(ip, FieldMemOperand(ip, token_offset));
+ cmp(scratch, Operand(ip));
+ b(ne, miss);
+
+ bind(&same_contexts);
+}
+
+
+void MacroAssembler::CallStub(CodeStub* stub) {
+ ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::StubReturn(int argc) {
+ ASSERT(argc >= 1 && generating_stub());
+ if (argc > 1)
+ add(sp, sp, Operand((argc - 1) * kPointerSize));
+ Ret();
+}
+
+
+void MacroAssembler::IllegalOperation(int num_arguments) {
+ if (num_arguments > 0) {
+ add(sp, sp, Operand(num_arguments * kPointerSize));
+ }
+ mov(r0, Operand(Factory::undefined_value()));
+}
+
+
+void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
+ // All parameters are on the stack. r0 has the return value after call.
+
+ // If the expected number of arguments of the runtime function is
+ // constant, we check that the actual number of arguments match the
+ // expectation.
+ if (f->nargs >= 0 && f->nargs != num_arguments) {
+ IllegalOperation(num_arguments);
+ return;
+ }
+
+ Runtime::FunctionId function_id =
+ static_cast<Runtime::FunctionId>(f->stub_id);
+ RuntimeStub stub(function_id, num_arguments);
+ CallStub(&stub);
+}
+
+
+void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments);
+}
+
+
+void MacroAssembler::TailCallRuntime(const ExternalReference& ext,
+ int num_arguments) {
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ mov(r0, Operand(num_arguments));
+ JumpToBuiltin(ext);
+}
+
+
+void MacroAssembler::JumpToBuiltin(const ExternalReference& builtin) {
+#if defined(__thumb__)
+ // Thumb mode builtin.
+ ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
+#endif
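+  // (Illustrative note, not in the original source: under ARM/Thumb
+  // interworking, bit 0 of a branch target selects Thumb state, which
+  // is why a Thumb-mode builtin entry must have its low bit set.)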
+ mov(r1, Operand(builtin));
+ CEntryStub stub;
+ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
+ bool* resolved) {
+ // Contract with compiled functions is that the function is passed in r1.
+ int builtins_offset =
+ JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
+ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ ldr(r1, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
+ ldr(r1, FieldMemOperand(r1, builtins_offset));
+
+ return Builtins::GetCode(id, resolved);
+}
+
+
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
+ InvokeJSFlags flags) {
+ bool resolved;
+ Handle<Code> code = ResolveBuiltin(id, &resolved);
+
+ if (flags == CALL_JS) {
+ Call(code, RelocInfo::CODE_TARGET);
+ } else {
+ ASSERT(flags == JUMP_JS);
+ Jump(code, RelocInfo::CODE_TARGET);
+ }
+
+ if (!resolved) {
+ const char* name = Builtins::GetName(id);
+ int argc = Builtins::GetArgumentsCount(id);
+ uint32_t flags =
+ Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
+ Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
+ Bootstrapper::FixupFlagsUseCodeObject::encode(false);
+ Unresolved entry = { pc_offset() - sizeof(Instr), flags, name };
+ unresolved_.Add(entry);
+ }
+}
+
+
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+ bool resolved;
+ Handle<Code> code = ResolveBuiltin(id, &resolved);
+
+ mov(target, Operand(code));
+ if (!resolved) {
+ const char* name = Builtins::GetName(id);
+ int argc = Builtins::GetArgumentsCount(id);
+ uint32_t flags =
+ Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
+ Bootstrapper::FixupFlagsIsPCRelative::encode(true) |
+ Bootstrapper::FixupFlagsUseCodeObject::encode(true);
+ Unresolved entry = { pc_offset() - sizeof(Instr), flags, name };
+ unresolved_.Add(entry);
+ }
+
+ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+}
+
+
+void MacroAssembler::SetCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ mov(scratch1, Operand(value));
+ mov(scratch2, Operand(ExternalReference(counter)));
+ str(scratch1, MemOperand(scratch2));
+ }
+}
+
+
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ mov(scratch2, Operand(ExternalReference(counter)));
+ ldr(scratch1, MemOperand(scratch2));
+ add(scratch1, scratch1, Operand(value));
+ str(scratch1, MemOperand(scratch2));
+ }
+}
+
+
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2) {
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ mov(scratch2, Operand(ExternalReference(counter)));
+ ldr(scratch1, MemOperand(scratch2));
+ sub(scratch1, scratch1, Operand(value));
+ str(scratch1, MemOperand(scratch2));
+ }
+}
+
+
+void MacroAssembler::Assert(Condition cc, const char* msg) {
+ if (FLAG_debug_code)
+ Check(cc, msg);
+}
+
+
+void MacroAssembler::Check(Condition cc, const char* msg) {
+ Label L;
+ b(cc, &L);
+ Abort(msg);
+ // will not return here
+ bind(&L);
+}
+
+
+void MacroAssembler::Abort(const char* msg) {
+ // We want to pass the msg string like a smi to avoid GC
+ // problems, however msg is not guaranteed to be aligned
+ // properly. Instead, we pass an aligned pointer that is
+ // a proper v8 smi, but also pass the alignment difference
+ // from the real pointer as a smi.
+ intptr_t p1 = reinterpret_cast<intptr_t>(msg);
+ intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
+ ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
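+  // Worked example (illustrative, not in the original source): with a
+  // 1-bit smi tag, a msg pointer at 0x1005 yields p0 == 0x1004, and the
+  // difference 1 is passed along as a second smi.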
+#ifdef DEBUG
+ if (msg != NULL) {
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
+#endif
+ mov(r0, Operand(p0));
+ push(r0);
+ mov(r0, Operand(Smi::FromInt(p1 - p0)));
+ push(r0);
+ CallRuntime(Runtime::kAbort, 2);
+ // will not return here
+}
+
+} } // namespace v8::internal
diff --git a/V8Binding/v8/src/arm/macro-assembler-arm.h b/V8Binding/v8/src/arm/macro-assembler-arm.h
new file mode 100644
index 0000000..27eeab2
--- /dev/null
+++ b/V8Binding/v8/src/arm/macro-assembler-arm.h
@@ -0,0 +1,314 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
+#define V8_ARM_MACRO_ASSEMBLER_ARM_H_
+
+#include "assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Give alias names to registers
+extern Register cp; // JavaScript context pointer
+extern Register pp; // parameter pointer
+
+
+// Helper types to make boolean flag easier to read at call-site.
+enum InvokeFlag {
+ CALL_FUNCTION,
+ JUMP_FUNCTION
+};
+
+enum InvokeJSFlags {
+ CALL_JS,
+ JUMP_JS
+};
+
+enum ExitJSFlag {
+ RETURN,
+ DO_NOT_RETURN
+};
+
+enum CodeLocation {
+ IN_JAVASCRIPT,
+ IN_JS_ENTRY,
+ IN_C_ENTRY
+};
+
+enum HandlerType {
+ TRY_CATCH_HANDLER,
+ TRY_FINALLY_HANDLER,
+ JS_ENTRY_HANDLER
+};
+
+
+// MacroAssembler implements a collection of frequently used macros.
+class MacroAssembler: public Assembler {
+ public:
+ MacroAssembler(void* buffer, int size);
+
+ // ---------------------------------------------------------------------------
+ // Low-level helpers for compiler
+
+ // Jump, Call, and Ret pseudo instructions implementing inter-working
+ private:
+ void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
+ void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
+ public:
+ void Jump(Register target, Condition cond = al);
+ void Jump(byte* target, RelocInfo::Mode rmode, Condition cond = al);
+ void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+ void Call(Register target, Condition cond = al);
+ void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
+ void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+ void Ret(Condition cond = al);
+ // Jumps to the label at the index given by the Smi in "index".
+ void SmiJumpTable(Register index, Vector<Label*> targets);
+
+ // Sets the remembered set bit for [address+offset], where address is the
+ // address of the heap object 'object'. The address must be in the first 8K
+ // of an allocated page. The 'scratch' register is used in the
+ // implementation and all 3 registers are clobbered by the operation, as
+ // well as the ip register.
+ void RecordWrite(Register object, Register offset, Register scratch);
+
+ // ---------------------------------------------------------------------------
+ // Activation frames
+
+ void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
+ void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
+
+ void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
+ void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
+
+ // Enter specific kind of exit frame; either EXIT or
+ // EXIT_DEBUG. Expects the number of arguments in register r0 and
+ // the builtin function to call in register r1. Exits with argc in
+  // r4, argv in r6, and the builtin function to call in r5.
+ void EnterExitFrame(StackFrame::Type type);
+
+ // Leave the current exit frame. Expects the return value in r0.
+ void LeaveExitFrame(StackFrame::Type type);
+
+
+ // ---------------------------------------------------------------------------
+ // JavaScript invokes
+
+ // Invoke the JavaScript function code by either calling or jumping.
+ void InvokeCode(Register code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ InvokeFlag flag);
+
+ void InvokeCode(Handle<Code> code,
+ const ParameterCount& expected,
+ const ParameterCount& actual,
+ RelocInfo::Mode rmode,
+ InvokeFlag flag);
+
+ // Invoke the JavaScript function in the given register. Changes the
+ // current context to the context in the function before invoking.
+ void InvokeFunction(Register function,
+ const ParameterCount& actual,
+ InvokeFlag flag);
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // ---------------------------------------------------------------------------
+ // Debugger Support
+
+ void SaveRegistersToMemory(RegList regs);
+ void RestoreRegistersFromMemory(RegList regs);
+ void CopyRegistersFromMemoryToStack(Register base, RegList regs);
+ void CopyRegistersFromStackToMemory(Register base,
+ Register scratch,
+ RegList regs);
+#endif
+
+ // ---------------------------------------------------------------------------
+ // Exception handling
+
+ // Push a new try handler and link into try handler chain.
+ // The return address must be passed in register lr.
+ // On exit, r0 contains TOS (code slot).
+ void PushTryHandler(CodeLocation try_location, HandlerType type);
+
+
+ // ---------------------------------------------------------------------------
+ // Inline caching support
+
+  // Generates code that verifies that the maps of objects in the
+  // prototype chain of object haven't changed since the code was
+  // generated and branches to the miss label if any map has. If
+  // necessary the function also generates code for security checks
+  // in case of global object holders. The scratch and holder
+  // registers are always clobbered, but the object register is only
+  // clobbered if it is the same as the holder register. The function
+  // returns a register containing the holder - either object_reg or
+  // holder_reg.
+ Register CheckMaps(JSObject* object, Register object_reg,
+ JSObject* holder, Register holder_reg,
+ Register scratch, Label* miss);
+
+  // Generate code for checking access rights - used for security checks
+  // on access to global objects across environments. The holder register
+  // is left untouched, while the scratch register and ip are clobbered.
+ void CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch,
+ Label* miss);
+
+
+ // ---------------------------------------------------------------------------
+ // Support functions.
+
+ // Generates code for reporting that an illegal operation has
+ // occurred.
+ void IllegalOperation(int num_arguments);
+
+
+ // ---------------------------------------------------------------------------
+ // Runtime calls
+
+ // Call a code stub.
+ void CallStub(CodeStub* stub);
+ void CallJSExitStub(CodeStub* stub);
+
+ // Return from a code stub after popping its arguments.
+ void StubReturn(int argc);
+
+ // Call a runtime routine.
+ // Eventually this should be used for all C calls.
+ void CallRuntime(Runtime::Function* f, int num_arguments);
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments);
+
+ // Tail call of a runtime routine (jump).
+ // Like JumpToBuiltin, but also takes care of passing the number
+ // of parameters.
+ void TailCallRuntime(const ExternalReference& ext, int num_arguments);
+
+ // Jump to the builtin routine.
+ void JumpToBuiltin(const ExternalReference& builtin);
+
+ // Invoke specified builtin JavaScript function. Adds an entry to
+ // the unresolved list if the name does not resolve.
+ void InvokeBuiltin(Builtins::JavaScript id, InvokeJSFlags flags);
+
+  // Store the code object for the given builtin in the target register
+  // and set up the function in r1.
+ void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+
+ struct Unresolved {
+ int pc;
+ uint32_t flags; // see Bootstrapper::FixupFlags decoders/encoders.
+ const char* name;
+ };
+ List<Unresolved>* unresolved() { return &unresolved_; }
+
+ Handle<Object> CodeObject() { return code_object_; }
+
+
+ // ---------------------------------------------------------------------------
+ // StatsCounter support
+
+ void SetCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2);
+ void IncrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2);
+ void DecrementCounter(StatsCounter* counter, int value,
+ Register scratch1, Register scratch2);
+
+
+ // ---------------------------------------------------------------------------
+ // Debugging
+
+ // Calls Abort(msg) if the condition cc is not satisfied.
+ // Use --debug_code to enable.
+ void Assert(Condition cc, const char* msg);
+
+ // Like Assert(), but always enabled.
+ void Check(Condition cc, const char* msg);
+
+ // Print a message to stdout and abort execution.
+ void Abort(const char* msg);
+
+ // Verify restrictions about code generated in stubs.
+ void set_generating_stub(bool value) { generating_stub_ = value; }
+ bool generating_stub() { return generating_stub_; }
+ void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
+ bool allow_stub_calls() { return allow_stub_calls_; }
+
+ private:
+ List<Unresolved> unresolved_;
+ bool generating_stub_;
+ bool allow_stub_calls_;
+ Handle<Object> code_object_; // This handle will be patched with the code
+ // object on installation.
+
+ // Helper functions for generating invokes.
+ void InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ Register code_reg,
+ Label* done,
+ InvokeFlag flag);
+
+  // Get the code for the given builtin. Reports in the 'resolved' flag
+  // whether the function could be resolved.
+ Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
+
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void LeaveFrame(StackFrame::Type type);
+};
+
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+// Generate a MemOperand for loading a field from an object.
+static inline MemOperand FieldMemOperand(Register object, int offset) {
+ return MemOperand(object, offset - kHeapObjectTag);
+}
+
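+// Example usage (illustrative): the -kHeapObjectTag adjustment lets
+// callers name tagged-object fields directly, as in
+//   ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));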
+
+#ifdef GENERATED_CODE_COVERAGE
+#define CODE_COVERAGE_STRINGIFY(x) #x
+#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
+#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
+#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
+#else
+#define ACCESS_MASM(masm) masm->
+#endif
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_MACRO_ASSEMBLER_ARM_H_
diff --git a/V8Binding/v8/src/arm/regexp-macro-assembler-arm.cc b/V8Binding/v8/src/arm/regexp-macro-assembler-arm.cc
new file mode 100644
index 0000000..78ebc7e
--- /dev/null
+++ b/V8Binding/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -0,0 +1,44 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+#include "ast.h"
+#include "regexp-macro-assembler.h"
+#include "arm/regexp-macro-assembler-arm.h"
+
+namespace v8 {
+namespace internal {
+
+RegExpMacroAssemblerARM::RegExpMacroAssemblerARM() {
+ UNIMPLEMENTED();
+}
+
+
+RegExpMacroAssemblerARM::~RegExpMacroAssemblerARM() {}
+
+}} // namespace v8::internal
+
diff --git a/V8Binding/v8/src/arm/regexp-macro-assembler-arm.h b/V8Binding/v8/src/arm/regexp-macro-assembler-arm.h
new file mode 100644
index 0000000..de55183
--- /dev/null
+++ b/V8Binding/v8/src/arm/regexp-macro-assembler-arm.h
@@ -0,0 +1,42 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
+#define V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
+
+namespace v8 {
+namespace internal {
+
+class RegExpMacroAssemblerARM: public RegExpMacroAssembler {
+ public:
+ RegExpMacroAssemblerARM();
+ virtual ~RegExpMacroAssemblerARM();
+};
+
+}} // namespace v8::internal
+
+#endif // V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
diff --git a/V8Binding/v8/src/arm/register-allocator-arm-inl.h b/V8Binding/v8/src/arm/register-allocator-arm-inl.h
new file mode 100644
index 0000000..d98818f
--- /dev/null
+++ b/V8Binding/v8/src/arm/register-allocator-arm-inl.h
@@ -0,0 +1,103 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
+#define V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
+
+#include "v8.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+bool RegisterAllocator::IsReserved(Register reg) {
+ return reg.is(cp) || reg.is(fp) || reg.is(sp) || reg.is(pc);
+}
+
+
+// The register allocator uses small integers to represent the
+// non-reserved assembler registers. The mapping is:
+//
+// r0 <-> 0
+// r1 <-> 1
+// r2 <-> 2
+// r3 <-> 3
+// r4 <-> 4
+// r5 <-> 5
+// r6 <-> 6
+// r7 <-> 7
+// r9 <-> 8
+// r10 <-> 9
+// ip <-> 10
+// lr <-> 11
+
+int RegisterAllocator::ToNumber(Register reg) {
+ ASSERT(reg.is_valid() && !IsReserved(reg));
+ static int numbers[] = {
+ 0, // r0
+ 1, // r1
+ 2, // r2
+ 3, // r3
+ 4, // r4
+ 5, // r5
+ 6, // r6
+ 7, // r7
+ -1, // cp
+ 8, // r9
+ 9, // r10
+ -1, // fp
+ 10, // ip
+ -1, // sp
+ 11, // lr
+ -1 // pc
+ };
+ return numbers[reg.code()];
+}
+
+
+Register RegisterAllocator::ToRegister(int num) {
+ ASSERT(num >= 0 && num < kNumRegisters);
+ static Register registers[] =
+ { r0, r1, r2, r3, r4, r5, r6, r7, r9, r10, ip, lr };
+ return registers[num];
+}
+
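+// Sanity example (illustrative, not in the original source): the two
+// tables are inverses on allocatable registers, e.g. ToNumber(r9) == 8
+// and ToRegister(8).is(r9), while reserved registers such as cp map to
+// -1 and must never reach ToRegister.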
+
+void RegisterAllocator::Initialize() {
+ Reset();
+ // The non-reserved r1 and lr registers are live on JS function entry.
+ Use(r1); // JS function.
+ Use(lr); // Return address.
+}
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
diff --git a/V8Binding/v8/src/arm/register-allocator-arm.cc b/V8Binding/v8/src/arm/register-allocator-arm.cc
new file mode 100644
index 0000000..ad0c7f9
--- /dev/null
+++ b/V8Binding/v8/src/arm/register-allocator-arm.cc
@@ -0,0 +1,59 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "register-allocator-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// Result implementation.
+
+void Result::ToRegister() {
+ UNIMPLEMENTED();
+}
+
+
+void Result::ToRegister(Register target) {
+ UNIMPLEMENTED();
+}
+
+
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.
+
+Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
+ // No byte registers on ARM.
+ UNREACHABLE();
+ return Result();
+}
+
+
+} } // namespace v8::internal
diff --git a/V8Binding/v8/src/arm/register-allocator-arm.h b/V8Binding/v8/src/arm/register-allocator-arm.h
new file mode 100644
index 0000000..f953ed9
--- /dev/null
+++ b/V8Binding/v8/src/arm/register-allocator-arm.h
@@ -0,0 +1,43 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_REGISTER_ALLOCATOR_ARM_H_
+#define V8_ARM_REGISTER_ALLOCATOR_ARM_H_
+
+namespace v8 {
+namespace internal {
+
+class RegisterAllocatorConstants : public AllStatic {
+ public:
+ static const int kNumRegisters = 12;
+ static const int kInvalidRegister = -1;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_REGISTER_ALLOCATOR_ARM_H_
diff --git a/V8Binding/v8/src/arm/simulator-arm.cc b/V8Binding/v8/src/arm/simulator-arm.cc
new file mode 100644
index 0000000..b8b6663
--- /dev/null
+++ b/V8Binding/v8/src/arm/simulator-arm.cc
@@ -0,0 +1,1688 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "disasm.h"
+#include "arm/constants-arm.h"
+#include "arm/simulator-arm.h"
+
+#if !defined(__arm__)
+
+// Only build the simulator if not compiling for real ARM hardware.
+namespace assembler {
+namespace arm {
+
+using ::v8::internal::Object;
+using ::v8::internal::PrintF;
+using ::v8::internal::OS;
+using ::v8::internal::ReadLine;
+using ::v8::internal::DeleteArray;
+
+// This macro provides a platform independent use of sscanf. The reason
+// SScanF is not implemented in a platform independent way through
+// ::v8::internal::OS, as SNPrintF is, is that the Windows C Run-Time
+// Library does not provide vsscanf.
+#define SScanF sscanf // NOLINT
+
+// The Debugger class is used by the simulator while debugging simulated ARM
+// code.
+class Debugger {
+ public:
+ explicit Debugger(Simulator* sim);
+ ~Debugger();
+
+ void Stop(Instr* instr);
+ void Debug();
+
+ private:
+ static const instr_t kBreakpointInstr =
+ ((AL << 28) | (7 << 25) | (1 << 24) | break_point);
+ static const instr_t kNopInstr =
+ ((AL << 28) | (13 << 21));
+
+ Simulator* sim_;
+
+ bool GetValue(char* desc, int32_t* value);
+
+ // Set or delete a breakpoint. Returns true if successful.
+ bool SetBreakpoint(Instr* breakpc);
+ bool DeleteBreakpoint(Instr* breakpc);
+
+ // Undo and redo all breakpoints. This is needed to bracket disassembly and
+ // execution to skip past breakpoints when run from the debugger.
+ void UndoBreakpoints();
+ void RedoBreakpoints();
+};
+
+
+Debugger::Debugger(Simulator* sim) {
+ sim_ = sim;
+}
+
+
+Debugger::~Debugger() {
+}
+
+
+#ifdef GENERATED_CODE_COVERAGE
+static FILE* coverage_log = NULL;
+
+
+static void InitializeCoverage() {
+ char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
+ if (file_name != NULL) {
+ coverage_log = fopen(file_name, "aw+");
+ }
+}
+
+
+void Debugger::Stop(Instr* instr) {
+ char* str = reinterpret_cast<char*>(instr->InstructionBits() & 0x0fffffff);
+ if (strlen(str) > 0) {
+ if (coverage_log != NULL) {
+ fprintf(coverage_log, "%s\n", str);
+ fflush(coverage_log);
+ }
+ instr->SetInstructionBits(0xe1a00000); // Overwrite with nop.
+ }
+ sim_->set_pc(sim_->get_pc() + Instr::kInstrSize);
+}
+
+#else // ndef GENERATED_CODE_COVERAGE
+
+static void InitializeCoverage() {
+}
+
+
+void Debugger::Stop(Instr* instr) {
+  const char* str =
+      reinterpret_cast<const char*>(instr->InstructionBits() & 0x0fffffff);
+ PrintF("Simulator hit %s\n", str);
+ sim_->set_pc(sim_->get_pc() + Instr::kInstrSize);
+ Debug();
+}
+#endif
+
+
+static const char* reg_names[] = { "r0", "r1", "r2", "r3",
+ "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11",
+ "r12", "r13", "r14", "r15",
+ "pc", "lr", "sp", "ip",
+ "fp", "sl", ""};
+
+static int reg_nums[] = { 0, 1, 2, 3,
+ 4, 5, 6, 7,
+ 8, 9, 10, 11,
+ 12, 13, 14, 15,
+ 15, 14, 13, 12,
+ 11, 10};
+
+
+static int RegNameToRegNum(char* name) {
+ int reg = 0;
+ while (*reg_names[reg] != 0) {
+ if (strcmp(reg_names[reg], name) == 0) {
+ return reg_nums[reg];
+ }
+ reg++;
+ }
+ return -1;
+}
+
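+// Example (illustrative, not in the original source): the alias rows at
+// the end of reg_names line up with reg_nums, so both "r15" and "pc"
+// resolve to register number 15 in RegNameToRegNum.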
+
+bool Debugger::GetValue(char* desc, int32_t* value) {
+ int regnum = RegNameToRegNum(desc);
+ if (regnum >= 0) {
+ if (regnum == 15) {
+ *value = sim_->get_pc();
+ } else {
+ *value = sim_->get_register(regnum);
+ }
+ return true;
+ } else {
+ return SScanF(desc, "%i", value) == 1;
+ }
+ return false;
+}
+
+
+bool Debugger::SetBreakpoint(Instr* breakpc) {
+ // Check if a breakpoint can be set. If not return without any side-effects.
+ if (sim_->break_pc_ != NULL) {
+ return false;
+ }
+
+ // Set the breakpoint.
+ sim_->break_pc_ = breakpc;
+ sim_->break_instr_ = breakpc->InstructionBits();
+  // The breakpoint instruction is not set in the code itself yet; it
+  // will be set when the debugger shell continues.
+ return true;
+}
+
+
+bool Debugger::DeleteBreakpoint(Instr* breakpc) {
+ if (sim_->break_pc_ != NULL) {
+ sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+ }
+
+ sim_->break_pc_ = NULL;
+ sim_->break_instr_ = 0;
+ return true;
+}
+
+
+void Debugger::UndoBreakpoints() {
+ if (sim_->break_pc_ != NULL) {
+ sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+ }
+}
+
+
+void Debugger::RedoBreakpoints() {
+ if (sim_->break_pc_ != NULL) {
+ sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
+ }
+}
+
+
+void Debugger::Debug() {
+ intptr_t last_pc = -1;
+ bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+
+  // Make sure to have a proper terminating character if the limit is hit.
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ // Undo all set breakpoints while running in the debugger shell. This will
+ // make them invisible to all commands.
+ UndoBreakpoints();
+
+ while (!done) {
+ if (last_pc != sim_->get_pc()) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // use a reasonably large buffer
+ v8::internal::EmbeddedVector<char, 256> buffer;
+ dasm.InstructionDecode(buffer,
+ reinterpret_cast<byte*>(sim_->get_pc()));
+ PrintF(" 0x%x %s\n", sim_->get_pc(), buffer.start());
+ last_pc = sim_->get_pc();
+ }
+ char* line = ReadLine("sim> ");
+ if (line == NULL) {
+ break;
+ } else {
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int args = SScanF(line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ sim_->InstructionDecode(reinterpret_cast<Instr*>(sim_->get_pc()));
+ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+ // Execute the one instruction we broke at with breakpoints disabled.
+ sim_->InstructionDecode(reinterpret_cast<Instr*>(sim_->get_pc()));
+ // Leave the debugger shell.
+ done = true;
+ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+ if (args == 2) {
+ int32_t value;
+ if (GetValue(arg1, &value)) {
+ PrintF("%s: %d 0x%x\n", arg1, value, value);
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ PrintF("print value\n");
+ }
+ } else if ((strcmp(cmd, "po") == 0)
+ || (strcmp(cmd, "printobject") == 0)) {
+ if (args == 2) {
+ int32_t value;
+ if (GetValue(arg1, &value)) {
+ Object* obj = reinterpret_cast<Object*>(value);
+ USE(obj);
+ PrintF("%s: \n", arg1);
+#if defined(DEBUG)
+ obj->PrintLn();
+#endif // defined(DEBUG)
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ PrintF("printobject value\n");
+ }
+ } else if (strcmp(cmd, "disasm") == 0) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // use a reasonably large buffer
+ v8::internal::EmbeddedVector<char, 256> buffer;
+
+ byte* cur = NULL;
+ byte* end = NULL;
+
+ if (args == 1) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ end = cur + (10 * Instr::kInstrSize);
+ } else if (args == 2) {
+ int32_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(value);
+ // no length parameter passed, assume 10 instructions
+ end = cur + (10 * Instr::kInstrSize);
+ }
+ } else {
+ int32_t value1;
+ int32_t value2;
+ if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+ cur = reinterpret_cast<byte*>(value1);
+ end = cur + (value2 * Instr::kInstrSize);
+ }
+ }
+
+ while (cur < end) {
+ dasm.InstructionDecode(buffer, cur);
+ PrintF(" 0x%x %s\n", cur, buffer.start());
+ cur += Instr::kInstrSize;
+ }
+ } else if (strcmp(cmd, "gdb") == 0) {
+ PrintF("relinquishing control to gdb\n");
+ v8::internal::OS::DebugBreak();
+ PrintF("regaining control from gdb\n");
+ } else if (strcmp(cmd, "break") == 0) {
+ if (args == 2) {
+ int32_t value;
+ if (GetValue(arg1, &value)) {
+ if (!SetBreakpoint(reinterpret_cast<Instr*>(value))) {
+ PrintF("setting breakpoint failed\n");
+ }
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ PrintF("break addr\n");
+ }
+ } else if (strcmp(cmd, "del") == 0) {
+ if (!DeleteBreakpoint(NULL)) {
+ PrintF("deleting breakpoint failed\n");
+ }
+ } else if (strcmp(cmd, "flags") == 0) {
+ PrintF("N flag: %d; ", sim_->n_flag_);
+ PrintF("Z flag: %d; ", sim_->z_flag_);
+ PrintF("C flag: %d; ", sim_->c_flag_);
+ PrintF("V flag: %d\n", sim_->v_flag_);
+ } else if (strcmp(cmd, "unstop") == 0) {
+ intptr_t stop_pc = sim_->get_pc() - Instr::kInstrSize;
+ Instr* stop_instr = reinterpret_cast<Instr*>(stop_pc);
+ if (stop_instr->ConditionField() == special_condition) {
+ stop_instr->SetInstructionBits(kNopInstr);
+ } else {
+ PrintF("Not at debugger stop.");
+ }
+ } else {
+ PrintF("Unknown command: %s\n", cmd);
+ }
+ }
+ DeleteArray(line);
+ }
+
+ // Add all the breakpoints back to stop execution and enter the debugger
+ // shell when hit.
+ RedoBreakpoints();
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
+
+Simulator::Simulator() {
+  // Set up simulator support first. Some of this information is needed
+  // to set up the architecture state.
+  size_t stack_size = 1 * 1024 * 1024;  // Allocate 1MB for the stack.
+ stack_ = reinterpret_cast<char*>(malloc(stack_size));
+ pc_modified_ = false;
+ icount_ = 0;
+ break_pc_ = NULL;
+ break_instr_ = 0;
+
+ // Setup architecture state.
+ // All registers are initialized to zero to start with.
+ for (int i = 0; i < num_registers; i++) {
+ registers_[i] = 0;
+ }
+ n_flag_ = false;
+ z_flag_ = false;
+ c_flag_ = false;
+ v_flag_ = false;
+
+ // The sp is initialized to point to the bottom (high address) of the
+ // allocated stack area. To be safe in potential stack underflows we leave
+ // some buffer below.
+ registers_[sp] = reinterpret_cast<int32_t>(stack_) + stack_size - 64;
+ // The lr and pc are initialized to a known bad value that will cause an
+ // access violation if the simulator ever tries to execute it.
+ registers_[pc] = bad_lr;
+ registers_[lr] = bad_lr;
+ InitializeCoverage();
+}
+
+
+// Create one simulator per thread and keep it in thread local storage.
+static v8::internal::Thread::LocalStorageKey simulator_key =
+ v8::internal::Thread::CreateThreadLocalKey();
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::current() {
+ Simulator* sim = reinterpret_cast<Simulator*>(
+ v8::internal::Thread::GetThreadLocal(simulator_key));
+ if (sim == NULL) {
+ // TODO(146): delete the simulator object when a thread goes away.
+ sim = new Simulator();
+ v8::internal::Thread::SetThreadLocal(simulator_key, sim);
+ }
+ return sim;
+}
+
+
+// Sets the register in the architecture state. It will also deal with updating
+// Simulator internal state for special registers such as PC.
+void Simulator::set_register(int reg, int32_t value) {
+ ASSERT((reg >= 0) && (reg < num_registers));
+ if (reg == pc) {
+ pc_modified_ = true;
+ }
+ registers_[reg] = value;
+}
+
+
+// Get the register from the architecture state. This function does handle
+// the special case of accessing the PC register.
+int32_t Simulator::get_register(int reg) const {
+ ASSERT((reg >= 0) && (reg < num_registers));
+ return registers_[reg] + ((reg == pc) ? Instr::kPCReadOffset : 0);
+}
+
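+// Note (illustrative, not in the original source): on ARM, reading the
+// pc yields the address of the current instruction plus 8 because of
+// pipelining; Instr::kPCReadOffset models that, hence the adjustment in
+// get_register above.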
+
+// Raw access to the PC register.
+void Simulator::set_pc(int32_t value) {
+ pc_modified_ = true;
+ registers_[pc] = value;
+}
+
+
+// Raw access to the PC register without the special adjustment when reading.
+int32_t Simulator::get_pc() const {
+ return registers_[pc];
+}
+
+
+// For use in calls that take two double values, constructed from r0, r1, r2
+// and r3.
+void Simulator::GetFpArgs(double* x, double* y) {
+ // We use a char buffer to get around the strict-aliasing rules which
+ // otherwise allow the compiler to optimize away the copy.
+ char buffer[2 * sizeof(registers_[0])];
+ // Registers 0 and 1 -> x.
+ memcpy(buffer, registers_, sizeof(buffer));
+ memcpy(x, buffer, sizeof(buffer));
+ // Registers 2 and 3 -> y.
+ memcpy(buffer, registers_ + 2, sizeof(buffer));
+ memcpy(y, buffer, sizeof(buffer));
+}
+
+
+void Simulator::SetFpResult(const double& result) {
+ char buffer[2 * sizeof(registers_[0])];
+ memcpy(buffer, &result, sizeof(buffer));
+ // result -> registers 0 and 1.
+ memcpy(registers_, buffer, sizeof(buffer));
+}
+
+
+void Simulator::TrashCallerSaveRegisters() {
+ // We don't trash the registers with the return value.
+ registers_[2] = 0x50Bad4U;
+ registers_[3] = 0x50Bad4U;
+ registers_[12] = 0x50Bad4U;
+}
+
+
+// The ARM cannot do unaligned reads and writes. On some ARM platforms an
+// interrupt is caused. On others it does a funky rotation thing. For now we
+// simply disallow unaligned reads, but at some point we may want to move to
+// emulating the rotate behaviour. Note that simulator runs have the runtime
+// system running directly on the host system and only generated code is
+// executed in the simulator. Since the host is typically IA32 we will not
+// get the correct ARM-like behaviour on unaligned accesses.
+
+int Simulator::ReadW(int32_t addr, Instr* instr) {
+ if ((addr & 3) == 0) {
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ return *ptr;
+ }
+ PrintF("Unaligned read at %x\n", addr);
+ UNIMPLEMENTED();
+ return 0;
+}
+
+
+void Simulator::WriteW(int32_t addr, int value, Instr* instr) {
+ if ((addr & 3) == 0) {
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ *ptr = value;
+ return;
+ }
+ PrintF("Unaligned write at %x, pc=%p\n", addr, instr);
+ UNIMPLEMENTED();
+}
+
+
+uint16_t Simulator::ReadHU(int32_t addr, Instr* instr) {
+ if ((addr & 1) == 0) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ return *ptr;
+ }
+ PrintF("Unaligned read at %x, pc=%p\n", addr, instr);
+ UNIMPLEMENTED();
+ return 0;
+}
+
+
+int16_t Simulator::ReadH(int32_t addr, Instr* instr) {
+ if ((addr & 1) == 0) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ return *ptr;
+ }
+ PrintF("Unaligned read at %x\n", addr);
+ UNIMPLEMENTED();
+ return 0;
+}
+
+
+void Simulator::WriteH(int32_t addr, uint16_t value, Instr* instr) {
+ if ((addr & 1) == 0) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ *ptr = value;
+ return;
+ }
+ PrintF("Unaligned write at %x, pc=%p\n", addr, instr);
+ UNIMPLEMENTED();
+}
+
+
+void Simulator::WriteH(int32_t addr, int16_t value, Instr* instr) {
+ if ((addr & 1) == 0) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ *ptr = value;
+ return;
+ }
+ PrintF("Unaligned write at %x, pc=%p\n", addr, instr);
+ UNIMPLEMENTED();
+}
+
+
+uint8_t Simulator::ReadBU(int32_t addr) {
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ return *ptr;
+}
+
+
+int8_t Simulator::ReadB(int32_t addr) {
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ return *ptr;
+}
+
+
+void Simulator::WriteB(int32_t addr, uint8_t value) {
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ *ptr = value;
+}
+
+
+void Simulator::WriteB(int32_t addr, int8_t value) {
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ *ptr = value;
+}
+
+
+// Returns the limit of the stack area to enable checking for stack overflows.
+uintptr_t Simulator::StackLimit() const {
+ // Leave a safety margin of 256 bytes to prevent overrunning the stack when
+ // pushing values.
+ return reinterpret_cast<uintptr_t>(stack_) + 256;
+}
+
+
+// Unsupported instructions use Format to print an error and stop execution.
+void Simulator::Format(Instr* instr, const char* format) {
+ PrintF("Simulator found unsupported instruction:\n 0x%x: %s\n",
+ instr, format);
+ UNIMPLEMENTED();
+}
+
+
+// Checks if the current instruction should be executed based on its
+// condition bits.
+bool Simulator::ConditionallyExecute(Instr* instr) {
+ switch (instr->ConditionField()) {
+ case EQ: return z_flag_;
+ case NE: return !z_flag_;
+ case CS: return c_flag_;
+ case CC: return !c_flag_;
+ case MI: return n_flag_;
+ case PL: return !n_flag_;
+ case VS: return v_flag_;
+ case VC: return !v_flag_;
+ case HI: return c_flag_ && !z_flag_;
+ case LS: return !c_flag_ || z_flag_;
+ case GE: return n_flag_ == v_flag_;
+ case LT: return n_flag_ != v_flag_;
+ case GT: return !z_flag_ && (n_flag_ == v_flag_);
+ case LE: return z_flag_ || (n_flag_ != v_flag_);
+ case AL: return true;
+ default: UNREACHABLE();
+ }
+ return false;
+}
+
+
+// Calculate and set the Negative and Zero flags.
+void Simulator::SetNZFlags(int32_t val) {
+ n_flag_ = (val < 0);
+ z_flag_ = (val == 0);
+}
+
+
+// Set the Carry flag.
+void Simulator::SetCFlag(bool val) {
+ c_flag_ = val;
+}
+
+
+// Set the oVerflow flag.
+void Simulator::SetVFlag(bool val) {
+ v_flag_ = val;
+}
+
+
+// Calculate C flag value for additions.
+bool Simulator::CarryFrom(int32_t left, int32_t right) {
+ uint32_t uleft = static_cast<uint32_t>(left);
+ uint32_t uright = static_cast<uint32_t>(right);
+ uint32_t urest = 0xffffffffU - uleft;
+
+ return (uright > urest);
+}
+
+
+// Calculate C flag value for subtractions.
+bool Simulator::BorrowFrom(int32_t left, int32_t right) {
+ uint32_t uleft = static_cast<uint32_t>(left);
+ uint32_t uright = static_cast<uint32_t>(right);
+
+ return (uright > uleft);
+}
+
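+// Worked example (illustrative, not in the original source):
+// CarryFrom(0xffffffff, 1) returns true since 1 exceeds
+// 0xffffffff - 0xffffffff == 0, and BorrowFrom(0, 1) returns true since
+// the unsigned right operand exceeds the left.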
+
+// Calculate V flag value for additions and subtractions.
+bool Simulator::OverflowFrom(int32_t alu_out,
+ int32_t left, int32_t right, bool addition) {
+ bool overflow;
+ if (addition) {
+ // operands have the same sign
+ overflow = ((left >= 0 && right >= 0) || (left < 0 && right < 0))
+ // and operands and result have different sign
+ && ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
+ } else {
+ // operands have different signs
+ overflow = ((left < 0 && right >= 0) || (left >= 0 && right < 0))
+ // and first operand and result have different signs
+ && ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
+ }
+ return overflow;
+}
+
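+// Editor's worked example: adding 1 to INT_MAX overflows because both
+// operands are non-negative while the result is negative:
+//   OverflowFrom(0x80000000, 0x7fffffff, 1, true) -> true
+// whereas 1 - 2 = -1 sets no overflow:
+//   OverflowFrom(-1, 1, 2, false) -> false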
+
+// Addressing Mode 1 - Data-processing operands:
+// Get the value based on the shifter_operand with register.
+int32_t Simulator::GetShiftRm(Instr* instr, bool* carry_out) {
+ Shift shift = instr->ShiftField();
+ int shift_amount = instr->ShiftAmountField();
+ int32_t result = get_register(instr->RmField());
+ if (instr->Bit(4) == 0) {
+ // by immediate
+ if ((shift == ROR) && (shift_amount == 0)) {
+ UNIMPLEMENTED();
+ return result;
+ } else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
+ shift_amount = 32;
+ }
+ switch (shift) {
+ case ASR: {
+ if (shift_amount == 0) {
+ if (result < 0) {
+ result = 0xffffffff;
+ *carry_out = true;
+ } else {
+ result = 0;
+ *carry_out = false;
+ }
+ } else {
+ result >>= (shift_amount - 1);
+ *carry_out = (result & 1) == 1;
+ result >>= 1;
+ }
+ break;
+ }
+
+ case LSL: {
+ if (shift_amount == 0) {
+ *carry_out = c_flag_;
+ } else {
+ result <<= (shift_amount - 1);
+ *carry_out = (result < 0);
+ result <<= 1;
+ }
+ break;
+ }
+
+ case LSR: {
+ if (shift_amount == 0) {
+ result = 0;
+ *carry_out = c_flag_;
+ } else {
+ uint32_t uresult = static_cast<uint32_t>(result);
+ uresult >>= (shift_amount - 1);
+ *carry_out = (uresult & 1) == 1;
+ uresult >>= 1;
+ result = static_cast<int32_t>(uresult);
+ }
+ break;
+ }
+
+ case ROR: {
+ UNIMPLEMENTED();
+ break;
+ }
+
+ default: {
+ UNREACHABLE();
+ break;
+ }
+ }
+ } else {
+ // by register
+ int rs = instr->RsField();
+ shift_amount = get_register(rs) & 0xff;
+ switch (shift) {
+ case ASR: {
+ if (shift_amount == 0) {
+ *carry_out = c_flag_;
+ } else if (shift_amount < 32) {
+ result >>= (shift_amount - 1);
+ *carry_out = (result & 1) == 1;
+ result >>= 1;
+ } else {
+ ASSERT(shift_amount >= 32);
+ if (result < 0) {
+ *carry_out = true;
+ result = 0xffffffff;
+ } else {
+ *carry_out = false;
+ result = 0;
+ }
+ }
+ break;
+ }
+
+ case LSL: {
+ if (shift_amount == 0) {
+ *carry_out = c_flag_;
+ } else if (shift_amount < 32) {
+ result <<= (shift_amount - 1);
+ *carry_out = (result < 0);
+ result <<= 1;
+ } else if (shift_amount == 32) {
+ *carry_out = (result & 1) == 1;
+ result = 0;
+ } else {
+ ASSERT(shift_amount > 32);
+ *carry_out = false;
+ result = 0;
+ }
+ break;
+ }
+
+ case LSR: {
+ if (shift_amount == 0) {
+ *carry_out = c_flag_;
+ } else if (shift_amount < 32) {
+ uint32_t uresult = static_cast<uint32_t>(result);
+ uresult >>= (shift_amount - 1);
+ *carry_out = (uresult & 1) == 1;
+ uresult >>= 1;
+ result = static_cast<int32_t>(uresult);
+ } else if (shift_amount == 32) {
+ *carry_out = (result < 0);
+ result = 0;
+ } else {
+ *carry_out = false;
+ result = 0;
+ }
+ break;
+ }
+
+ case ROR: {
+ UNIMPLEMENTED();
+ break;
+ }
+
+ default: {
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+ return result;
+}
+
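+// Editor's worked example for the immediate-shift path above: for
+// "mov r0, r1, lsl #4" with r1 = 0x10000000 the code shifts by 3, samples
+// the sign bit (the original bit 28) into *carry_out, then shifts once
+// more, yielding result = 0 and carry_out = true.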
+
+// Addressing Mode 1 - Data-processing operands:
+// Get the value based on the shifter_operand with immediate.
+int32_t Simulator::GetImm(Instr* instr, bool* carry_out) {
+ int rotate = instr->RotateField() * 2;
+ int immed8 = instr->Immed8Field();
+ int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
+ *carry_out = (rotate == 0) ? c_flag_ : (imm < 0);
+ return imm;
+}
+
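+// Editor's worked example: to materialize #0x3fc the encoder uses
+// immed8 = 0xff with a rotate field of 15, so rotate = 30 and
+//   imm = (0xff >> 30) | (0xff << 2) = 0x3fc,
+// with carry_out = (imm < 0) = false since rotate != 0.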
+
+static int count_bits(int bit_vector) {
+ int count = 0;
+ while (bit_vector != 0) {
+ if ((bit_vector & 1) != 0) {
+ count++;
+ }
+ bit_vector >>= 1;
+ }
+ return count;
+}
+
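+// For example, count_bits(0x5005) == 4, matching the register list
+// {r0, r2, r12, lr} of an ldm/stm instruction.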
+
+// Addressing Mode 4 - Load and Store Multiple
+void Simulator::HandleRList(Instr* instr, bool load) {
+ int rn = instr->RnField();
+ int32_t rn_val = get_register(rn);
+ int rlist = instr->RlistField();
+ int num_regs = count_bits(rlist);
+
+ intptr_t start_address = 0;
+ intptr_t end_address = 0;
+ switch (instr->PUField()) {
+ case 0: {
+ // Print("da");
+ UNIMPLEMENTED();
+ break;
+ }
+ case 1: {
+ // Print("ia");
+ start_address = rn_val;
+ end_address = rn_val + (num_regs * 4) - 4;
+ rn_val = rn_val + (num_regs * 4);
+ break;
+ }
+ case 2: {
+ // Print("db");
+ start_address = rn_val - (num_regs * 4);
+ end_address = rn_val - 4;
+ rn_val = start_address;
+ break;
+ }
+ case 3: {
+ // Print("ib");
+ UNIMPLEMENTED();
+ break;
+ }
+ default: {
+ UNREACHABLE();
+ break;
+ }
+ }
+ if (instr->HasW()) {
+ set_register(rn, rn_val);
+ }
+ intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
+ int reg = 0;
+ while (rlist != 0) {
+ if ((rlist & 1) != 0) {
+ if (load) {
+ set_register(reg, *address);
+ } else {
+ *address = get_register(reg);
+ }
+ address += 1;
+ }
+ reg++;
+ rlist >>= 1;
+ }
+ ASSERT(end_address == (reinterpret_cast<intptr_t>(address) - 4));
+}
+
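+// Editor's worked example: for "ldmia r0!, {r1, r2, r3}" with r0 = 0x100,
+// the PU == 1 case gives start_address = 0x100, end_address = 0x108, and
+// the writeback (HasW) leaves r0 = 0x10c.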
+
+// Calls into the V8 runtime are based on this very simple interface.
+// Note: to be able to return two values from some calls, the code in
+// runtime.cc uses the ObjectPair type, which is essentially two 32-bit values
+// stuffed into a 64-bit value. The code below assumes that all runtime calls
+// return 64 bits of result. If they don't, the r1 result register contains a
+// bogus value, which is fine because r1 is caller-saved.
+typedef int64_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1);
+
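+// For instance, a runtime call returning the 64-bit value 0x0000000200000001
+// leaves 0x00000001 in r0 and 0x00000002 in r1, mirroring the ObjectPair
+// layout described above.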
+
+// Software interrupt instructions are used by the simulator to call into the
+// C-based V8 runtime.
+void Simulator::SoftwareInterrupt(Instr* instr) {
+ int swi = instr->SwiField();
+ switch (swi) {
+ case call_rt_r5: {
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(get_register(r5));
+ intptr_t arg0 = get_register(r0);
+ intptr_t arg1 = get_register(r1);
+ int64_t result = target(arg0, arg1);
+ int32_t lo_res = static_cast<int32_t>(result);
+ int32_t hi_res = static_cast<int32_t>(result >> 32);
+ set_register(r0, lo_res);
+ set_register(r1, hi_res);
+ set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
+ break;
+ }
+ case call_rt_r2: {
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(get_register(r2));
+ intptr_t arg0 = get_register(r0);
+ intptr_t arg1 = get_register(r1);
+ int64_t result = target(arg0, arg1);
+ int32_t lo_res = static_cast<int32_t>(result);
+ int32_t hi_res = static_cast<int32_t>(result >> 32);
+ set_register(r0, lo_res);
+ set_register(r1, hi_res);
+ set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
+ break;
+ }
+ case break_point: {
+ Debugger dbg(this);
+ dbg.Debug();
+ break;
+ }
+ {
+ double x, y, z;
+ case simulator_fp_add:
+ GetFpArgs(&x, &y);
+ z = x + y;
+ SetFpResult(z);
+ TrashCallerSaveRegisters();
+ set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
+ break;
+ case simulator_fp_sub:
+ GetFpArgs(&x, &y);
+ z = x - y;
+ SetFpResult(z);
+ TrashCallerSaveRegisters();
+ set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
+ break;
+ case simulator_fp_mul:
+ GetFpArgs(&x, &y);
+ z = x * y;
+ SetFpResult(z);
+ TrashCallerSaveRegisters();
+ set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
+ break;
+ }
+ default: {
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
+// Handle execution based on instruction types.
+
+// Instruction types 0 and 1 are both rolled into one function because they
+// only differ in the handling of the shifter_operand.
+void Simulator::DecodeType01(Instr* instr) {
+ int type = instr->TypeField();
+ if ((type == 0) && instr->IsSpecialType0()) {
+ // multiply instruction or extra loads and stores
+ if (instr->Bits(7, 4) == 9) {
+ if (instr->Bit(24) == 0) {
+ // multiply instructions
+ int rd = instr->RdField();
+ int rm = instr->RmField();
+ int rs = instr->RsField();
+ int32_t rs_val = get_register(rs);
+ int32_t rm_val = get_register(rm);
+ if (instr->Bit(23) == 0) {
+ if (instr->Bit(21) == 0) {
+ // Format(instr, "mul'cond's 'rd, 'rm, 'rs");
+ int32_t alu_out = rm_val * rs_val;
+ set_register(rd, alu_out);
+ if (instr->HasS()) {
+ SetNZFlags(alu_out);
+ }
+ } else {
+ Format(instr, "mla'cond's 'rd, 'rm, 'rs, 'rn");
+ }
+ } else {
+ // Format(instr, "'um'al'cond's 'rn, 'rd, 'rs, 'rm");
+ int rn = instr->RnField();
+ int32_t hi_res = 0;
+ int32_t lo_res = 0;
+ if (instr->Bit(22) == 0) {
+ // signed multiply
+ UNIMPLEMENTED();
+ } else {
+ // unsigned multiply
+ // Zero-extend the operands explicitly; assigning the int32_t
+ // values directly would sign-extend them into the 64-bit product.
+ uint64_t left_op = static_cast<uint32_t>(rm_val);
+ uint64_t right_op = static_cast<uint32_t>(rs_val);
+ uint64_t result = left_op * right_op;
+ hi_res = static_cast<int32_t>(result >> 32);
+ lo_res = static_cast<int32_t>(result & 0xffffffff);
+ }
+ set_register(rn, hi_res);
+ set_register(rd, lo_res);
+ if (instr->HasS()) {
+ UNIMPLEMENTED();
+ }
+ }
+ } else {
+ UNIMPLEMENTED(); // not used by V8
+ }
+ } else {
+ // extra load/store instructions
+ int rd = instr->RdField();
+ int rn = instr->RnField();
+ int32_t rn_val = get_register(rn);
+ int32_t addr = 0;
+ if (instr->Bit(22) == 0) {
+ int rm = instr->RmField();
+ int32_t rm_val = get_register(rm);
+ switch (instr->PUField()) {
+ case 0: {
+ // Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
+ ASSERT(!instr->HasW());
+ addr = rn_val;
+ rn_val -= rm_val;
+ set_register(rn, rn_val);
+ break;
+ }
+ case 1: {
+ // Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
+ ASSERT(!instr->HasW());
+ addr = rn_val;
+ rn_val += rm_val;
+ set_register(rn, rn_val);
+ break;
+ }
+ case 2: {
+ // Format(instr, "'memop'cond'sign'h 'rd, ['rn, -'rm]'w");
+ rn_val -= rm_val;
+ addr = rn_val;
+ if (instr->HasW()) {
+ set_register(rn, rn_val);
+ }
+ break;
+ }
+ case 3: {
+ // Format(instr, "'memop'cond'sign'h 'rd, ['rn, +'rm]'w");
+ rn_val += rm_val;
+ addr = rn_val;
+ if (instr->HasW()) {
+ set_register(rn, rn_val);
+ }
+ break;
+ }
+ default: {
+ // The PU field is a 2-bit field.
+ UNREACHABLE();
+ break;
+ }
+ }
+ } else {
+ int32_t imm_val = (instr->ImmedHField() << 4) | instr->ImmedLField();
+ switch (instr->PUField()) {
+ case 0: {
+ // Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8");
+ ASSERT(!instr->HasW());
+ addr = rn_val;
+ rn_val -= imm_val;
+ set_register(rn, rn_val);
+ break;
+ }
+ case 1: {
+ // Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8");
+ ASSERT(!instr->HasW());
+ addr = rn_val;
+ rn_val += imm_val;
+ set_register(rn, rn_val);
+ break;
+ }
+ case 2: {
+ // Format(instr, "'memop'cond'sign'h 'rd, ['rn, #-'off8]'w");
+ rn_val -= imm_val;
+ addr = rn_val;
+ if (instr->HasW()) {
+ set_register(rn, rn_val);
+ }
+ break;
+ }
+ case 3: {
+ // Format(instr, "'memop'cond'sign'h 'rd, ['rn, #+'off8]'w");
+ rn_val += imm_val;
+ addr = rn_val;
+ if (instr->HasW()) {
+ set_register(rn, rn_val);
+ }
+ break;
+ }
+ default: {
+ // The PU field is a 2-bit field.
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+ if (instr->HasH()) {
+ if (instr->HasSign()) {
+ if (instr->HasL()) {
+ int16_t val = ReadH(addr, instr);
+ set_register(rd, val);
+ } else {
+ int16_t val = get_register(rd);
+ WriteH(addr, val, instr);
+ }
+ } else {
+ if (instr->HasL()) {
+ uint16_t val = ReadHU(addr, instr);
+ set_register(rd, val);
+ } else {
+ uint16_t val = get_register(rd);
+ WriteH(addr, val, instr);
+ }
+ }
+ } else {
+ // signed byte loads
+ ASSERT(instr->HasSign());
+ ASSERT(instr->HasL());
+ int8_t val = ReadB(addr);
+ set_register(rd, val);
+ }
+ return;
+ }
+ } else {
+ int rd = instr->RdField();
+ int rn = instr->RnField();
+ int32_t rn_val = get_register(rn);
+ int32_t shifter_operand = 0;
+ bool shifter_carry_out = false;
+ if (type == 0) {
+ shifter_operand = GetShiftRm(instr, &shifter_carry_out);
+ } else {
+ ASSERT(instr->TypeField() == 1);
+ shifter_operand = GetImm(instr, &shifter_carry_out);
+ }
+ int32_t alu_out;
+
+ switch (instr->OpcodeField()) {
+ case AND: {
+ // Format(instr, "and'cond's 'rd, 'rn, 'shift_rm");
+ // Format(instr, "and'cond's 'rd, 'rn, 'imm");
+ alu_out = rn_val & shifter_operand;
+ set_register(rd, alu_out);
+ if (instr->HasS()) {
+ SetNZFlags(alu_out);
+ SetCFlag(shifter_carry_out);
+ }
+ break;
+ }
+
+ case EOR: {
+ // Format(instr, "eor'cond's 'rd, 'rn, 'shift_rm");
+ // Format(instr, "eor'cond's 'rd, 'rn, 'imm");
+ alu_out = rn_val ^ shifter_operand;
+ set_register(rd, alu_out);
+ if (instr->HasS()) {
+ SetNZFlags(alu_out);
+ SetCFlag(shifter_carry_out);
+ }
+ break;
+ }
+
+ case SUB: {
+ // Format(instr, "sub'cond's 'rd, 'rn, 'shift_rm");
+ // Format(instr, "sub'cond's 'rd, 'rn, 'imm");
+ alu_out = rn_val - shifter_operand;
+ set_register(rd, alu_out);
+ if (instr->HasS()) {
+ SetNZFlags(alu_out);
+ SetCFlag(!BorrowFrom(rn_val, shifter_operand));
+ SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
+ }
+ break;
+ }
+
+ case RSB: {
+ // Format(instr, "rsb'cond's 'rd, 'rn, 'shift_rm");
+ // Format(instr, "rsb'cond's 'rd, 'rn, 'imm");
+ alu_out = shifter_operand - rn_val;
+ set_register(rd, alu_out);
+ if (instr->HasS()) {
+ SetNZFlags(alu_out);
+ SetCFlag(!BorrowFrom(shifter_operand, rn_val));
+ SetVFlag(OverflowFrom(alu_out, shifter_operand, rn_val, false));
+ }
+ break;
+ }
+
+ case ADD: {
+ // Format(instr, "add'cond's 'rd, 'rn, 'shift_rm");
+ // Format(instr, "add'cond's 'rd, 'rn, 'imm");
+ alu_out = rn_val + shifter_operand;
+ set_register(rd, alu_out);
+ if (instr->HasS()) {
+ SetNZFlags(alu_out);
+ SetCFlag(CarryFrom(rn_val, shifter_operand));
+ SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
+ }
+ break;
+ }
+
+ case ADC: {
+ Format(instr, "adc'cond's 'rd, 'rn, 'shift_rm");
+ Format(instr, "adc'cond's 'rd, 'rn, 'imm");
+ break;
+ }
+
+ case SBC: {
+ Format(instr, "sbc'cond's 'rd, 'rn, 'shift_rm");
+ Format(instr, "sbc'cond's 'rd, 'rn, 'imm");
+ break;
+ }
+
+ case RSC: {
+ Format(instr, "rsc'cond's 'rd, 'rn, 'shift_rm");
+ Format(instr, "rsc'cond's 'rd, 'rn, 'imm");
+ break;
+ }
+
+ case TST: {
+ if (instr->HasS()) {
+ // Format(instr, "tst'cond 'rn, 'shift_rm");
+ // Format(instr, "tst'cond 'rn, 'imm");
+ alu_out = rn_val & shifter_operand;
+ SetNZFlags(alu_out);
+ SetCFlag(shifter_carry_out);
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ }
+
+ case TEQ: {
+ if (instr->HasS()) {
+ // Format(instr, "teq'cond 'rn, 'shift_rm");
+ // Format(instr, "teq'cond 'rn, 'imm");
+ alu_out = rn_val ^ shifter_operand;
+ SetNZFlags(alu_out);
+ SetCFlag(shifter_carry_out);
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ }
+
+ case CMP: {
+ if (instr->HasS()) {
+ // Format(instr, "cmp'cond 'rn, 'shift_rm");
+ // Format(instr, "cmp'cond 'rn, 'imm");
+ alu_out = rn_val - shifter_operand;
+ SetNZFlags(alu_out);
+ SetCFlag(!BorrowFrom(rn_val, shifter_operand));
+ SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ }
+
+ case CMN: {
+ if (instr->HasS()) {
+ Format(instr, "cmn'cond 'rn, 'shift_rm");
+ Format(instr, "cmn'cond 'rn, 'imm");
+ } else {
+ UNIMPLEMENTED();
+ }
+ break;
+ }
+
+ case ORR: {
+ // Format(instr, "orr'cond's 'rd, 'rn, 'shift_rm");
+ // Format(instr, "orr'cond's 'rd, 'rn, 'imm");
+ alu_out = rn_val | shifter_operand;
+ set_register(rd, alu_out);
+ if (instr->HasS()) {
+ SetNZFlags(alu_out);
+ SetCFlag(shifter_carry_out);
+ }
+ break;
+ }
+
+ case MOV: {
+ // Format(instr, "mov'cond's 'rd, 'shift_rm");
+ // Format(instr, "mov'cond's 'rd, 'imm");
+ alu_out = shifter_operand;
+ set_register(rd, alu_out);
+ if (instr->HasS()) {
+ SetNZFlags(alu_out);
+ SetCFlag(shifter_carry_out);
+ }
+ break;
+ }
+
+ case BIC: {
+ // Format(instr, "bic'cond's 'rd, 'rn, 'shift_rm");
+ // Format(instr, "bic'cond's 'rd, 'rn, 'imm");
+ alu_out = rn_val & ~shifter_operand;
+ set_register(rd, alu_out);
+ if (instr->HasS()) {
+ SetNZFlags(alu_out);
+ SetCFlag(shifter_carry_out);
+ }
+ break;
+ }
+
+ case MVN: {
+ // Format(instr, "mvn'cond's 'rd, 'shift_rm");
+ // Format(instr, "mvn'cond's 'rd, 'imm");
+ alu_out = ~shifter_operand;
+ set_register(rd, alu_out);
+ if (instr->HasS()) {
+ SetNZFlags(alu_out);
+ SetCFlag(shifter_carry_out);
+ }
+ break;
+ }
+
+ default: {
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+}
+
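+// Editor's worked example for the flag handling above: "cmp r0, #0" with
+// r0 = -5 takes the CMP case, giving alu_out = -5 and hence N = 1, Z = 0,
+// C = 1 (no borrow) and V = 0.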
+
+void Simulator::DecodeType2(Instr* instr) {
+ int rd = instr->RdField();
+ int rn = instr->RnField();
+ int32_t rn_val = get_register(rn);
+ int32_t im_val = instr->Offset12Field();
+ int32_t addr = 0;
+ switch (instr->PUField()) {
+ case 0: {
+ // Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
+ ASSERT(!instr->HasW());
+ addr = rn_val;
+ rn_val -= im_val;
+ set_register(rn, rn_val);
+ break;
+ }
+ case 1: {
+ // Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
+ ASSERT(!instr->HasW());
+ addr = rn_val;
+ rn_val += im_val;
+ set_register(rn, rn_val);
+ break;
+ }
+ case 2: {
+ // Format(instr, "'memop'cond'b 'rd, ['rn, #-'off12]'w");
+ rn_val -= im_val;
+ addr = rn_val;
+ if (instr->HasW()) {
+ set_register(rn, rn_val);
+ }
+ break;
+ }
+ case 3: {
+ // Format(instr, "'memop'cond'b 'rd, ['rn, #+'off12]'w");
+ rn_val += im_val;
+ addr = rn_val;
+ if (instr->HasW()) {
+ set_register(rn, rn_val);
+ }
+ break;
+ }
+ default: {
+ UNREACHABLE();
+ break;
+ }
+ }
+ if (instr->HasB()) {
+ if (instr->HasL()) {
+ byte val = ReadBU(addr);
+ set_register(rd, val);
+ } else {
+ byte val = get_register(rd);
+ WriteB(addr, val);
+ }
+ } else {
+ if (instr->HasL()) {
+ set_register(rd, ReadW(addr, instr));
+ } else {
+ WriteW(addr, get_register(rd), instr);
+ }
+ }
+}
+
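+// Editor's note: as an example of the PU decoding above, "ldr r0, [r1, #4]!"
+// has PU == 3 with the W bit set, so addr = r1 + 4 is both loaded from and
+// written back into r1.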
+
+void Simulator::DecodeType3(Instr* instr) {
+ int rd = instr->RdField();
+ int rn = instr->RnField();
+ int32_t rn_val = get_register(rn);
+ bool shifter_carry_out = false;
+ int32_t shifter_operand = GetShiftRm(instr, &shifter_carry_out);
+ int32_t addr = 0;
+ switch (instr->PUField()) {
+ case 0: {
+ ASSERT(!instr->HasW());
+ Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
+ break;
+ }
+ case 1: {
+ ASSERT(!instr->HasW());
+ Format(instr, "'memop'cond'b 'rd, ['rn], +'shift_rm");
+ break;
+ }
+ case 2: {
+ // Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
+ addr = rn_val - shifter_operand;
+ if (instr->HasW()) {
+ set_register(rn, addr);
+ }
+ break;
+ }
+ case 3: {
+ // Format(instr, "'memop'cond'b 'rd, ['rn, +'shift_rm]'w");
+ addr = rn_val + shifter_operand;
+ if (instr->HasW()) {
+ set_register(rn, addr);
+ }
+ break;
+ }
+ default: {
+ UNREACHABLE();
+ break;
+ }
+ }
+ if (instr->HasB()) {
+ UNIMPLEMENTED();
+ } else {
+ if (instr->HasL()) {
+ set_register(rd, ReadW(addr, instr));
+ } else {
+ WriteW(addr, get_register(rd), instr);
+ }
+ }
+}
+
+
+void Simulator::DecodeType4(Instr* instr) {
+ ASSERT(instr->Bit(22) == 0); // only allowed to be set in privileged mode
+ if (instr->HasL()) {
+ // Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
+ HandleRList(instr, true);
+ } else {
+ // Format(instr, "stm'cond'pu 'rn'w, 'rlist");
+ HandleRList(instr, false);
+ }
+}
+
+
+void Simulator::DecodeType5(Instr* instr) {
+ // Format(instr, "b'l'cond 'target");
+ int off = (instr->SImmed24Field() << 2) + 8;
+ intptr_t pc = get_pc();
+ if (instr->HasLink()) {
+ set_register(lr, pc + Instr::kInstrSize);
+ }
+ set_pc(pc + off);
+}
+
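+// Editor's worked example: a branch to the next instruction encodes
+// SImmed24 = -1, so off = (-1 << 2) + 8 = 4, matching the ARM convention
+// that the PC reads 8 bytes ahead of the executing instruction.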
+
+void Simulator::DecodeType6(Instr* instr) {
+ UNIMPLEMENTED();
+}
+
+
+void Simulator::DecodeType7(Instr* instr) {
+ if (instr->Bit(24) == 1) {
+ // Format(instr, "swi 'swi");
+ SoftwareInterrupt(instr);
+ } else {
+ UNIMPLEMENTED();
+ }
+}
+
+
+// Executes the current instruction.
+void Simulator::InstructionDecode(Instr* instr) {
+ pc_modified_ = false;
+ if (instr->ConditionField() == special_condition) {
+ Debugger dbg(this);
+ dbg.Stop(instr);
+ return;
+ }
+ if (::v8::internal::FLAG_trace_sim) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // use a reasonably large buffer
+ v8::internal::EmbeddedVector<char, 256> buffer;
+ dasm.InstructionDecode(buffer,
+ reinterpret_cast<byte*>(instr));
+ PrintF(" 0x%x %s\n", instr, buffer.start());
+ }
+ if (ConditionallyExecute(instr)) {
+ switch (instr->TypeField()) {
+ case 0:
+ case 1: {
+ DecodeType01(instr);
+ break;
+ }
+ case 2: {
+ DecodeType2(instr);
+ break;
+ }
+ case 3: {
+ DecodeType3(instr);
+ break;
+ }
+ case 4: {
+ DecodeType4(instr);
+ break;
+ }
+ case 5: {
+ DecodeType5(instr);
+ break;
+ }
+ case 6: {
+ DecodeType6(instr);
+ break;
+ }
+ case 7: {
+ DecodeType7(instr);
+ break;
+ }
+ default: {
+ UNIMPLEMENTED();
+ break;
+ }
+ }
+ }
+ if (!pc_modified_) {
+ set_register(pc, reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
+ }
+}
+
+
+// Runs the fetch/decode/execute loop until the PC reaches end_sim_pc.
+void Simulator::Execute() {
+ // Get the PC to simulate. Cannot use the accessor here as we need the
+ // raw PC value and not the one used as input to arithmetic instructions.
+ int program_counter = get_pc();
+
+ if (::v8::internal::FLAG_stop_sim_at == 0) {
+ // Fast version of the dispatch loop without checking whether the simulator
+ // should be stopping at a particular executed instruction.
+ while (program_counter != end_sim_pc) {
+ Instr* instr = reinterpret_cast<Instr*>(program_counter);
+ icount_++;
+ InstructionDecode(instr);
+ program_counter = get_pc();
+ }
+ } else {
+ // FLAG_stop_sim_at has a non-default value. Stop in the debugger when we
+ // reach the particular instruction count.
+ while (program_counter != end_sim_pc) {
+ Instr* instr = reinterpret_cast<Instr*>(program_counter);
+ icount_++;
+ if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
+ Debugger dbg(this);
+ dbg.Debug();
+ } else {
+ InstructionDecode(instr);
+ }
+ program_counter = get_pc();
+ }
+ }
+}
+
+
+Object* Simulator::Call(int32_t entry, int32_t p0, int32_t p1, int32_t p2,
+ int32_t p3, int32_t p4) {
+ // Set up the parameters.
+ set_register(r0, p0);
+ set_register(r1, p1);
+ set_register(r2, p2);
+ set_register(r3, p3);
+ intptr_t* stack_pointer = reinterpret_cast<intptr_t*>(get_register(sp));
+ *(--stack_pointer) = p4;
+ set_register(sp, reinterpret_cast<int32_t>(stack_pointer));
+
+ // Prepare to execute the code at entry
+ set_register(pc, entry);
+ // Put down marker for end of simulation. The simulator will stop simulation
+ // when the PC reaches this value. By saving the "end simulation" value into
+ // the LR the simulation stops when returning to this call point.
+ set_register(lr, end_sim_pc);
+
+ // Remember the values of callee-saved registers.
+ // The code below assumes that r9 is not used as sb (static base) in
+ // simulator code and therefore is regarded as a callee-saved register.
+ int32_t r4_val = get_register(r4);
+ int32_t r5_val = get_register(r5);
+ int32_t r6_val = get_register(r6);
+ int32_t r7_val = get_register(r7);
+ int32_t r8_val = get_register(r8);
+ int32_t r9_val = get_register(r9);
+ int32_t r10_val = get_register(r10);
+ int32_t r11_val = get_register(r11);
+
+ // Set up the callee-saved registers with a known value so that we can
+ // check that they are preserved properly across JS execution.
+ int32_t callee_saved_value = icount_;
+ set_register(r4, callee_saved_value);
+ set_register(r5, callee_saved_value);
+ set_register(r6, callee_saved_value);
+ set_register(r7, callee_saved_value);
+ set_register(r8, callee_saved_value);
+ set_register(r9, callee_saved_value);
+ set_register(r10, callee_saved_value);
+ set_register(r11, callee_saved_value);
+
+ // Start the simulation
+ Execute();
+
+ // Check that the callee-saved registers have been preserved.
+ CHECK_EQ(get_register(r4), callee_saved_value);
+ CHECK_EQ(get_register(r5), callee_saved_value);
+ CHECK_EQ(get_register(r6), callee_saved_value);
+ CHECK_EQ(get_register(r7), callee_saved_value);
+ CHECK_EQ(get_register(r8), callee_saved_value);
+ CHECK_EQ(get_register(r9), callee_saved_value);
+ CHECK_EQ(get_register(r10), callee_saved_value);
+ CHECK_EQ(get_register(r11), callee_saved_value);
+
+ // Restore callee-saved registers with the original value.
+ set_register(r4, r4_val);
+ set_register(r5, r5_val);
+ set_register(r6, r6_val);
+ set_register(r7, r7_val);
+ set_register(r8, r8_val);
+ set_register(r9, r9_val);
+ set_register(r10, r10_val);
+ set_register(r11, r11_val);
+
+ int result = get_register(r0);
+ return reinterpret_cast<Object*>(result);
+}
+
+} } // namespace assembler::arm
+
+#endif // !defined(__arm__)
diff --git a/V8Binding/v8/src/arm/simulator-arm.h b/V8Binding/v8/src/arm/simulator-arm.h
new file mode 100644
index 0000000..d4a395a
--- /dev/null
+++ b/V8Binding/v8/src/arm/simulator-arm.h
@@ -0,0 +1,205 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Declares a Simulator for ARM instructions if we are not generating a native
+// ARM binary. This Simulator allows us to run and debug ARM code generation on
+// regular desktop machines.
+// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
+// which will either start execution in the Simulator or forward to the real
+// entry point on an ARM hardware platform.
+
+#ifndef V8_ARM_SIMULATOR_ARM_H_
+#define V8_ARM_SIMULATOR_ARM_H_
+
+#if defined(__arm__)
+
+// When running without a simulator we call the entry directly.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ reinterpret_cast<Object*>(entry(p0, p1, p2, p3, p4))
+
+// Calculates the stack limit beyond which we will throw stack overflow errors.
+// This macro must be called from a C++ method. It relies on being able to take
+// the address of "this" to get a value on the current execution stack and then
+// calculates the stack limit based on that value.
+#define GENERATED_CODE_STACK_LIMIT(limit) \
+ (reinterpret_cast<uintptr_t>(this) - limit)
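+// Hypothetical usage sketch (editor's note): from inside a C++ member
+// function,
+//   uintptr_t limit = GENERATED_CODE_STACK_LIMIT(128 * KB);
+// yields an address roughly 128KB below the current native stack frame.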
+
+#else // defined(__arm__)
+
+// When running with the simulator transition into simulated execution at this
+// point.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+ assembler::arm::Simulator::current()->Call((int32_t)entry, (int32_t)p0, \
+ (int32_t)p1, (int32_t)p2, (int32_t)p3, (int32_t)p4)
+
+// The simulator has its own stack. Thus it has a different stack limit from
+// the C-based native code.
+#define GENERATED_CODE_STACK_LIMIT(limit) \
+ (assembler::arm::Simulator::current()->StackLimit())
+
+
+#include "constants-arm.h"
+
+
+namespace assembler {
+namespace arm {
+
+class Simulator {
+ public:
+ friend class Debugger;
+
+ enum Register {
+ no_reg = -1,
+ r0 = 0, r1, r2, r3, r4, r5, r6, r7,
+ r8, r9, r10, r11, r12, r13, r14, r15,
+ num_registers,
+ sp = 13,
+ lr = 14,
+ pc = 15
+ };
+
+ Simulator();
+ ~Simulator();
+
+ // The currently executing Simulator instance. Potentially there can be one
+ // for each native thread.
+ static Simulator* current();
+
+ // Accessors for register state. Reading the pc value adheres to the ARM
+ // architecture specification and is off by 8 from the currently executing
+ // instruction.
+ void set_register(int reg, int32_t value);
+ int32_t get_register(int reg) const;
+
+ // Special case of set_register and get_register to access the raw PC value.
+ void set_pc(int32_t value);
+ int32_t get_pc() const;
+
+ // Accessor to the internal simulator stack area.
+ uintptr_t StackLimit() const;
+
+ // Executes ARM instructions until the PC reaches end_sim_pc.
+ void Execute();
+
+ // V8 generally calls into generated code with 5 parameters. This is a
+ // convenience function, which sets up the simulator state and grabs the
+ // result on return.
+ v8::internal::Object* Call(int32_t entry, int32_t p0, int32_t p1,
+ int32_t p2, int32_t p3, int32_t p4);
+
+ private:
+ enum special_values {
+ // Known bad pc value to ensure that the simulator does not execute
+ // without being properly setup.
+ bad_lr = -1,
+ // A pc value used to signal the simulator to stop execution. Generally
+ // the lr is set to this value on transition from native C code to
+ // simulated execution, so that the simulator can "return" to the native
+ // C code.
+ end_sim_pc = -2
+ };
+
+ // Unsupported instructions use Format to print an error and stop execution.
+ void Format(Instr* instr, const char* format);
+
+ // Checks if the current instruction should be executed based on its
+ // condition bits.
+ bool ConditionallyExecute(Instr* instr);
+
+ // Helper functions to set the conditional flags in the architecture state.
+ void SetNZFlags(int32_t val);
+ void SetCFlag(bool val);
+ void SetVFlag(bool val);
+ bool CarryFrom(int32_t left, int32_t right);
+ bool BorrowFrom(int32_t left, int32_t right);
+ bool OverflowFrom(int32_t alu_out,
+ int32_t left,
+ int32_t right,
+ bool addition);
+
+ // Helper functions to decode common "addressing" modes
+ int32_t GetShiftRm(Instr* instr, bool* carry_out);
+ int32_t GetImm(Instr* instr, bool* carry_out);
+ void HandleRList(Instr* instr, bool load);
+ void SoftwareInterrupt(Instr* instr);
+
+ // Read and write memory.
+ inline uint8_t ReadBU(int32_t addr);
+ inline int8_t ReadB(int32_t addr);
+ inline void WriteB(int32_t addr, uint8_t value);
+ inline void WriteB(int32_t addr, int8_t value);
+
+ inline uint16_t ReadHU(int32_t addr, Instr* instr);
+ inline int16_t ReadH(int32_t addr, Instr* instr);
+ // Note: Overloaded on the sign of the value.
+ inline void WriteH(int32_t addr, uint16_t value, Instr* instr);
+ inline void WriteH(int32_t addr, int16_t value, Instr* instr);
+
+ inline int ReadW(int32_t addr, Instr* instr);
+ inline void WriteW(int32_t addr, int value, Instr* instr);
+
+ // Executing is handled based on the instruction type.
+ void DecodeType01(Instr* instr); // both type 0 and type 1 rolled into one
+ void DecodeType2(Instr* instr);
+ void DecodeType3(Instr* instr);
+ void DecodeType4(Instr* instr);
+ void DecodeType5(Instr* instr);
+ void DecodeType6(Instr* instr);
+ void DecodeType7(Instr* instr);
+
+ // Executes one instruction.
+ void InstructionDecode(Instr* instr);
+
+ // For use in calls that take two double values, constructed from r0, r1, r2
+ // and r3.
+ void GetFpArgs(double* x, double* y);
+ void SetFpResult(const double& result);
+ void TrashCallerSaveRegisters();
+
+ // architecture state
+ int32_t registers_[16];
+ bool n_flag_;
+ bool z_flag_;
+ bool c_flag_;
+ bool v_flag_;
+
+ // simulator support
+ char* stack_;
+ bool pc_modified_;
+ int icount_;
+
+ // registered breakpoints
+ Instr* break_pc_;
+ instr_t break_instr_;
+};
+
+} } // namespace assembler::arm
+
+#endif // defined(__arm__)
+
+#endif // V8_ARM_SIMULATOR_ARM_H_
diff --git a/V8Binding/v8/src/arm/stub-cache-arm.cc b/V8Binding/v8/src/arm/stub-cache-arm.cc
new file mode 100644
index 0000000..c09f9e3
--- /dev/null
+++ b/V8Binding/v8/src/arm/stub-cache-arm.cc
@@ -0,0 +1,1148 @@
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "ic-inl.h"
+#include "codegen-inl.h"
+#include "stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(MacroAssembler* masm,
+ Code::Flags flags,
+ StubCache::Table table,
+ Register name,
+ Register offset) {
+ ExternalReference key_offset(SCTableReference::keyReference(table));
+ ExternalReference value_offset(SCTableReference::valueReference(table));
+
+ Label miss;
+
+ // Save the offset on the stack.
+ __ push(offset);
+
+ // Check that the key in the entry matches the name.
+ __ mov(ip, Operand(key_offset));
+ __ ldr(ip, MemOperand(ip, offset, LSL, 1));
+ __ cmp(name, Operand(ip));
+ __ b(ne, &miss);
+
+ // Get the code entry from the cache.
+ __ mov(ip, Operand(value_offset));
+ __ ldr(offset, MemOperand(ip, offset, LSL, 1));
+
+ // Check that the flags match what we're looking for.
+ __ ldr(offset, FieldMemOperand(offset, Code::kFlagsOffset));
+ __ and_(offset, offset, Operand(~Code::kFlagsNotUsedInLookup));
+ __ cmp(offset, Operand(flags));
+ __ b(ne, &miss);
+
+ // Restore offset and re-load code entry from cache.
+ __ pop(offset);
+ __ mov(ip, Operand(value_offset));
+ __ ldr(offset, MemOperand(ip, offset, LSL, 1));
+
+ // Jump to the first instruction in the code stub.
+ __ add(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(offset);
+
+ // Miss: Restore offset and fall through.
+ __ bind(&miss);
+ __ pop(offset);
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm,
+ Code::Flags flags,
+ Register receiver,
+ Register name,
+ Register scratch) {
+ Label miss;
+
+ // Make sure that code is valid. The shifting code relies on the
+ // entry size being 8.
+ ASSERT(sizeof(Entry) == 8);
+
+ // Make sure the flags do not name a specific type.
+ ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
+
+ // Make sure that there are no register conflicts.
+ ASSERT(!scratch.is(receiver));
+ ASSERT(!scratch.is(name));
+
+ // Check that the receiver isn't a smi.
+ __ tst(receiver, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ // Get the map of the receiver and compute the hash.
+ __ ldr(scratch, FieldMemOperand(name, String::kLengthOffset));
+ __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ add(scratch, scratch, Operand(ip));
+ __ eor(scratch, scratch, Operand(flags));
+ __ and_(scratch,
+ scratch,
+ Operand((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+
+ // Probe the primary table.
+ ProbeTable(masm, flags, kPrimary, name, scratch);
+
+ // Primary miss: Compute hash for secondary probe.
+ __ sub(scratch, scratch, Operand(name));
+ __ add(scratch, scratch, Operand(flags));
+ __ and_(scratch,
+ scratch,
+ Operand((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+
+ // Probe the secondary table.
+ ProbeTable(masm, flags, kSecondary, name, scratch);
+
+ // Cache miss: Fall-through and let caller handle the miss by
+ // entering the runtime system.
+ __ bind(&miss);
+}
+
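+// Editor's note: the probe offset computed above is
+//   ((name->length_field + receiver->map) ^ flags) & mask
+// with mask = (kPrimaryTableSize - 1) << kHeapObjectTagSize, so repeated
+// lookups of the same (map, name, flags) triple land on the same entry.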
+
+void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
+ int index,
+ Register prototype) {
+ // Load the global or builtins object from the current context.
+ __ ldr(prototype, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ // Load the global context from the global or builtins object.
+ __ ldr(prototype,
+ FieldMemOperand(prototype, GlobalObject::kGlobalContextOffset));
+ // Load the function from the global context.
+ __ ldr(prototype, MemOperand(prototype, Context::SlotOffset(index)));
+ // Load the initial map. The global functions all have initial maps.
+ __ ldr(prototype,
+ FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
+ // Load the prototype from the initial map.
+ __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+// Load a fast property out of a holder object (src). In-object properties
+// are loaded directly; otherwise the property is loaded from the properties
+// fixed array.
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst, Register src,
+ JSObject* holder, int index) {
+ // Adjust for the number of properties stored in the holder.
+ index -= holder->map()->inobject_properties();
+ if (index < 0) {
+ // Get the property straight out of the holder.
+ int offset = holder->map()->instance_size() + (index * kPointerSize);
+ __ ldr(dst, FieldMemOperand(src, offset));
+ } else {
+ // Calculate the offset into the properties array.
+ int offset = index * kPointerSize + Array::kHeaderSize;
+ __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
+ __ ldr(dst, FieldMemOperand(dst, offset));
+ }
+}
+
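+// Editor's worked example: with two in-object properties, index 1 becomes
+// -1 after the adjustment and is read straight out of the object at
+// instance_size - kPointerSize, while index 2 becomes 0 and is read from
+// slot 0 of the properties array instead.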
+
+void StubCompiler::GenerateLoadField(MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ int index,
+ Label* miss_label) {
+ // Check that the receiver isn't a smi.
+ __ tst(receiver, Operand(kSmiTagMask));
+ __ b(eq, miss_label);
+
+ // Check that the maps haven't changed.
+ Register reg =
+ masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
+ GenerateFastPropertyLoad(masm, r0, reg, holder, index);
+ __ Ret();
+}
+
+
+void StubCompiler::GenerateLoadConstant(MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Object* value,
+ Label* miss_label) {
+ // Check that the receiver isn't a smi.
+ __ tst(receiver, Operand(kSmiTagMask));
+ __ b(eq, miss_label);
+
+ // Check that the maps haven't changed.
+ Register reg =
+ masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
+
+ // Return the constant value.
+ __ mov(r0, Operand(Handle<Object>(value)));
+ __ Ret();
+}
+
+
+void StubCompiler::GenerateLoadCallback(MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ Register receiver,
+ Register name,
+ Register scratch1,
+ Register scratch2,
+ AccessorInfo* callback,
+ Label* miss_label) {
+ // Check that the receiver isn't a smi.
+ __ tst(receiver, Operand(kSmiTagMask));
+ __ b(eq, miss_label);
+
+ // Check that the maps haven't changed.
+ Register reg =
+ masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
+
+ // Push the arguments on the JS stack of the caller.
+ __ push(receiver); // receiver
+ __ mov(ip, Operand(Handle<AccessorInfo>(callback))); // callback data
+ __ push(ip);
+ __ push(name); // name
+ __ push(reg); // holder
+
+ // Do tail-call to the runtime system.
+ ExternalReference load_callback_property =
+ ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+ __ TailCallRuntime(load_callback_property, 4);
+}
+
+
+void StubCompiler::GenerateLoadInterceptor(MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ Smi* lookup_hint,
+ Register receiver,
+ Register name,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ // Check that the receiver isn't a smi.
+ __ tst(receiver, Operand(kSmiTagMask));
+ __ b(eq, miss_label);
+
+ // Check that the maps haven't changed.
+ Register reg =
+ masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
+
+ // Push the arguments on the JS stack of the caller.
+ __ push(receiver); // receiver
+ __ push(reg); // holder
+ __ push(name); // name
+ __ mov(scratch1, Operand(lookup_hint));
+ __ push(scratch1);
+
+ // Do tail-call to the runtime system.
+ ExternalReference load_ic_property =
+ ExternalReference(IC_Utility(IC::kLoadInterceptorProperty));
+ __ TailCallRuntime(load_ic_property, 4);
+}
+
+
+void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch,
+ Label* miss_label) {
+ // Check that the receiver isn't a smi.
+ __ tst(receiver, Operand(kSmiTagMask));
+ __ b(eq, miss_label);
+
+ // Check that the object is a JS array.
+ __ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ cmp(scratch, Operand(JS_ARRAY_TYPE));
+ __ b(ne, miss_label);
+
+ // Load length directly from the JS array.
+ __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Ret();
+}
+
+
+// Generate code to check if an object is a string. If the object is
+// a string, the map's instance type is left in the scratch1 register.
+static void GenerateStringCheck(MacroAssembler* masm,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* smi,
+ Label* non_string_object) {
+ // Check that the receiver isn't a smi.
+ __ tst(receiver, Operand(kSmiTagMask));
+ __ b(eq, smi);
+
+ // Check that the object is a string.
+ __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ and_(scratch2, scratch1, Operand(kIsNotStringMask));
+ // The cast resolves the Operand overload for the literal 0x0.
+ __ cmp(scratch2, Operand(static_cast<int32_t>(kStringTag)));
+ __ b(ne, non_string_object);
+}
+
+
+// Generate code to load the length from a string object and return the length.
+// If the receiver object is not a string or a wrapped string object, the
+// execution continues at the miss label. The register containing the
+// receiver is potentially clobbered.
+void StubCompiler::GenerateLoadStringLength2(MacroAssembler* masm,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss) {
+ Label check_string, check_wrapper;
+
+ __ bind(&check_string);
+ // Check if the object is a string leaving the instance type in the
+ // scratch1 register.
+ GenerateStringCheck(masm, receiver, scratch1, scratch2,
+ miss, &check_wrapper);
+
+ // Load length directly from the string.
+ __ and_(scratch1, scratch1, Operand(kStringSizeMask));
+ __ add(scratch1, scratch1, Operand(String::kHashShift));
+ __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
+ __ mov(r0, Operand(r0, LSR, scratch1));
+ __ mov(r0, Operand(r0, LSL, kSmiTagSize));
+ __ Ret();
+
+ // Check if the object is a JSValue wrapper.
+ __ bind(&check_wrapper);
+ __ cmp(scratch1, Operand(JS_VALUE_TYPE));
+ __ b(ne, miss);
+
+ // Unwrap the value in place and check if the wrapped value is a string.
+ __ ldr(receiver, FieldMemOperand(receiver, JSValue::kValueOffset));
+ __ b(&check_string);
+}
+
+
+// Generate StoreField code; the value is passed in the r0 register.
+// After executing the generated code, the receiver_reg and name_reg
+// may be clobbered.
+void StubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Builtins::Name storage_extend,
+ JSObject* object,
+ int index,
+ Map* transition,
+ Register receiver_reg,
+ Register name_reg,
+ Register scratch,
+ Label* miss_label) {
+ // r0 : value
+ Label exit;
+
+ // Check that the receiver isn't a smi.
+ __ tst(receiver_reg, Operand(kSmiTagMask));
+ __ b(eq, miss_label);
+
+ // Check that the map of the receiver hasn't changed.
+ __ ldr(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+ __ cmp(scratch, Operand(Handle<Map>(object->map())));
+ __ b(ne, miss_label);
+
+ // Perform global security token check if needed.
+ if (object->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(receiver_reg, scratch, miss_label);
+ }
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ // Perform map transition for the receiver if necessary.
+ if ((transition != NULL) && (object->map()->unused_property_fields() == 0)) {
+ // The properties must be extended before we can store the value.
+ // We jump to a runtime call that extends the properties array.
+ __ mov(r2, Operand(Handle<Map>(transition)));
+ // Note: if we implement keyed stores for ARM, we need to call
+ // Builtins::KeyedStoreIC_ExtendStorage instead.
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_ExtendStorage));
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+ return;
+ }
+
+ if (transition != NULL) {
+ // Update the map of the object; no write barrier updating is
+ // needed because the map is never in new space.
+ __ mov(ip, Operand(Handle<Map>(transition)));
+ __ str(ip, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+ }
+
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties is not going to change.
+ index -= object->map()->inobject_properties();
+
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ str(r0, FieldMemOperand(receiver_reg, offset));
+
+ // Skip updating write barrier if storing a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &exit);
+
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ mov(name_reg, Operand(offset));
+ __ RecordWrite(receiver_reg, name_reg, scratch);
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + Array::kHeaderSize;
+ // Get the properties array
+ __ ldr(scratch, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ str(r0, FieldMemOperand(scratch, offset));
+
+ // Skip updating write barrier if storing a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &exit);
+
+ // Update the write barrier for the array address.
+ // Ok to clobber receiver_reg and name_reg, since we return.
+ __ mov(name_reg, Operand(offset));
+ __ RecordWrite(scratch, name_reg, receiver_reg);
+ }
+
+ // Return the value (register r0).
+ __ bind(&exit);
+ __ Ret();
+}
+
+
+void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
+ ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC);
+ Code* code = NULL;
+ if (kind == Code::LOAD_IC) {
+ code = Builtins::builtin(Builtins::LoadIC_Miss);
+ } else {
+ code = Builtins::builtin(Builtins::KeyedLoadIC_Miss);
+ }
+
+ Handle<Code> ic(code);
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
+ // ----------- S t a t e -------------
+ // -- r1: function
+ // -- lr: return address
+ // -----------------------------------
+
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+
+ // Preserve the function.
+ __ push(r1);
+
+ // Push the function on the stack as the argument to the runtime function.
+ __ push(r1);
+ __ CallRuntime(Runtime::kLazyCompile, 1);
+
+ // Calculate the entry point.
+ __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // Restore saved function.
+ __ pop(r1);
+
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
+
+ // Do a tail-call of the compiled function.
+ __ Jump(r2);
+
+ return GetCodeWithFlags(flags, "LazyCompileStub");
+}
+
+
+Object* CallStubCompiler::CompileCallField(Object* object,
+ JSObject* holder,
+ int index,
+ String* name,
+ Code::Flags flags) {
+ ASSERT_EQ(FIELD, Code::ExtractTypeFromFlags(flags));
+ // ----------- S t a t e -------------
+ // -- lr: return address
+ // -----------------------------------
+ Label miss;
+
+ const int argc = arguments().immediate();
+
+ // Get the receiver of the function from the stack into r0.
+ __ ldr(r0, MemOperand(sp, argc * kPointerSize));
+ // Check that the receiver isn't a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ // Do the right check and compute the holder register.
+ Register reg =
+ masm()->CheckMaps(JSObject::cast(object), r0, holder, r3, r2, &miss);
+ GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
+
+ // Check that the function really is a function.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+ // Get the map.
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ __ cmp(r2, Operand(JS_FUNCTION_TYPE));
+ __ b(ne, &miss);
+
+ // Patch the receiver on the stack with the global proxy if
+ // necessary.
+ if (object->IsGlobalObject()) {
+ __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
+ __ str(r3, MemOperand(sp, argc * kPointerSize));
+ }
+
+ // Invoke the function.
+ __ InvokeFunction(r1, arguments(), JUMP_FUNCTION);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCodeWithFlags(flags, name);
+}
+
+
+Object* CallStubCompiler::CompileCallConstant(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ CheckType check,
+ Code::Flags flags) {
+ ASSERT_EQ(CONSTANT_FUNCTION, Code::ExtractTypeFromFlags(flags));
+ // ----------- S t a t e -------------
+ // -- lr: return address
+ // -----------------------------------
+ Label miss;
+
+ // Get the receiver from the stack
+ const int argc = arguments().immediate();
+ __ ldr(r1, MemOperand(sp, argc * kPointerSize));
+
+ // Check that the receiver isn't a smi.
+ if (check != NUMBER_CHECK) {
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+ }
+
+ // Make sure that it's okay not to patch the on-stack receiver
+ // unless we're doing a receiver map check.
+ ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
+
+ switch (check) {
+ case RECEIVER_MAP_CHECK:
+ // Check that the maps haven't changed.
+ __ CheckMaps(JSObject::cast(object), r1, holder, r3, r2, &miss);
+
+ // Patch the receiver on the stack with the global proxy if
+ // necessary.
+ if (object->IsGlobalObject()) {
+ __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
+ __ str(r3, MemOperand(sp, argc * kPointerSize));
+ }
+ break;
+
+ case STRING_CHECK:
+ // Check that the object is a two-byte string or a symbol.
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ __ cmp(r2, Operand(FIRST_NONSTRING_TYPE));
+ __ b(hs, &miss);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ r2);
+ __ CheckMaps(JSObject::cast(object->GetPrototype()),
+ r2, holder, r3, r1, &miss);
+ break;
+
+ case NUMBER_CHECK: {
+ Label fast;
+ // Check that the object is a smi or a heap number.
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(eq, &fast);
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ __ cmp(r2, Operand(HEAP_NUMBER_TYPE));
+ __ b(ne, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::NUMBER_FUNCTION_INDEX,
+ r2);
+ __ CheckMaps(JSObject::cast(object->GetPrototype()),
+ r2, holder, r3, r1, &miss);
+ break;
+ }
+
+ case BOOLEAN_CHECK: {
+ Label fast;
+ // Check that the object is a boolean.
+ __ cmp(r1, Operand(Factory::true_value()));
+ __ b(eq, &fast);
+ __ cmp(r1, Operand(Factory::false_value()));
+ __ b(ne, &miss);
+ __ bind(&fast);
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::BOOLEAN_FUNCTION_INDEX,
+ r2);
+ __ CheckMaps(JSObject::cast(object->GetPrototype()),
+ r2, holder, r3, r1, &miss);
+ break;
+ }
+
+ case JSARRAY_HAS_FAST_ELEMENTS_CHECK:
+ __ CheckMaps(JSObject::cast(object), r1, holder, r3, r2, &miss);
+ // Make sure object->elements()->map() != Heap::hash_table_map()
+ // Get the elements array of the object.
+ __ ldr(r3, FieldMemOperand(r1, JSObject::kElementsOffset));
+ // Check that the object is in fast mode (not dictionary).
+ __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ cmp(r2, Operand(Factory::hash_table_map()));
+ __ b(eq, &miss);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+
+ // Get the function and setup the context.
+ __ mov(r1, Operand(Handle<JSFunction>(function)));
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+ // Jump to the cached code (tail call).
+ Handle<Code> code(function->code());
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ __ InvokeCode(code, expected, arguments(),
+ RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ String* function_name = NULL;
+ if (function->shared()->name()->IsString()) {
+ function_name = String::cast(function->shared()->name());
+ }
+ return GetCodeWithFlags(flags, function_name);
+}
+
+
+Object* CallStubCompiler::CompileCallInterceptor(Object* object,
+ JSObject* holder,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- lr: return address
+ // -----------------------------------
+ Label miss;
+
+ // TODO(1224669): Implement.
+
+ // Handle call cache miss.
+ __ bind(&miss);
+ Handle<Code> ic = ComputeCallMiss(arguments().immediate());
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(INTERCEPTOR, name);
+}
+
+
+Object* StoreStubCompiler::CompileStoreField(JSObject* object,
+ int index,
+ Map* transition,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Get the receiver from the stack.
+ __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
+
+ // The name register might be clobbered.
+ GenerateStoreField(masm(),
+ Builtins::StoreIC_ExtendStorage,
+ object,
+ index,
+ transition,
+ r3, r2, r1,
+ &miss);
+ __ bind(&miss);
+ __ mov(r2, Operand(Handle<String>(name))); // restore name
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+}
+
+
+Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
+ AccessorInfo* callback,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Get the object from the stack.
+ __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
+
+ // Check that the object isn't a smi.
+ __ tst(r3, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ // Check that the map of the object hasn't changed.
+ __ ldr(r1, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ cmp(r1, Operand(Handle<Map>(object->map())));
+ __ b(ne, &miss);
+
+ // Perform global security token check if needed.
+ if (object->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(r3, r1, &miss);
+ }
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ __ ldr(ip, MemOperand(sp)); // receiver
+ __ push(ip);
+ __ mov(ip, Operand(Handle<AccessorInfo>(callback))); // callback info
+ __ push(ip);
+ __ push(r2); // name
+ __ push(r0); // value
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_callback_property =
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty));
+ __ TailCallRuntime(store_callback_property, 4);
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ __ mov(r2, Operand(Handle<String>(name))); // restore name
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
+}
+
+
+Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Get the object from the stack.
+ __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
+
+ // Check that the object isn't a smi.
+ __ tst(r3, Operand(kSmiTagMask));
+ __ b(eq, &miss);
+
+ // Check that the map of the object hasn't changed.
+ __ ldr(r1, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ cmp(r1, Operand(Handle<Map>(receiver->map())));
+ __ b(ne, &miss);
+
+ // Perform global security token check if needed.
+ if (receiver->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(r3, r1, &miss);
+ }
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());
+
+ __ ldr(ip, MemOperand(sp)); // receiver
+ __ push(ip);
+ __ push(r2); // name
+ __ push(r0); // value
+
+ // Do tail-call to the runtime system.
+ ExternalReference store_ic_property =
+ ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
+ __ TailCallRuntime(store_ic_property, 3);
+
+ // Handle store cache miss.
+ __ bind(&miss);
+ __ mov(r2, Operand(Handle<String>(name))); // restore name
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(INTERCEPTOR, name);
+}
+
+
+Object* LoadStubCompiler::CompileLoadField(JSObject* object,
+ JSObject* holder,
+ int index,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ ldr(r0, MemOperand(sp, 0));
+
+ GenerateLoadField(masm(), object, holder, r0, r3, r1, index, &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(FIELD, name);
+}
+
+
+Object* LoadStubCompiler::CompileLoadCallback(JSObject* object,
+ JSObject* holder,
+ AccessorInfo* callback,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ ldr(r0, MemOperand(sp, 0));
+ GenerateLoadCallback(masm(), object, holder, r0, r2, r3, r1, callback, &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CALLBACKS, name);
+}
+
+
+Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
+ JSObject* holder,
+ Object* value,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ ldr(r0, MemOperand(sp, 0));
+
+ GenerateLoadConstant(masm(), object, holder, r0, r3, r1, value, &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CONSTANT_FUNCTION, name);
+}
+
+
+Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
+ JSObject* holder,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ ldr(r0, MemOperand(sp, 0));
+
+ GenerateLoadInterceptor(masm(),
+ object,
+ holder,
+ holder->InterceptorPropertyLookupHint(name),
+ r0,
+ r2,
+ r3,
+ r1,
+ &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(INTERCEPTOR, name);
+}
+
+
+// TODO(1224671): The generic (fast-case) IC stubs for keyed loads have
+// not been implemented for ARM; the stubs below handle only keys that
+// match the cached property name.
+Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ int index) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- sp[0] : key
+ // -- sp[4] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ ldr(r2, MemOperand(sp, 0));
+ __ ldr(r0, MemOperand(sp, kPointerSize));
+
+ __ cmp(r2, Operand(Handle<String>(name)));
+ __ b(ne, &miss);
+
+ GenerateLoadField(masm(), receiver, holder, r0, r3, r1, index, &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ return GetCode(FIELD, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ AccessorInfo* callback) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- sp[0] : key
+ // -- sp[4] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ ldr(r2, MemOperand(sp, 0));
+ __ ldr(r0, MemOperand(sp, kPointerSize));
+
+ __ cmp(r2, Operand(Handle<String>(name)));
+ __ b(ne, &miss);
+
+ GenerateLoadCallback(masm(), receiver, holder, r0, r2, r3,
+ r1, callback, &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ return GetCode(CALLBACKS, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ Object* value) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- sp[0] : key
+ // -- sp[4] : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Check that the key is the cached one.
+ __ ldr(r2, MemOperand(sp, 0));
+ __ ldr(r0, MemOperand(sp, kPointerSize));
+
+ __ cmp(r2, Operand(Handle<String>(name)));
+ __ b(ne, &miss);
+
+ GenerateLoadConstant(masm(), receiver, holder, r0, r3, r1, value, &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ // Return the generated code.
+ return GetCode(CONSTANT_FUNCTION, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+ JSObject* holder,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- sp[0] : key
+ // -- sp[4] : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Check that the key is the cached one.
+ __ ldr(r2, MemOperand(sp, 0));
+ __ ldr(r0, MemOperand(sp, kPointerSize));
+
+ __ cmp(r2, Operand(Handle<String>(name)));
+ __ b(ne, &miss);
+
+ GenerateLoadInterceptor(masm(),
+ receiver,
+ holder,
+ Smi::FromInt(JSObject::kLookupInHolder),
+ r0,
+ r2,
+ r3,
+ r1,
+ &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ return GetCode(INTERCEPTOR, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- sp[0] : key
+ // -- sp[4] : receiver
+ // -----------------------------------
+ Label miss;
+
+ // Check that the key is the cached one.
+ __ ldr(r2, MemOperand(sp, 0));
+ __ ldr(r0, MemOperand(sp, kPointerSize));
+
+ __ cmp(r2, Operand(Handle<String>(name)));
+ __ b(ne, &miss);
+
+ GenerateLoadArrayLength(masm(), r0, r3, &miss);
+ __ bind(&miss);
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ return GetCode(CALLBACKS, name);
+}
+
+
+Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- sp[0] : key
+ // -- sp[4] : receiver
+ // -----------------------------------
+ Label miss;
+ __ IncrementCounter(&Counters::keyed_load_string_length, 1, r1, r3);
+
+ __ ldr(r2, MemOperand(sp));
+ __ ldr(r0, MemOperand(sp, kPointerSize)); // receiver
+
+ __ cmp(r2, Operand(Handle<String>(name)));
+ __ b(ne, &miss);
+
+ GenerateLoadStringLength2(masm(), r0, r1, r3, &miss);
+ __ bind(&miss);
+ __ DecrementCounter(&Counters::keyed_load_string_length, 1, r1, r3);
+
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ return GetCode(CALLBACKS, name);
+}
+
+
+// TODO(1224671): implement the fast case.
+Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- sp[0] : key
+ // -- sp[4] : receiver
+ // -----------------------------------
+ GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
+
+ return GetCode(CALLBACKS, name);
+}
+
+
+Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+ int index,
+ Map* transition,
+ String* name) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r2 : name
+ // -- lr : return address
+ // -- [sp] : receiver
+ // -----------------------------------
+ Label miss;
+
+ __ IncrementCounter(&Counters::keyed_store_field, 1, r1, r3);
+
+ // Check that the name has not changed.
+ __ cmp(r2, Operand(Handle<String>(name)));
+ __ b(ne, &miss);
+
+ // Load receiver from the stack.
+ __ ldr(r3, MemOperand(sp));
+ // r1 is used as a scratch register; r3 and r2 might be clobbered.
+ GenerateStoreField(masm(),
+ Builtins::StoreIC_ExtendStorage,
+ object,
+ index,
+ transition,
+ r3, r2, r1,
+ &miss);
+ __ bind(&miss);
+
+ __ DecrementCounter(&Counters::keyed_store_field, 1, r1, r3);
+ __ mov(r2, Operand(Handle<String>(name))); // restore name register.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(transition == NULL ? FIELD : MAP_TRANSITION, name);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
diff --git a/V8Binding/v8/src/arm/virtual-frame-arm.cc b/V8Binding/v8/src/arm/virtual-frame-arm.cc
new file mode 100644
index 0000000..9527383
--- /dev/null
+++ b/V8Binding/v8/src/arm/virtual-frame-arm.cc
@@ -0,0 +1,439 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen-inl.h"
+#include "register-allocator-inl.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// VirtualFrame implementation.
+
+#define __ ACCESS_MASM(masm())
+
+
+// On entry to a function, the virtual frame already contains the
+// receiver and the parameters. All initial frame elements are in
+// memory.
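+// For example, for a function with two parameters the initial frame
+// holds three memory elements -- the receiver at index 0 and the
+// parameters at indices 1 and 2 -- so stack_pointer_ starts out at 2.
+// Space for the locals is claimed later by AllocateStackSlots().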
+VirtualFrame::VirtualFrame()
+ : elements_(parameter_count() + local_count() + kPreallocatedElements),
+ stack_pointer_(parameter_count()) { // 0-based index of TOS.
+ for (int i = 0; i <= stack_pointer_; i++) {
+ elements_.Add(FrameElement::MemoryElement());
+ }
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ register_locations_[i] = kIllegalIndex;
+ }
+}
+
+
+void VirtualFrame::SyncElementBelowStackPointer(int index) {
+ UNREACHABLE();
+}
+
+
+void VirtualFrame::SyncElementByPushing(int index) {
+ UNREACHABLE();
+}
+
+
+void VirtualFrame::SyncRange(int begin, int end) {
+ // All elements are in memory on ARM (ie, synced).
+#ifdef DEBUG
+ for (int i = begin; i <= end; i++) {
+ ASSERT(elements_[i].is_synced());
+ }
+#endif
+}
+
+
+void VirtualFrame::MergeTo(VirtualFrame* expected) {
+ Comment cmnt(masm(), "[ Merge frame");
+ // We should always be merging the code generator's current frame to an
+ // expected frame.
+ ASSERT(cgen()->frame() == this);
+
+ // Adjust the stack pointer upward (toward the top of the virtual
+ // frame) if necessary.
+ if (stack_pointer_ < expected->stack_pointer_) {
+ int difference = expected->stack_pointer_ - stack_pointer_;
+ stack_pointer_ = expected->stack_pointer_;
+ __ sub(sp, sp, Operand(difference * kPointerSize));
+ }
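+ // (For example, if this frame is two elements shorter than the
+ // expected one, the code above emits a single "sub sp, sp, #8".)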
+
+ MergeMoveRegistersToMemory(expected);
+ MergeMoveRegistersToRegisters(expected);
+ MergeMoveMemoryToRegisters(expected);
+
+ // Fix any sync bit problems from the bottom-up, stopping when we
+ // hit the stack pointer or the top of the frame if the stack
+ // pointer is floating above the frame.
+ int limit = Min(static_cast<int>(stack_pointer_), element_count() - 1);
+ for (int i = 0; i <= limit; i++) {
+ FrameElement source = elements_[i];
+ FrameElement target = expected->elements_[i];
+ if (source.is_synced() && !target.is_synced()) {
+ elements_[i].clear_sync();
+ } else if (!source.is_synced() && target.is_synced()) {
+ SyncElementAt(i);
+ }
+ }
+
+ // Adjust the stack pointer downward if necessary.
+ if (stack_pointer_ > expected->stack_pointer_) {
+ int difference = stack_pointer_ - expected->stack_pointer_;
+ stack_pointer_ = expected->stack_pointer_;
+ __ add(sp, sp, Operand(difference * kPointerSize));
+ }
+
+ // At this point, the frames should be identical.
+ ASSERT(Equals(expected));
+}
+
+
+void VirtualFrame::MergeMoveRegistersToMemory(VirtualFrame* expected) {
+ ASSERT(stack_pointer_ >= expected->stack_pointer_);
+
+ // Move registers, constants, and copies to memory. Perform moves
+ // from the top downward in the frame in order to leave the backing
+ // stores of copies in registers.
+ // On ARM, all elements are in memory.
+
+#ifdef DEBUG
+ int start = Min(static_cast<int>(stack_pointer_), element_count() - 1);
+ for (int i = start; i >= 0; i--) {
+ ASSERT(elements_[i].is_memory());
+ ASSERT(expected->elements_[i].is_memory());
+ }
+#endif
+}
+
+
+void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
+}
+
+
+void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame* expected) {
+}
+
+
+void VirtualFrame::Enter() {
+ Comment cmnt(masm(), "[ Enter JS frame");
+
+#ifdef DEBUG
+ // Verify that r1 contains a JS function. The following code relies
+ // on r2 being available for use.
+ { Label map_check, done;
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(ne, &map_check);
+ __ stop("VirtualFrame::Enter - r1 is not a function (smi check).");
+ __ bind(&map_check);
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ __ cmp(r2, Operand(JS_FUNCTION_TYPE));
+ __ b(eq, &done);
+ __ stop("VirtualFrame::Enter - r1 is not a function (map check).");
+ __ bind(&done);
+ }
+#endif // DEBUG
+
+ // We are about to push four values to the frame.
+ Adjust(4);
+ __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ // Adjust FP to point to saved FP.
+ __ add(fp, sp, Operand(2 * kPointerSize));
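+ // The stm above stores the lowest-numbered register at the lowest
+ // address, so the frame now looks like this, with fp pointing at the
+ // saved frame pointer:
+ //   sp[0]  : r1 (function)
+ //   sp[4]  : cp (context)
+ //   sp[8]  : caller's fp   <-- fp
+ //   sp[12] : lr (return address)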
+ cgen()->allocator()->Unuse(r1);
+ cgen()->allocator()->Unuse(lr);
+}
+
+
+void VirtualFrame::Exit() {
+ Comment cmnt(masm(), "[ Exit JS frame");
+ // Drop the execution stack down to the frame pointer and restore the caller
+ // frame pointer and return address.
+ __ mov(sp, fp);
+ __ ldm(ia_w, sp, fp.bit() | lr.bit());
+}
+
+
+void VirtualFrame::AllocateStackSlots() {
+ int count = local_count();
+ if (count > 0) {
+ Comment cmnt(masm(), "[ Allocate space for locals");
+ Adjust(count);
+ // Initialize stack slots with 'undefined' value.
+ __ mov(ip, Operand(Factory::undefined_value()));
+ for (int i = 0; i < count; i++) {
+ __ push(ip);
+ }
+ }
+}
+
+
+void VirtualFrame::SaveContextRegister() {
+ UNIMPLEMENTED();
+}
+
+
+void VirtualFrame::RestoreContextRegister() {
+ UNIMPLEMENTED();
+}
+
+
+void VirtualFrame::PushReceiverSlotAddress() {
+ UNIMPLEMENTED();
+}
+
+
+int VirtualFrame::InvalidateFrameSlotAt(int index) {
+ UNIMPLEMENTED();
+ return kIllegalIndex;
+}
+
+
+void VirtualFrame::TakeFrameSlotAt(int index) {
+ UNIMPLEMENTED();
+}
+
+
+void VirtualFrame::StoreToFrameSlotAt(int index) {
+ UNIMPLEMENTED();
+}
+
+
+void VirtualFrame::PushTryHandler(HandlerType type) {
+ // Grow the expression stack by handler size less one (the return address
+ // is already pushed by a call instruction).
+ Adjust(kHandlerSize - 1);
+ __ PushTryHandler(IN_JAVASCRIPT, type);
+}
+
+
+Result VirtualFrame::RawCallStub(CodeStub* stub) {
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ CallStub(stub);
+ Result result = cgen()->allocator()->Allocate(r0);
+ ASSERT(result.is_valid());
+ return result;
+}
+
+
+Result VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
+ PrepareForCall(0, 0);
+ arg->Unuse();
+ return RawCallStub(stub);
+}
+
+
+Result VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
+ PrepareForCall(0, 0);
+ arg0->Unuse();
+ arg1->Unuse();
+ return RawCallStub(stub);
+}
+
+
+Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
+ PrepareForCall(arg_count, arg_count);
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ CallRuntime(f, arg_count);
+ Result result = cgen()->allocator()->Allocate(r0);
+ ASSERT(result.is_valid());
+ return result;
+}
+
+
+Result VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
+ PrepareForCall(arg_count, arg_count);
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ CallRuntime(id, arg_count);
+ Result result = cgen()->allocator()->Allocate(r0);
+ ASSERT(result.is_valid());
+ return result;
+}
+
+
+Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
+ InvokeJSFlags flags,
+ Result* arg_count_register,
+ int arg_count) {
+ ASSERT(arg_count_register->reg().is(r0));
+ PrepareForCall(arg_count, arg_count);
+ arg_count_register->Unuse();
+ __ InvokeBuiltin(id, flags);
+ Result result = cgen()->allocator()->Allocate(r0);
+ return result;
+}
+
+
+Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
+ RelocInfo::Mode rmode) {
+ ASSERT(cgen()->HasValidEntryRegisters());
+ __ Call(code, rmode);
+ Result result = cgen()->allocator()->Allocate(r0);
+ ASSERT(result.is_valid());
+ return result;
+}
+
+
+Result VirtualFrame::CallCodeObject(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ int dropped_args) {
+ int spilled_args = 0;
+ switch (code->kind()) {
+ case Code::CALL_IC:
+ spilled_args = dropped_args + 1;
+ break;
+ case Code::FUNCTION:
+ spilled_args = dropped_args + 1;
+ break;
+ case Code::KEYED_LOAD_IC:
+ ASSERT(dropped_args == 0);
+ spilled_args = 2;
+ break;
+ default:
+ // The other types of code objects are called with values
+ // in specific registers, and are handled in functions with
+ // a different signature.
+ UNREACHABLE();
+ break;
+ }
+ PrepareForCall(spilled_args, dropped_args);
+ return RawCallCodeObject(code, rmode);
+}
+
+
+Result VirtualFrame::CallCodeObject(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ Result* arg,
+ int dropped_args) {
+ int spilled_args = 0;
+ switch (code->kind()) {
+ case Code::LOAD_IC:
+ ASSERT(arg->reg().is(r2));
+ ASSERT(dropped_args == 0);
+ spilled_args = 1;
+ break;
+ case Code::KEYED_STORE_IC:
+ ASSERT(arg->reg().is(r0));
+ ASSERT(dropped_args == 0);
+ spilled_args = 2;
+ break;
+ default:
+ // No other types of code objects are called with values
+ // in exactly one register.
+ UNREACHABLE();
+ break;
+ }
+ PrepareForCall(spilled_args, dropped_args);
+ arg->Unuse();
+ return RawCallCodeObject(code, rmode);
+}
+
+
+Result VirtualFrame::CallCodeObject(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ Result* arg0,
+ Result* arg1,
+ int dropped_args) {
+ int spilled_args = 1;
+ switch (code->kind()) {
+ case Code::STORE_IC:
+ ASSERT(arg0->reg().is(r0));
+ ASSERT(arg1->reg().is(r2));
+ ASSERT(dropped_args == 0);
+ spilled_args = 1;
+ break;
+ case Code::BUILTIN:
+ ASSERT(*code == Builtins::builtin(Builtins::JSConstructCall));
+ ASSERT(arg0->reg().is(r0));
+ ASSERT(arg1->reg().is(r1));
+ spilled_args = dropped_args + 1;
+ break;
+ default:
+ // No other types of code objects are called with values
+ // in exactly two registers.
+ UNREACHABLE();
+ break;
+ }
+ PrepareForCall(spilled_args, dropped_args);
+ arg0->Unuse();
+ arg1->Unuse();
+ return RawCallCodeObject(code, rmode);
+}
+
+
+void VirtualFrame::Drop(int count) {
+ ASSERT(height() >= count);
+ int num_virtual_elements = (element_count() - 1) - stack_pointer_;
+
+ // Emit code to lower the stack pointer if necessary.
+ if (num_virtual_elements < count) {
+ int num_dropped = count - num_virtual_elements;
+ stack_pointer_ -= num_dropped;
+ __ add(sp, sp, Operand(num_dropped * kPointerSize));
+ }
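+ // On ARM all frame elements live in memory, so num_virtual_elements
+ // is normally zero and, for example, Drop(2) emits a single
+ // "add sp, sp, #8" here.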
+
+ // Discard elements from the virtual frame and free any registers.
+ for (int i = 0; i < count; i++) {
+ FrameElement dropped = elements_.RemoveLast();
+ if (dropped.is_register()) {
+ Unuse(dropped.reg());
+ }
+ }
+}
+
+
+Result VirtualFrame::Pop() {
+ UNIMPLEMENTED();
+ return Result();
+}
+
+
+void VirtualFrame::EmitPop(Register reg) {
+ ASSERT(stack_pointer_ == element_count() - 1);
+ stack_pointer_--;
+ elements_.RemoveLast();
+ __ pop(reg);
+}
+
+
+void VirtualFrame::EmitPush(Register reg) {
+ ASSERT(stack_pointer_ == element_count() - 1);
+ elements_.Add(FrameElement::MemoryElement());
+ stack_pointer_++;
+ __ push(reg);
+}
+
+
+#undef __
+
+} } // namespace v8::internal
diff --git a/V8Binding/v8/src/arm/virtual-frame-arm.h b/V8Binding/v8/src/arm/virtual-frame-arm.h
new file mode 100644
index 0000000..ebebd53
--- /dev/null
+++ b/V8Binding/v8/src/arm/virtual-frame-arm.h
@@ -0,0 +1,536 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_ARM_VIRTUAL_FRAME_ARM_H_
+#define V8_ARM_VIRTUAL_FRAME_ARM_H_
+
+#include "register-allocator.h"
+#include "scopes.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// Virtual frames
+//
+// The virtual frame is an abstraction of the physical stack frame. It
+// encapsulates the parameters, frame-allocated locals, and the expression
+// stack. It supports push/pop operations on the expression stack, as well
+// as random access to the expression stack elements, locals, and
+// parameters.
+
+class VirtualFrame : public ZoneObject {
+ public:
+ // A utility class to introduce a scope where the virtual frame is
+ // expected to remain spilled. The constructor spills the code
+ // generator's current frame, but no attempt is made to require it
+ // to stay spilled. It is intended as documentation while the code
+ // generator is being transformed.
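+ //
+ // Typical use is as a stack-allocated scope around code that expects
+ // a fully spilled frame (a sketch; the local's name is arbitrary):
+ //
+ //   VirtualFrame::SpilledScope spilled_scope;
+ //   ... code that assumes all frame elements are in memory ...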
+ class SpilledScope BASE_EMBEDDED {
+ public:
+ SpilledScope() : previous_state_(cgen()->in_spilled_code()) {
+ ASSERT(cgen()->has_valid_frame());
+ cgen()->frame()->SpillAll();
+ cgen()->set_in_spilled_code(true);
+ }
+
+ ~SpilledScope() {
+ cgen()->set_in_spilled_code(previous_state_);
+ }
+
+ private:
+ bool previous_state_;
+
+ CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+ };
+
+ // An illegal index into the virtual frame.
+ static const int kIllegalIndex = -1;
+
+ // Construct an initial virtual frame on entry to a JS function.
+ VirtualFrame();
+
+ // Construct a virtual frame as a clone of an existing one.
+ explicit VirtualFrame(VirtualFrame* original);
+
+ CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
+ MacroAssembler* masm() { return cgen()->masm(); }
+
+ // Create a duplicate of an existing valid frame element.
+ FrameElement CopyElementAt(int index);
+
+ // The number of elements on the virtual frame.
+ int element_count() { return elements_.length(); }
+
+ // The height of the virtual expression stack.
+ int height() {
+ return element_count() - expression_base_index();
+ }
+
+ int register_location(int num) {
+ ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+ return register_locations_[num];
+ }
+
+ int register_location(Register reg) {
+ return register_locations_[RegisterAllocator::ToNumber(reg)];
+ }
+
+ void set_register_location(Register reg, int index) {
+ register_locations_[RegisterAllocator::ToNumber(reg)] = index;
+ }
+
+ bool is_used(int num) {
+ ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
+ return register_locations_[num] != kIllegalIndex;
+ }
+
+ bool is_used(Register reg) {
+ return register_locations_[RegisterAllocator::ToNumber(reg)]
+ != kIllegalIndex;
+ }
+
+ // Add extra in-memory elements to the top of the frame to match an actual
+ // frame (eg, the frame after an exception handler is pushed). No code is
+ // emitted.
+ void Adjust(int count);
+
+ // Forget elements from the top of the frame to match an actual frame (eg,
+ // the frame after a runtime call). No code is emitted.
+ void Forget(int count) {
+ ASSERT(count >= 0);
+ ASSERT(stack_pointer_ == element_count() - 1);
+ stack_pointer_ -= count;
+ ForgetElements(count);
+ }
+
+ // Forget count elements from the top of the frame without adjusting
+ // the stack pointer downward. This is used, for example, before
+ // merging frames at break, continue, and return targets.
+ void ForgetElements(int count);
+
+ // Spill all values from the frame to memory.
+ void SpillAll();
+
+ // Spill all occurrences of a specific register from the frame.
+ void Spill(Register reg) {
+ if (is_used(reg)) SpillElementAt(register_location(reg));
+ }
+
+ // Spill all occurrences of an arbitrary register if possible. Return the
+ // register spilled or no_reg if it was not possible to free any register
+ // (ie, they all have frame-external references).
+ Register SpillAnyRegister();
+
+ // Prepare this virtual frame for merging to an expected frame by
+ // performing some state changes that do not require generating
+ // code. It is guaranteed that no code will be generated.
+ void PrepareMergeTo(VirtualFrame* expected);
+
+ // Make this virtual frame have a state identical to an expected virtual
+ // frame. As a side effect, code may be emitted to make this frame match
+ // the expected one.
+ void MergeTo(VirtualFrame* expected);
+
+ // Detach a frame from its code generator, perhaps temporarily. This
+ // tells the register allocator that it is free to use frame-internal
+ // registers. Used when the code generator's frame is switched from this
+ // one to NULL by an unconditional jump.
+ void DetachFromCodeGenerator() {
+ RegisterAllocator* cgen_allocator = cgen()->allocator();
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ if (is_used(i)) cgen_allocator->Unuse(i);
+ }
+ }
+
+ // (Re)attach a frame to its code generator. This informs the register
+ // allocator that the frame-internal register references are active again.
+ // Used when a code generator's frame is switched from NULL to this one by
+ // binding a label.
+ void AttachToCodeGenerator() {
+ RegisterAllocator* cgen_allocator = cgen()->allocator();
+ for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
+ if (is_used(i)) cgen_allocator->Use(i);
+ }
+ }
+
+ // Emit code for the physical JS entry and exit frame sequences. After
+ // calling Enter, the virtual frame is ready for use; and after calling
+ // Exit it should not be used. Note that Enter does not allocate space in
+ // the physical frame for storing frame-allocated locals.
+ void Enter();
+ void Exit();
+
+ // Prepare for returning from the frame by spilling locals and
+ // dropping all non-locals elements in the virtual frame. This
+ // avoids generating unnecessary merge code when jumping to the
+ // shared return site. Emits code for spills.
+ void PrepareForReturn();
+
+ // Allocate and initialize the frame-allocated locals.
+ void AllocateStackSlots();
+
+ // The current top of the expression stack as an assembly operand.
+ MemOperand Top() { return MemOperand(sp, 0); }
+
+ // An element of the expression stack as an assembly operand.
+ MemOperand ElementAt(int index) {
+ return MemOperand(sp, index * kPointerSize);
+ }
+
+ // Random-access store to a frame-top relative frame element. The result
+ // becomes owned by the frame and is invalidated.
+ void SetElementAt(int index, Result* value);
+
+ // Set a frame element to a constant. The index is frame-top relative.
+ void SetElementAt(int index, Handle<Object> value) {
+ Result temp(value);
+ SetElementAt(index, &temp);
+ }
+
+ void PushElementAt(int index) {
+ PushFrameSlotAt(element_count() - index - 1);
+ }
+
+ // A frame-allocated local as an assembly operand.
+ MemOperand LocalAt(int index) {
+ ASSERT(0 <= index);
+ ASSERT(index < local_count());
+ return MemOperand(fp, kLocal0Offset - index * kPointerSize);
+ }
+
+ // Push a copy of the value of a local frame slot on top of the frame.
+ void PushLocalAt(int index) {
+ PushFrameSlotAt(local0_index() + index);
+ }
+
+ // Push the value of a local frame slot on top of the frame and invalidate
+ // the local slot. The slot should be written to before trying to read
+ // from it again.
+ void TakeLocalAt(int index) {
+ TakeFrameSlotAt(local0_index() + index);
+ }
+
+ // Store the top value on the virtual frame into a local frame slot. The
+ // value is left in place on top of the frame.
+ void StoreToLocalAt(int index) {
+ StoreToFrameSlotAt(local0_index() + index);
+ }
+
+ // Push the address of the receiver slot on the frame.
+ void PushReceiverSlotAddress();
+
+ // The function frame slot.
+ MemOperand Function() { return MemOperand(fp, kFunctionOffset); }
+
+ // Push the function on top of the frame.
+ void PushFunction() { PushFrameSlotAt(function_index()); }
+
+ // The context frame slot.
+ MemOperand Context() { return MemOperand(fp, kContextOffset); }
+
+ // Save the value of the cp (context) register to the context frame slot.
+ void SaveContextRegister();
+
+ // Restore the cp (context) register from the value of the context
+ // frame slot.
+ void RestoreContextRegister();
+
+ // A parameter as an assembly operand.
+ MemOperand ParameterAt(int index) {
+ // Index -1 corresponds to the receiver.
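+ // For example, with parameter_count() == 2, index 0 maps to
+ // MemOperand(fp, 3 * kPointerSize) and the receiver (index -1) to
+ // MemOperand(fp, 4 * kPointerSize).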
+ ASSERT(-1 <= index); // -1 is the receiver.
+ ASSERT(index <= parameter_count());
+ return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize);
+ }
+
+ // Push a copy of the value of a parameter frame slot on top of the frame.
+ void PushParameterAt(int index) {
+ PushFrameSlotAt(param0_index() + index);
+ }
+
+ // Push the value of a parameter frame slot on top of the frame and
+ // invalidate the parameter slot. The slot should be written to before
+ // trying to read from it again.
+ void TakeParameterAt(int index) {
+ TakeFrameSlotAt(param0_index() + index);
+ }
+
+ // Store the top value on the virtual frame into a parameter frame slot.
+ // The value is left in place on top of the frame.
+ void StoreToParameterAt(int index) {
+ StoreToFrameSlotAt(param0_index() + index);
+ }
+
+ // The receiver frame slot.
+ MemOperand Receiver() { return ParameterAt(-1); }
+
+ // Push a try-catch or try-finally handler on top of the virtual frame.
+ void PushTryHandler(HandlerType type);
+
+ // Call stub given the number of arguments it expects on (and
+ // removes from) the stack.
+ Result CallStub(CodeStub* stub, int arg_count) {
+ PrepareForCall(arg_count, arg_count);
+ return RawCallStub(stub);
+ }
+
+ // Call stub that expects its argument in r0. The argument is given
+ // as a result which must be the register r0.
+ Result CallStub(CodeStub* stub, Result* arg);
+
+ // Call stub that expects its arguments in r1 and r0. The arguments
+ // are given as results which must be the appropriate registers.
+ Result CallStub(CodeStub* stub, Result* arg0, Result* arg1);
+
+ // Call runtime given the number of arguments expected on (and
+ // removed from) the stack.
+ Result CallRuntime(Runtime::Function* f, int arg_count);
+ Result CallRuntime(Runtime::FunctionId id, int arg_count);
+
+ // Invoke builtin given the number of arguments it expects on (and
+ // removes from) the stack.
+ Result InvokeBuiltin(Builtins::JavaScript id,
+ InvokeJSFlags flag,
+ Result* arg_count_register,
+ int arg_count);
+
+ // Call into an IC stub given the number of arguments it removes
+ // from the stack. Register arguments are passed as results and
+ // consumed by the call.
+ Result CallCodeObject(Handle<Code> ic,
+ RelocInfo::Mode rmode,
+ int dropped_args);
+ Result CallCodeObject(Handle<Code> ic,
+ RelocInfo::Mode rmode,
+ Result* arg,
+ int dropped_args);
+ Result CallCodeObject(Handle<Code> ic,
+ RelocInfo::Mode rmode,
+ Result* arg0,
+ Result* arg1,
+ int dropped_args);
+
+ // Drop a number of elements from the top of the expression stack. May
+ // emit code to affect the physical frame. Does not clobber any registers
+ // excepting possibly the stack pointer.
+ void Drop(int count);
+
+ // Drop one element.
+ void Drop() { Drop(1); }
+
+ // Duplicate the top element of the frame.
+ void Dup() { PushFrameSlotAt(element_count() - 1); }
+
+ // Pop an element from the top of the expression stack. Returns a
+ // Result, which may be a constant or a register.
+ Result Pop();
+
+ // Pop and save an element from the top of the expression stack and
+ // emit a corresponding pop instruction.
+ void EmitPop(Register reg);
+
+ // Push an element on top of the expression stack and emit a
+ // corresponding push instruction.
+ void EmitPush(Register reg);
+
+ // Push an element on the virtual frame.
+ void Push(Register reg, StaticType static_type = StaticType());
+ void Push(Handle<Object> value);
+ void Push(Smi* value) { Push(Handle<Object>(value)); }
+
+ // Pushing a result invalidates it (its contents become owned by the frame).
+ void Push(Result* result) {
+ if (result->is_register()) {
+ Push(result->reg(), result->static_type());
+ } else {
+ ASSERT(result->is_constant());
+ Push(result->handle());
+ }
+ result->Unuse();
+ }
+
+ // Nip removes zero or more elements from immediately below the top
+ // of the frame, leaving the previous top-of-frame value on top of
+ // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
+ void Nip(int num_dropped);
+
+ private:
+ static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
+ static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
+ static const int kContextOffset = StandardFrameConstants::kContextOffset;
+
+ static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
+ static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
+
+ ZoneList<FrameElement> elements_;
+
+ // The index of the element that is at the processor's stack pointer
+ // (the sp register).
+ int stack_pointer_;
+
+ // The index of the register frame element using each register, or
+ // kIllegalIndex if a register is not on the frame.
+ int register_locations_[RegisterAllocator::kNumRegisters];
+
+ // The number of frame-allocated locals and parameters respectively.
+ int parameter_count() { return cgen()->scope()->num_parameters(); }
+ int local_count() { return cgen()->scope()->num_stack_slots(); }
+
+ // The index of the element that is at the processor's frame pointer
+ // (the fp register). The parameters, receiver, function, and context
+ // are below the frame pointer.
+ int frame_pointer() { return parameter_count() + 3; }
+
+ // The index of the first parameter. The receiver lies below the first
+ // parameter.
+ int param0_index() { return 1; }
+
+ // The index of the context slot in the frame. It is immediately
+ // below the frame pointer.
+ int context_index() { return frame_pointer() - 1; }
+
+ // The index of the function slot in the frame. It is below the frame
+ // pointer and context slot.
+ int function_index() { return frame_pointer() - 2; }
+
+ // The index of the first local. Between the frame pointer and the
+ // locals lies the return address.
+ int local0_index() { return frame_pointer() + 2; }
+
+ // The index of the base of the expression stack.
+ int expression_base_index() { return local0_index() + local_count(); }
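+
+ // For example, with two parameters and one local the element indices
+ // are: receiver 0, parameters 1 and 2, function 3, context 4, frame
+ // pointer 5, return address 6, the local 7, and the expression stack
+ // starts at index 8.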
+
+ // Convert a frame index into a frame pointer relative offset into the
+ // actual stack.
+ int fp_relative(int index) {
+ ASSERT(index < element_count());
+ ASSERT(frame_pointer() < element_count()); // FP is on the frame.
+ return (frame_pointer() - index) * kPointerSize;
+ }
+
+ // Record an occurrence of a register in the virtual frame. This has the
+ // effect of incrementing the register's external reference count and
+ // of updating the index of the register's location in the frame.
+ void Use(Register reg, int index) {
+ ASSERT(!is_used(reg));
+ set_register_location(reg, index);
+ cgen()->allocator()->Use(reg);
+ }
+
+ // Record that a register reference has been dropped from the frame. This
+ // decrements the register's external reference count and invalidates the
+ // index of the register's location in the frame.
+ void Unuse(Register reg) {
+ ASSERT(is_used(reg));
+ set_register_location(reg, kIllegalIndex);
+ cgen()->allocator()->Unuse(reg);
+ }
+
+ // Spill the element at a particular index---write it to memory if
+ // necessary, free any associated register, and forget its value if
+ // constant.
+ void SpillElementAt(int index);
+
+ // Sync the element at a particular index. If it is a register or
+ // constant that disagrees with the value on the stack, write it to memory.
+ // Keep the element type as register or constant, and clear the dirty bit.
+ void SyncElementAt(int index);
+
+ // Sync the range of elements in [begin, end] with memory.
+ void SyncRange(int begin, int end);
+
+ // Sync a single unsynced element that lies beneath or at the stack pointer.
+ void SyncElementBelowStackPointer(int index);
+
+ // Sync a single unsynced element that lies just above the stack pointer.
+ void SyncElementByPushing(int index);
+
+ // Push a copy of a frame slot (typically a local or parameter) on top of
+ // the frame.
+ void PushFrameSlotAt(int index);
+
+ // Push the value of a frame slot (typically a local or parameter) on
+ // top of the frame and invalidate the slot.
+ void TakeFrameSlotAt(int index);
+
+ // Store the value on top of the frame to a frame slot (typically a local
+ // or parameter).
+ void StoreToFrameSlotAt(int index);
+
+ // Spill all elements in registers. Spill the top spilled_args elements
+ // on the frame. Sync all other frame elements.
+ // Then drop dropped_args elements from the virtual frame, to match
+ // the effect of an upcoming call that will drop them from the stack.
+ void PrepareForCall(int spilled_args, int dropped_args);
+
+ // Move frame elements currently in registers or constants, that
+ // should be in memory in the expected frame, to memory.
+ void MergeMoveRegistersToMemory(VirtualFrame* expected);
+
+ // Make the register-to-register moves necessary to
+ // merge this frame with the expected frame.
+ // Register to memory moves must already have been made,
+ // and memory to register moves must follow this call.
+ // This is because some new memory-to-register moves are
+ // created in order to break cycles of register moves.
+ // Used in the implementation of MergeTo().
+ void MergeMoveRegistersToRegisters(VirtualFrame* expected);
+
+ // Make the memory-to-register and constant-to-register moves
+ // needed to make this frame equal the expected frame.
+ // Called after all register-to-memory and register-to-register
+ // moves have been made. After this function returns, the frames
+ // should be equal.
+ void MergeMoveMemoryToRegisters(VirtualFrame* expected);
+
+ // Invalidates a frame slot (puts an invalid frame element in it).
+ // Copies on the frame are correctly handled, and if this slot was
+ // the backing store of copies, the index of the new backing store
+ // is returned. Otherwise, returns kIllegalIndex.
+ // Register counts are correctly updated.
+ int InvalidateFrameSlotAt(int index);
+
+ // Call a code stub that has already been prepared for calling (via
+ // PrepareForCall).
+ Result RawCallStub(CodeStub* stub);
+
+ // Calls a code object which has already been prepared for calling
+ // (via PrepareForCall).
+ Result RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
+
+ bool Equals(VirtualFrame* other);
+
+ // Classes that need raw access to the elements_ array.
+ friend class DeferredCode;
+ friend class JumpTarget;
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_ARM_VIRTUAL_FRAME_ARM_H_