author | Feng Qian <> | 2009-04-10 18:11:29 -0700
---|---|---
committer | The Android Open Source Project <initial-contribution@android.com> | 2009-04-10 18:11:29 -0700
commit | 8f72e70a9fd78eec56623b3a62e68f16b7b27e28 (patch) |
tree | 181bf9a400c30a1bf34ea6d72560e8d00111d549 /JavaScriptCore/assembler |
parent | 7ed56f225e0ade046e1c2178977f72b2d896f196 (diff) |
AI 145796: Land the WebKit merge @r42026.
Automated import of CL 145796
Diffstat (limited to 'JavaScriptCore/assembler')
-rw-r--r-- | JavaScriptCore/assembler/AbstractMacroAssembler.h | 841
-rw-r--r-- | JavaScriptCore/assembler/MacroAssembler.h | 1887
-rw-r--r-- | JavaScriptCore/assembler/MacroAssemblerX86.h | 126
-rw-r--r-- | JavaScriptCore/assembler/MacroAssemblerX86Common.h | 583
-rw-r--r-- | JavaScriptCore/assembler/MacroAssemblerX86_64.h | 398
-rw-r--r-- | JavaScriptCore/assembler/X86Assembler.h | 200
6 files changed, 2195 insertions, 1840 deletions
diff --git a/JavaScriptCore/assembler/AbstractMacroAssembler.h b/JavaScriptCore/assembler/AbstractMacroAssembler.h new file mode 100644 index 0000000..851b6d5 --- /dev/null +++ b/JavaScriptCore/assembler/AbstractMacroAssembler.h @@ -0,0 +1,841 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef AbstractMacroAssembler_h +#define AbstractMacroAssembler_h + +#include <wtf/Platform.h> + +#if ENABLE(ASSEMBLER) + +namespace JSC { + +template <class AssemblerType> +class AbstractMacroAssembler { +public: + class Jump; + class PatchBuffer; + class CodeLocationLabel; + class CodeLocationJump; + class CodeLocationCall; + class CodeLocationDataLabel32; + class CodeLocationDataLabelPtr; + + typedef typename AssemblerType::RegisterID RegisterID; + typedef typename AssemblerType::JmpSrc JmpSrc; + typedef typename AssemblerType::JmpDst JmpDst; + + + // Section 1: MacroAssembler operand types + // + // The following types are used as operands to MacroAssembler operations, + // describing immediate and memory operands to the instructions to be planted. + + + enum Scale { + TimesOne, + TimesTwo, + TimesFour, + TimesEight, + }; + + // Address: + // + // Describes a simple base-offset address. + struct Address { + explicit Address(RegisterID base, int32_t offset = 0) + : base(base) + , offset(offset) + { + } + + RegisterID base; + int32_t offset; + }; + + // ImplicitAddress: + // + // This class is used for explicit 'load' and 'store' operations + // (as opposed to situations in which a memory operand is provided + // to a generic operation, such as an integer arithmetic instruction). + // + // In the case of a load (or store) operation we want to permit + // addresses to be implicitly constructed, e.g. the two calls: + // + // load32(Address(addrReg), destReg); + // load32(addrReg, destReg); + // + // Are equivalent, and the explicit wrapping of the Address in the former + // is unnecessary. + struct ImplicitAddress { + ImplicitAddress(RegisterID base) + : base(base) + , offset(0) + { + } + + ImplicitAddress(Address address) + : base(address.base) + , offset(address.offset) + { + } + + RegisterID base; + int32_t offset; + }; + + // BaseIndex: + // + // Describes a complex addressing mode. 
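The two-constructor trick behind ImplicitAddress is worth seeing in isolation. A minimal standalone sketch, not the WebKit sources (the register enum and the printing load32 stub are stand-ins), of how both call forms shown in the comment above resolve to the same overload:

```cpp
#include <cstdint>
#include <cstdio>

enum RegisterID { eax, ecx, edx, ebx };  // stand-in for AssemblerType::RegisterID

struct Address {
    explicit Address(RegisterID base, int32_t offset = 0) : base(base), offset(offset) {}
    RegisterID base;
    int32_t offset;
};

// Two implicit constructors let either a bare register or an Address be passed.
struct ImplicitAddress {
    ImplicitAddress(RegisterID base) : base(base), offset(0) {}
    ImplicitAddress(Address address) : base(address.base), offset(address.offset) {}
    RegisterID base;
    int32_t offset;
};

// Stand-in for a real load: just shows which operand was constructed.
void load32(ImplicitAddress address, RegisterID dest) {
    std::printf("load32 [r%d + %d] -> r%d\n", (int)address.base, address.offset, (int)dest);
}

int main() {
    load32(eax, ecx);              // converts via ImplicitAddress(RegisterID)
    load32(Address(eax, 8), ecx);  // converts via ImplicitAddress(Address)
}
```

The BaseIndex operand defined next generalizes this to base-plus-scaled-index addressing.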
+ struct BaseIndex { + BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0) + : base(base) + , index(index) + , scale(scale) + , offset(offset) + { + } + + RegisterID base; + RegisterID index; + Scale scale; + int32_t offset; + }; + + // AbsoluteAddress: + // + // Describes an memory operand given by a pointer. For regular load & store + // operations an unwrapped void* will be used, rather than using this. + struct AbsoluteAddress { + explicit AbsoluteAddress(void* ptr) + : m_ptr(ptr) + { + } + + void* m_ptr; + }; + + // ImmPtr: + // + // A pointer sized immediate operand to an instruction - this is wrapped + // in a class requiring explicit construction in order to differentiate + // from pointers used as absolute addresses to memory operations + struct ImmPtr { + explicit ImmPtr(void* value) + : m_value(value) + { + } + + intptr_t asIntptr() + { + return reinterpret_cast<intptr_t>(m_value); + } + + void* m_value; + }; + + // Imm32: + // + // A 32bit immediate operand to an instruction - this is wrapped in a + // class requiring explicit construction in order to prevent RegisterIDs + // (which are implemented as an enum) from accidentally being passed as + // immediate values. + struct Imm32 { + explicit Imm32(int32_t value) + : m_value(value) + { + } + +#if !PLATFORM(X86_64) + explicit Imm32(ImmPtr ptr) + : m_value(ptr.asIntptr()) + { + } +#endif + + int32_t m_value; + }; + + + // Section 2: MacroAssembler code buffer handles + // + // The following types are used to reference items in the code buffer + // during JIT code generation. For example, the type Jump is used to + // track the location of a jump instruction so that it may later be + // linked to a label marking its destination. + + + // Label: + // + // A Label records a point in the generated instruction stream, typically such that + // it may be used as a destination for a jump. + class Label { + friend class Jump; + template<class AssemblerType_T> + friend class AbstractMacroAssembler; + friend class PatchBuffer; + public: + Label() + { + } + + Label(AbstractMacroAssembler<AssemblerType>* masm) + : m_label(masm->m_assembler.label()) + { + } + + bool isUsed() const { return m_label.isUsed(); } + void used() { m_label.used(); } + private: + JmpDst m_label; + }; + + // DataLabelPtr: + // + // A DataLabelPtr is used to refer to a location in the code containing a pointer to be + // patched after the code has been generated. + class DataLabelPtr { + template<class AssemblerType_T> + friend class AbstractMacroAssembler; + friend class PatchBuffer; + public: + DataLabelPtr() + { + } + + DataLabelPtr(AbstractMacroAssembler<AssemblerType>* masm) + : m_label(masm->m_assembler.label()) + { + } + + private: + JmpDst m_label; + }; + + // DataLabel32: + // + // A DataLabelPtr is used to refer to a location in the code containing a pointer to be + // patched after the code has been generated. + class DataLabel32 { + template<class AssemblerType_T> + friend class AbstractMacroAssembler; + friend class PatchBuffer; + public: + DataLabel32() + { + } + + DataLabel32(AbstractMacroAssembler<AssemblerType>* masm) + : m_label(masm->m_assembler.label()) + { + } + + private: + JmpDst m_label; + }; + + // Call: + // + // A Call object is a reference to a call instruction that has been planted + // into the code buffer - it is typically used to link the call, setting the + // relative offset such that when executed it will call to the desired + // destination. 
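The addressing arithmetic a BaseIndex operand denotes can be written out directly. A sketch, assuming the Scale enumerators carry SIB-style values 0 through 3 (the shift applied to the index), which matches the enum above starting at TimesOne:

```cpp
#include <cstdint>

enum Scale { TimesOne, TimesTwo, TimesFour, TimesEight };  // 0..3, as above

// What [base + index*(1<<scale) + offset] computes; the JIT emits this as a
// single SIB-addressed x86 instruction rather than explicit arithmetic.
intptr_t effectiveAddress(intptr_t base, intptr_t index, Scale scale, int32_t offset)
{
    return base + (index << static_cast<int>(scale)) + offset;
}
// e.g. effectiveAddress(arrayBase, i, TimesFour, 0) addresses element i of an int32_t array.
```

The Call class that follows is the first of the code-buffer handle types built on top of these operands.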
+ class Call { + friend class PatchBuffer; + template<class AssemblerType_T> + friend class AbstractMacroAssembler; + public: + enum Flags { + None = 0x0, + Linkable = 0x1, + Near = 0x2, + LinkableNear = 0x3, + }; + + Call() + : m_flags(None) + { + } + + Call(JmpSrc jmp, Flags flags) + : m_jmp(jmp) + , m_flags(flags) + { + } + + bool isFlagSet(Flags flag) + { + return m_flags & flag; + } + + static Call fromTailJump(Jump jump) + { + return Call(jump.m_jmp, Linkable); + } + + private: + JmpSrc m_jmp; + Flags m_flags; + }; + + // Jump: + // + // A jump object is a reference to a jump instruction that has been planted + // into the code buffer - it is typically used to link the jump, setting the + // relative offset such that when executed it will jump to the desired + // destination. + class Jump { + friend class PatchBuffer; + template<class AssemblerType_T> + friend class AbstractMacroAssembler; + friend class Call; + public: + Jump() + { + } + + Jump(JmpSrc jmp) + : m_jmp(jmp) + { + } + + void link(AbstractMacroAssembler<AssemblerType>* masm) + { + masm->m_assembler.linkJump(m_jmp, masm->m_assembler.label()); + } + + void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm) + { + masm->m_assembler.linkJump(m_jmp, label.m_label); + } + + private: + JmpSrc m_jmp; + }; + + // JumpList: + // + // A JumpList is a set of Jump objects. + // All jumps in the set will be linked to the same destination. + class JumpList { + friend class PatchBuffer; + + public: + void link(AbstractMacroAssembler<AssemblerType>* masm) + { + size_t size = m_jumps.size(); + for (size_t i = 0; i < size; ++i) + m_jumps[i].link(masm); + m_jumps.clear(); + } + + void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm) + { + size_t size = m_jumps.size(); + for (size_t i = 0; i < size; ++i) + m_jumps[i].linkTo(label, masm); + m_jumps.clear(); + } + + void append(Jump jump) + { + m_jumps.append(jump); + } + + void append(JumpList& other) + { + m_jumps.append(other.m_jumps.begin(), other.m_jumps.size()); + } + + bool empty() + { + return !m_jumps.size(); + } + + private: + Vector<Jump, 16> m_jumps; + }; + + + // Section 3: MacroAssembler JIT instruction stream handles. + // + // The MacroAssembler supported facilities to modify a JIT generated + // instruction stream after it has been generated (relinking calls and + // jumps, and repatching data values). The following types are used + // to store handles into the underlying instruction stream, the type + // providing semantic information as to what it is that is in the + // instruction stream at this point, and thus what operations may be + // performed on it. + + + // CodeLocationCommon: + // + // Base type for other CodeLocation* types. A postion in the JIT genertaed + // instruction stream, without any semantic information. + class CodeLocationCommon { + public: + CodeLocationCommon() + : m_location(0) + { + } + + // In order to avoid the need to store multiple handles into the + // instructions stream, where the code generation is deterministic + // and the labels will always be a fixed distance apart, these + // methods may be used to recover a handle that has nopw been + // retained, based on a known fixed relative offset from one that has. 
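The offset-recovery helpers declared just below reduce to pointer arithmetic, as the template definitions at the bottom of the file show. A standalone sketch of the idea:

```cpp
// When code generation is deterministic, two interesting locations sit a fixed
// number of bytes apart, so one retained handle plus a known delta recovers the other.
void* locationAtOffset(void* retainedLocation, int offset)
{
    return reinterpret_cast<char*>(retainedLocation) + offset;
}
// e.g. a retained call-site handle at 'p' can recover the data label planted
// 'k' bytes after the call as locationAtOffset(p, k).
```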
+ CodeLocationLabel labelAtOffset(int offset); + CodeLocationJump jumpAtOffset(int offset); + CodeLocationCall callAtOffset(int offset); + CodeLocationDataLabelPtr dataLabelPtrAtOffset(int offset); + CodeLocationDataLabel32 dataLabel32AtOffset(int offset); + + operator bool() { return m_location; } + void reset() { m_location = 0; } + + protected: + explicit CodeLocationCommon(void* location) + : m_location(location) + { + } + + void* m_location; + }; + + // CodeLocationLabel: + // + // A point in the JIT code maked with a label. + class CodeLocationLabel : public CodeLocationCommon { + friend class CodeLocationCommon; + friend class CodeLocationJump; + friend class PatchBuffer; + public: + CodeLocationLabel() + { + } + + void* addressForSwitch() { return this->m_location; } + void* addressForExceptionHandler() { return this->m_location; } + void* addressForJSR() { return this->m_location; } + + private: + explicit CodeLocationLabel(void* location) + : CodeLocationCommon(location) + { + } + + void* getJumpDestination() { return this->m_location; } + }; + + // CodeLocationJump: + // + // A point in the JIT code at which there is a jump instruction. + class CodeLocationJump : public CodeLocationCommon { + friend class CodeLocationCommon; + friend class PatchBuffer; + public: + CodeLocationJump() + { + } + + void relink(CodeLocationLabel destination) + { + AssemblerType::patchJump(reinterpret_cast<intptr_t>(this->m_location), destination.m_location); + } + + private: + explicit CodeLocationJump(void* location) + : CodeLocationCommon(location) + { + } + }; + + // CodeLocationCall: + // + // A point in the JIT code at which there is a call instruction. + class CodeLocationCall : public CodeLocationCommon { + friend class CodeLocationCommon; + friend class PatchBuffer; + public: + CodeLocationCall() + { + } + + template<typename FunctionSig> + void relink(FunctionSig* function) + { + AssemblerType::patchMacroAssemblerCall(reinterpret_cast<intptr_t>(this->m_location), reinterpret_cast<void*>(function)); + } + + // This methods returns the value that will be set as the return address + // within a function that has been called from this call instruction. + void* calleeReturnAddressValue() + { + return this->m_location; + } + + private: + explicit CodeLocationCall(void* location) + : CodeLocationCommon(location) + { + } + }; + + // CodeLocationNearCall: + // + // A point in the JIT code at which there is a call instruction with near linkage. + class CodeLocationNearCall : public CodeLocationCommon { + friend class CodeLocationCommon; + friend class PatchBuffer; + public: + CodeLocationNearCall() + { + } + + template<typename FunctionSig> + void relink(FunctionSig* function) + { + AssemblerType::patchCall(reinterpret_cast<intptr_t>(this->m_location), reinterpret_cast<void*>(function)); + } + + // This methods returns the value that will be set as the return address + // within a function that has been called from this call instruction. + void* calleeReturnAddressValue() + { + return this->m_location; + } + + private: + explicit CodeLocationNearCall(void* location) + : CodeLocationCommon(location) + { + } + }; + + // CodeLocationDataLabel32: + // + // A point in the JIT code at which there is an int32_t immediate that may be repatched. 
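Repatching a data label comes down to overwriting an immediate embedded in finished machine code. A minimal model of what a patchImmediate-style primitive does (real implementations must also handle instruction-cache flushing and writable/executable page permissions, which this ignores):

```cpp
#include <cstdint>
#include <cstring>

// 'where' points directly at the imm32 field inside an emitted instruction.
void repatchInt32(void* where, int32_t value)
{
    std::memcpy(where, &value, sizeof(value));
}
```

The CodeLocationDataLabel32 class that follows wraps exactly this operation behind a typed handle.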
+ class CodeLocationDataLabel32 : public CodeLocationCommon { + friend class CodeLocationCommon; + friend class PatchBuffer; + public: + CodeLocationDataLabel32() + { + } + + void repatch(int32_t value) + { + AssemblerType::patchImmediate(reinterpret_cast<intptr_t>(this->m_location), value); + } + + private: + explicit CodeLocationDataLabel32(void* location) + : CodeLocationCommon(location) + { + } + }; + + // CodeLocationDataLabelPtr: + // + // A point in the JIT code at which there is a void* immediate that may be repatched. + class CodeLocationDataLabelPtr : public CodeLocationCommon { + friend class CodeLocationCommon; + friend class PatchBuffer; + public: + CodeLocationDataLabelPtr() + { + } + + void repatch(void* value) + { + AssemblerType::patchPointer(reinterpret_cast<intptr_t>(this->m_location), reinterpret_cast<intptr_t>(value)); + } + + private: + explicit CodeLocationDataLabelPtr(void* location) + : CodeLocationCommon(location) + { + } + }; + + // ProcessorReturnAddress: + // + // This class can be used to relink a call identified by its return address. + class ProcessorReturnAddress { + public: + ProcessorReturnAddress(void* location) + : m_location(location) + { + } + + template<typename FunctionSig> + void relinkCallerToFunction(FunctionSig* newCalleeFunction) + { + AssemblerType::patchMacroAssemblerCall(reinterpret_cast<intptr_t>(this->m_location), reinterpret_cast<void*>(newCalleeFunction)); + } + + template<typename FunctionSig> + void relinkNearCallerToFunction(FunctionSig* newCalleeFunction) + { + AssemblerType::patchCall(reinterpret_cast<intptr_t>(this->m_location), reinterpret_cast<void*>(newCalleeFunction)); + } + + operator void*() + { + return m_location; + } + + private: + void* m_location; + }; + + + // Section 4: The patch buffer - utility to finalize code generation. + + + // PatchBuffer: + // + // This class assists in linking code generated by the macro assembler, once code generation + // has been completed, and the code has been copied to is final location in memory. At this + // time pointers to labels within the code may be resolved, and relative offsets to external + // addresses may be fixed. + // + // Specifically: + // * Jump objects may be linked to external targets, + // * The address of Jump objects may taken, such that it can later be relinked. + // * The return address of a Jump object representing a call may be acquired. + // * The address of a Label pointing into the code may be resolved. + // * The value referenced by a DataLabel may be fixed. + // + // FIXME: distinguish between Calls & Jumps (make a specific call to obtain the return + // address of calls, as opposed to a point that can be used to later relink a Jump - + // possibly wrap the later up in an object that can do just that). + class PatchBuffer { + public: + PatchBuffer(void* code) + : m_code(code) + { + } + + CodeLocationLabel entry() + { + return CodeLocationLabel(m_code); + } + + void* trampolineAt(Label label) + { + return AssemblerType::getRelocatedAddress(m_code, label.m_label); + } + + // These methods are used to link or set values at code generation time. 
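Concretely, the linking and handle-taking methods that follow tend to be used in a finalize step like this. A usage sketch: masm, executablePool, slowCall, failures, slowPathLabel and the helper functions are hypothetical names, but each call matches a method declared in this file:

```cpp
void* code = masm.copyCode(executablePool);        // copy to its final, executable location
MacroAssembler::PatchBuffer patchBuffer(code);

patchBuffer.link(slowCall, helperFunctionA);       // bind a Linkable Call to a C function
MacroAssembler::CodeLocationLabel slowPath = patchBuffer.locationOf(slowPathLabel);
patchBuffer.link(failures, slowPath);              // point a JumpList at a label in the code

MacroAssembler::CodeLocationCall callSite = patchBuffer.locationOf(slowCall);
// ... later, once the code is live, the call can be re-bound:
callSite.relink(helperFunctionB);
```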
+ + template<typename FunctionSig> + void link(Call call, FunctionSig* function) + { + ASSERT(call.isFlagSet(Call::Linkable)); +#if PLATFORM(X86_64) + if (call.isFlagSet(Call::Near)) { + AssemblerType::linkCall(m_code, call.m_jmp, reinterpret_cast<void*>(function)); + } else { + intptr_t callLocation = reinterpret_cast<intptr_t>(AssemblerType::getRelocatedAddress(m_code, call.m_jmp)); + AssemblerType::patchMacroAssemblerCall(callLocation, reinterpret_cast<void*>(function)); + } +#else + AssemblerType::linkCall(m_code, call.m_jmp, reinterpret_cast<void*>(function)); +#endif + } + + template<typename FunctionSig> + void linkTailRecursive(Jump jump, FunctionSig* function) + { + AssemblerType::linkJump(m_code, jump.m_jmp, reinterpret_cast<void*>(function)); + } + + template<typename FunctionSig> + void linkTailRecursive(JumpList list, FunctionSig* function) + { + for (unsigned i = 0; i < list.m_jumps.size(); ++i) { + AssemblerType::linkJump(m_code, list.m_jumps[i].m_jmp, reinterpret_cast<void*>(function)); + } + } + + void link(Jump jump, CodeLocationLabel label) + { + AssemblerType::linkJump(m_code, jump.m_jmp, label.m_location); + } + + void link(JumpList list, CodeLocationLabel label) + { + for (unsigned i = 0; i < list.m_jumps.size(); ++i) + AssemblerType::linkJump(m_code, list.m_jumps[i].m_jmp, label.m_location); + } + + void patch(DataLabelPtr label, void* value) + { + AssemblerType::patchAddress(m_code, label.m_label, value); + } + + // These methods are used to obtain handles to allow the code to be relinked / repatched later. + + CodeLocationCall locationOf(Call call) + { + ASSERT(call.isFlagSet(Call::Linkable)); + ASSERT(!call.isFlagSet(Call::Near)); + return CodeLocationCall(AssemblerType::getRelocatedAddress(m_code, call.m_jmp)); + } + + CodeLocationNearCall locationOfNearCall(Call call) + { + ASSERT(call.isFlagSet(Call::Linkable)); + ASSERT(call.isFlagSet(Call::Near)); + return CodeLocationNearCall(AssemblerType::getRelocatedAddress(m_code, call.m_jmp)); + } + + CodeLocationLabel locationOf(Label label) + { + return CodeLocationLabel(AssemblerType::getRelocatedAddress(m_code, label.m_label)); + } + + CodeLocationDataLabelPtr locationOf(DataLabelPtr label) + { + return CodeLocationDataLabelPtr(AssemblerType::getRelocatedAddress(m_code, label.m_label)); + } + + CodeLocationDataLabel32 locationOf(DataLabel32 label) + { + return CodeLocationDataLabel32(AssemblerType::getRelocatedAddress(m_code, label.m_label)); + } + + // This method obtains the return address of the call, given as an offset from + // the start of the code. 
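The return-address machinery pairs with ProcessorReturnAddress from Section 3: the JIT records returnAddressOffset for each call when linking, then at runtime maps a machine return address back to its call site in order to relink it. A sketch, with recordCallSite, slowCall, ra and specializedHelper as hypothetical bookkeeping names:

```cpp
// At link time: remember where each call returns to, relative to 'code'.
unsigned offset = patchBuffer.returnAddressOffset(slowCall);
recordCallSite(reinterpret_cast<char*>(code) + offset, slowCall);

// At run time, 'ra' is the return address observed inside the called helper:
MacroAssembler::ProcessorReturnAddress returnPoint(ra);
returnPoint.relinkCallerToFunction(specializedHelper);  // re-bind the originating call
```

The returnAddressOffset helper itself is declared next.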
+ unsigned returnAddressOffset(Call call) + { + return AssemblerType::getCallReturnOffset(call.m_jmp); + } + + private: + void* m_code; + }; + + + // Section 5: Misc admin methods + + size_t size() + { + return m_assembler.size(); + } + + void* copyCode(ExecutablePool* allocator) + { + return m_assembler.executableCopy(allocator); + } + + Label label() + { + return Label(this); + } + + Label align() + { + m_assembler.align(16); + return Label(this); + } + + ptrdiff_t differenceBetween(Label from, Jump to) + { + return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp); + } + + ptrdiff_t differenceBetween(Label from, Call to) + { + return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp); + } + + ptrdiff_t differenceBetween(Label from, Label to) + { + return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label); + } + + ptrdiff_t differenceBetween(Label from, DataLabelPtr to) + { + return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label); + } + + ptrdiff_t differenceBetween(Label from, DataLabel32 to) + { + return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label); + } + + ptrdiff_t differenceBetween(DataLabelPtr from, Jump to) + { + return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp); + } + + ptrdiff_t differenceBetween(DataLabelPtr from, Call to) + { + return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp); + } + +protected: + AssemblerType m_assembler; +}; + + +template <class AssemblerType> +typename AbstractMacroAssembler<AssemblerType>::CodeLocationLabel AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::labelAtOffset(int offset) +{ + return typename AbstractMacroAssembler::CodeLocationLabel(reinterpret_cast<char*>(m_location) + offset); +} + +template <class AssemblerType> +typename AbstractMacroAssembler<AssemblerType>::CodeLocationJump AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::jumpAtOffset(int offset) +{ + return typename AbstractMacroAssembler::CodeLocationJump(reinterpret_cast<char*>(m_location) + offset); +} + +template <class AssemblerType> +typename AbstractMacroAssembler<AssemblerType>::CodeLocationCall AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::callAtOffset(int offset) +{ + return typename AbstractMacroAssembler::CodeLocationCall(reinterpret_cast<char*>(m_location) + offset); +} + +template <class AssemblerType> +typename AbstractMacroAssembler<AssemblerType>::CodeLocationDataLabelPtr AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::dataLabelPtrAtOffset(int offset) +{ + return typename AbstractMacroAssembler::CodeLocationDataLabelPtr(reinterpret_cast<char*>(m_location) + offset); +} + +template <class AssemblerType> +typename AbstractMacroAssembler<AssemblerType>::CodeLocationDataLabel32 AbstractMacroAssembler<AssemblerType>::CodeLocationCommon::dataLabel32AtOffset(int offset) +{ + return typename AbstractMacroAssembler::CodeLocationDataLabel32(reinterpret_cast<char*>(m_location) + offset); +} + + +} // namespace JSC + +#endif // ENABLE(ASSEMBLER) + +#endif // AbstractMacroAssembler_h diff --git a/JavaScriptCore/assembler/MacroAssembler.h b/JavaScriptCore/assembler/MacroAssembler.h index 9d24653..71ac1f6 100644 --- a/JavaScriptCore/assembler/MacroAssembler.h +++ b/JavaScriptCore/assembler/MacroAssembler.h @@ -30,1986 +30,301 @@ #if ENABLE(ASSEMBLER) -#include "X86Assembler.h" - -namespace JSC { +#if PLATFORM(X86) +#include "MacroAssemblerX86.h" +namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; 

}; -class MacroAssembler { -protected: - X86Assembler m_assembler; +#elif PLATFORM(X86_64) +#include "MacroAssemblerX86_64.h" +namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; }; -#if PLATFORM(X86_64) - static const X86::RegisterID scratchRegister = X86::r11; +#else +#error "The MacroAssembler is not supported on this platform." #endif + +namespace JSC { + +class MacroAssembler : public MacroAssemblerBase { public: - typedef X86::RegisterID RegisterID; - - // Note: do not rely on values in this enum, these will change (to 0..3). - enum Scale { - TimesOne = 1, - TimesTwo = 2, - TimesFour = 4, - TimesEight = 8, -#if PLATFORM(X86) - ScalePtr = TimesFour -#endif + + using MacroAssemblerBase::pop; + using MacroAssemblerBase::jump; + using MacroAssemblerBase::branch32; + using MacroAssemblerBase::branch16; #if PLATFORM(X86_64) - ScalePtr = TimesEight + using MacroAssemblerBase::branchPtr; + using MacroAssemblerBase::branchTestPtr; #endif - }; - MacroAssembler() + + // Platform agnostic onvenience functions, + // described in terms of other macro assembly methods. + void pop() { + addPtr(Imm32(sizeof(void*)), stackPointerRegister); } - size_t size() { return m_assembler.size(); } - void* copyCode(ExecutablePool* allocator) + void peek(RegisterID dest, int index = 0) { - return m_assembler.executableCopy(allocator); + loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest); } - - // Address: - // - // Describes a simple base-offset address. - struct Address { - explicit Address(RegisterID base, int32_t offset = 0) - : base(base) - , offset(offset) - { - } - - RegisterID base; - int32_t offset; - }; - - // ImplicitAddress: - // - // This class is used for explicit 'load' and 'store' operations - // (as opposed to situations in which a memory operand is provided - // to a generic operation, such as an integer arithmetic instruction). - // - // In the case of a load (or store) operation we want to permit - // addresses to be implicitly constructed, e.g. the two calls: - // - // load32(Address(addrReg), destReg); - // load32(addrReg, destReg); - // - // Are equivalent, and the explicit wrapping of the Address in the former - // is unnecessary. - struct ImplicitAddress { - ImplicitAddress(RegisterID base) - : base(base) - , offset(0) - { - } - - ImplicitAddress(Address address) - : base(address.base) - , offset(address.offset) - { - } - - RegisterID base; - int32_t offset; - }; - - // BaseIndex: - // - // Describes a complex addressing mode. - struct BaseIndex { - BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0) - : base(base) - , index(index) - , scale(scale) - , offset(offset) - { - } - - RegisterID base; - RegisterID index; - Scale scale; - int32_t offset; - }; - - // AbsoluteAddress: - // - // Describes an memory operand given by a pointer. For regular load & store - // operations an unwrapped void* will be used, rather than using this. - struct AbsoluteAddress { - explicit AbsoluteAddress(void* ptr) - : m_ptr(ptr) - { - } - - void* m_ptr; - }; - - - class Jump; - class PatchBuffer; - - // DataLabelPtr: - // - // A DataLabelPtr is used to refer to a location in the code containing a pointer to be - // patched after the code has been generated. 
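The new pop(), peek() and poke() conveniences treat the stack as an array of machine words addressed from the current stack pointer, without moving it (except pop, which only moves it). A standalone model of the semantics, plain memory rather than JIT code:

```cpp
#include <cstdint>

struct StackModel {
    intptr_t* sp;                                                  // models stackPointerRegister
    intptr_t  peek(int index = 0) { return sp[index]; }            // read a slot; sp unchanged
    void      poke(intptr_t v, int index = 0) { sp[index] = v; }   // write a slot; sp unchanged
    void      pop() { sp += 1; }    // discard one word: addPtr(Imm32(sizeof(void*)), sp)
};
```

The diff then deletes the old MacroAssembler's own copies of the helper classes, beginning with DataLabelPtr below.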
- class DataLabelPtr { - friend class MacroAssembler; - friend class PatchBuffer; - - public: - DataLabelPtr() - { - } - - DataLabelPtr(MacroAssembler* masm) - : m_label(masm->m_assembler.label()) - { - } - - static void patch(void* address, void* value) - { - X86Assembler::patchPointer(reinterpret_cast<intptr_t>(address), reinterpret_cast<intptr_t>(value)); - } - - private: - X86Assembler::JmpDst m_label; - }; - - // DataLabel32: - // - // A DataLabelPtr is used to refer to a location in the code containing a pointer to be - // patched after the code has been generated. - class DataLabel32 { - friend class MacroAssembler; - friend class PatchBuffer; - - public: - DataLabel32() - { - } - - DataLabel32(MacroAssembler* masm) - : m_label(masm->m_assembler.label()) - { - } - - static void patch(void* address, int32_t value) - { - X86Assembler::patchImmediate(reinterpret_cast<intptr_t>(address), value); - } - - private: - X86Assembler::JmpDst m_label; - }; - - // Label: - // - // A Label records a point in the generated instruction stream, typically such that - // it may be used as a destination for a jump. - class Label { - friend class Jump; - friend class MacroAssembler; - friend class PatchBuffer; - - public: - Label() - { - } - - Label(MacroAssembler* masm) - : m_label(masm->m_assembler.label()) - { - } - - // FIXME: transitionary method, while we replace JmpSrces with Jumps. - operator X86Assembler::JmpDst() - { - return m_label; - } - - private: - X86Assembler::JmpDst m_label; - }; - - - // Jump: - // - // A jump object is a reference to a jump instruction that has been planted - // into the code buffer - it is typically used to link the jump, setting the - // relative offset such that when executed it will jump to the desired - // destination. - // - // Jump objects retain a pointer to the assembler for syntactic purposes - - // to allow the jump object to be able to link itself, e.g.: - // - // Jump forwardsBranch = jne32(Imm32(0), reg1); - // // ... - // forwardsBranch.link(); - // - // Jumps may also be linked to a Label. - class Jump { - friend class PatchBuffer; - friend class MacroAssembler; - - public: - Jump() - { - } - - // FIXME: transitionary method, while we replace JmpSrces with Jumps. - Jump(X86Assembler::JmpSrc jmp) - : m_jmp(jmp) - { - } - - void link(MacroAssembler* masm) - { - masm->m_assembler.link(m_jmp, masm->m_assembler.label()); - } - - void linkTo(Label label, MacroAssembler* masm) - { - masm->m_assembler.link(m_jmp, label.m_label); - } - - // FIXME: transitionary method, while we replace JmpSrces with Jumps. - operator X86Assembler::JmpSrc() - { - return m_jmp; - } - - static void patch(void* address, void* destination) - { - X86Assembler::patchBranchOffset(reinterpret_cast<intptr_t>(address), destination); - } - - private: - X86Assembler::JmpSrc m_jmp; - }; - - // JumpList: - // - // A JumpList is a set of Jump objects. - // All jumps in the set will be linked to the same destination. 
- class JumpList { - friend class PatchBuffer; - - public: - void link(MacroAssembler* masm) - { - size_t size = m_jumps.size(); - for (size_t i = 0; i < size; ++i) - m_jumps[i].link(masm); - m_jumps.clear(); - } - - void linkTo(Label label, MacroAssembler* masm) - { - size_t size = m_jumps.size(); - for (size_t i = 0; i < size; ++i) - m_jumps[i].linkTo(label, masm); - m_jumps.clear(); - } - - void append(Jump jump) - { - m_jumps.append(jump); - } - - void append(JumpList& other) - { - m_jumps.append(other.m_jumps.begin(), other.m_jumps.size()); - } - - bool empty() - { - return !m_jumps.size(); - } - - private: - Vector<Jump, 16> m_jumps; - }; - - - // PatchBuffer: - // - // This class assists in linking code generated by the macro assembler, once code generation - // has been completed, and the code has been copied to is final location in memory. At this - // time pointers to labels within the code may be resolved, and relative offsets to external - // addresses may be fixed. - // - // Specifically: - // * Jump objects may be linked to external targets, - // * The address of Jump objects may taken, such that it can later be relinked. - // * The return address of a Jump object representing a call may be acquired. - // * The address of a Label pointing into the code may be resolved. - // * The value referenced by a DataLabel may be fixed. - // - // FIXME: distinguish between Calls & Jumps (make a specific call to obtain the return - // address of calls, as opposed to a point that can be used to later relink a Jump - - // possibly wrap the later up in an object that can do just that). - class PatchBuffer { - public: - PatchBuffer(void* code) - : m_code(code) - { - } - - void link(Jump jump, void* target) - { - X86Assembler::link(m_code, jump.m_jmp, target); - } - - void link(JumpList list, void* target) - { - for (unsigned i = 0; i < list.m_jumps.size(); ++i) - X86Assembler::link(m_code, list.m_jumps[i], target); - } - - void* addressOf(Jump jump) - { - return X86Assembler::getRelocatedAddress(m_code, jump.m_jmp); - } - - void* addressOf(Label label) - { - return X86Assembler::getRelocatedAddress(m_code, label.m_label); - } - - void* addressOf(DataLabelPtr label) - { - return X86Assembler::getRelocatedAddress(m_code, label.m_label); - } - - void* addressOf(DataLabel32 label) - { - return X86Assembler::getRelocatedAddress(m_code, label.m_label); - } - - void setPtr(DataLabelPtr label, void* value) - { - X86Assembler::patchAddress(m_code, label.m_label, value); - } - - private: - void* m_code; - }; - - - // ImmPtr: - // - // A pointer sized immediate operand to an instruction - this is wrapped - // in a class requiring explicit construction in order to differentiate - // from pointers used as absolute addresses to memory operations - struct ImmPtr { - explicit ImmPtr(void* value) - : m_value(value) - { - } - - intptr_t asIntptr() - { - return reinterpret_cast<intptr_t>(m_value); - } - - void* m_value; - }; - - - // Imm32: - // - // A 32bit immediate operand to an instruction - this is wrapped in a - // class requiring explicit construction in order to prevent RegisterIDs - // (which are implemented as an enum) from accidentally being passed as - // immediate values. 
- struct Imm32 { - explicit Imm32(int32_t value) - : m_value(value) - { - } - -#if PLATFORM(X86) - explicit Imm32(ImmPtr ptr) - : m_value(ptr.asIntptr()) - { - } -#endif - - int32_t m_value; - }; - - // Integer arithmetic operations: - // - // Operations are typically two operand - operation(source, srcDst) - // For many operations the source may be an Imm32, the srcDst operand - // may often be a memory location (explictly described using an Address - // object). - - void addPtr(RegisterID src, RegisterID dest) + void poke(RegisterID src, int index = 0) { -#if PLATFORM(X86_64) - m_assembler.addq_rr(src, dest); -#else - add32(src, dest); -#endif + storePtr(src, Address(stackPointerRegister, (index * sizeof(void*)))); } - void addPtr(Imm32 imm, RegisterID srcDest) + void poke(Imm32 value, int index = 0) { -#if PLATFORM(X86_64) - m_assembler.addq_ir(imm.m_value, srcDest); -#else - add32(imm, srcDest); -#endif + store32(value, Address(stackPointerRegister, (index * sizeof(void*)))); } - void addPtr(ImmPtr imm, RegisterID dest) + void poke(ImmPtr imm, int index = 0) { -#if PLATFORM(X86_64) - move(imm, scratchRegister); - m_assembler.addq_rr(scratchRegister, dest); -#else - add32(Imm32(imm), dest); -#endif + storePtr(imm, Address(stackPointerRegister, (index * sizeof(void*)))); } - void addPtr(Imm32 imm, RegisterID src, RegisterID dest) + + // Backwards banches, these are currently all implemented using existing forwards branch mechanisms. + void branchPtr(Condition cond, RegisterID op1, ImmPtr imm, Label target) { - m_assembler.leal_mr(imm.m_value, src, dest); + branchPtr(cond, op1, imm).linkTo(target, this); } - void add32(RegisterID src, RegisterID dest) + void branch32(Condition cond, RegisterID op1, RegisterID op2, Label target) { - m_assembler.addl_rr(src, dest); + branch32(cond, op1, op2).linkTo(target, this); } - void add32(Imm32 imm, Address address) + void branch32(Condition cond, RegisterID op1, Imm32 imm, Label target) { - m_assembler.addl_im(imm.m_value, address.offset, address.base); + branch32(cond, op1, imm).linkTo(target, this); } - void add32(Imm32 imm, RegisterID dest) + void branch32(Condition cond, RegisterID left, Address right, Label target) { - m_assembler.addl_ir(imm.m_value, dest); + branch32(cond, left, right).linkTo(target, this); } - - void add32(Imm32 imm, AbsoluteAddress address) - { -#if PLATFORM(X86_64) - move(ImmPtr(address.m_ptr), scratchRegister); - add32(imm, Address(scratchRegister)); -#else - m_assembler.addl_im(imm.m_value, address.m_ptr); -#endif - } - - void add32(Address src, RegisterID dest) + + void branch16(Condition cond, BaseIndex left, RegisterID right, Label target) { - m_assembler.addl_mr(src.offset, src.base, dest); + branch16(cond, left, right).linkTo(target, this); } - void andPtr(RegisterID src, RegisterID dest) + void branchTestPtr(Condition cond, RegisterID reg, Label target) { -#if PLATFORM(X86_64) - m_assembler.andq_rr(src, dest); -#else - and32(src, dest); -#endif + branchTestPtr(cond, reg).linkTo(target, this); } - void andPtr(Imm32 imm, RegisterID srcDest) + void jump(Label target) { -#if PLATFORM(X86_64) - m_assembler.andq_ir(imm.m_value, srcDest); -#else - and32(imm, srcDest); -#endif + jump().linkTo(target, this); } - void and32(RegisterID src, RegisterID dest) - { - m_assembler.andl_rr(src, dest); - } - void and32(Imm32 imm, RegisterID dest) + // Ptr methods + // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents. 
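The backwards-branch wrappers added above are pure sugar: emit the compare-and-branch, then immediately link it to an already-bound label. Typical use (hypothetical registers; the NotEqual condition name comes from the platform layer, which this diff does not show):

```cpp
MacroAssembler::Label loopTop = masm.label();        // bind the loop head
masm.sub32(MacroAssembler::Imm32(1), counterReg);
// compare, branch, and link back to loopTop in one call:
masm.branch32(MacroAssembler::NotEqual, counterReg, MacroAssembler::Imm32(0), loopTop);
```

After these come the Ptr-suffixed aliases, which on x86 map pointer-width operations straight onto their 32-bit equivalents.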
+#if !PLATFORM(X86_64) + void addPtr(RegisterID src, RegisterID dest) { - m_assembler.andl_ir(imm.m_value, dest); + add32(src, dest); } - void lshift32(Imm32 imm, RegisterID dest) - { - m_assembler.shll_i8r(imm.m_value, dest); - } - - void lshift32(RegisterID shift_amount, RegisterID dest) + void addPtr(Imm32 imm, RegisterID srcDest) { - // On x86 we can only shift by ecx; if asked to shift by another register we'll - // need rejig the shift amount into ecx first, and restore the registers afterwards. - if (shift_amount != X86::ecx) { - swap(shift_amount, X86::ecx); - - // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx" - if (dest == shift_amount) - m_assembler.shll_CLr(X86::ecx); - // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx" - else if (dest == X86::ecx) - m_assembler.shll_CLr(shift_amount); - // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx" - else - m_assembler.shll_CLr(dest); - - swap(shift_amount, X86::ecx); - } else - m_assembler.shll_CLr(dest); + add32(imm, srcDest); } - - // Take the value from dividend, divide it by divisor, and put the remainder in remainder. - // For now, this operation has specific register requirements, and the three register must - // be unique. It is unfortunate to expose this in the MacroAssembler interface, however - // given the complexity to fix, the fact that it is not uncommmon for processors to have - // specific register requirements on this operation (e.g. Mips result in 'hi'), or to not - // support a hardware divide at all, it may not be - void mod32(RegisterID divisor, RegisterID dividend, RegisterID remainder) - { -#ifdef NDEBUG -#pragma unused(dividend,remainder) -#else - ASSERT((dividend == X86::eax) && (remainder == X86::edx)); - ASSERT((dividend != divisor) && (remainder != divisor)); -#endif - m_assembler.cdq(); - m_assembler.idivl_r(divisor); + void addPtr(ImmPtr imm, RegisterID dest) + { + add32(Imm32(imm), dest); } - void mul32(RegisterID src, RegisterID dest) + void addPtr(Imm32 imm, RegisterID src, RegisterID dest) { - m_assembler.imull_rr(src, dest); + add32(imm, src, dest); } - - void mul32(Imm32 imm, RegisterID src, RegisterID dest) + + void andPtr(RegisterID src, RegisterID dest) { - m_assembler.imull_i32r(src, imm.m_value, dest); + and32(src, dest); } - - void not32(RegisterID srcDest) + + void andPtr(Imm32 imm, RegisterID srcDest) { - m_assembler.notl_r(srcDest); + and32(imm, srcDest); } - + void orPtr(RegisterID src, RegisterID dest) { -#if PLATFORM(X86_64) - m_assembler.orq_rr(src, dest); -#else or32(src, dest); -#endif } void orPtr(ImmPtr imm, RegisterID dest) { -#if PLATFORM(X86_64) - move(imm, scratchRegister); - m_assembler.orq_rr(scratchRegister, dest); -#else or32(Imm32(imm), dest); -#endif } void orPtr(Imm32 imm, RegisterID dest) { -#if PLATFORM(X86_64) - m_assembler.orq_ir(imm.m_value, dest); -#else or32(imm, dest); -#endif - } - - void or32(RegisterID src, RegisterID dest) - { - m_assembler.orl_rr(src, dest); - } - - void or32(Imm32 imm, RegisterID dest) - { - m_assembler.orl_ir(imm.m_value, dest); } void rshiftPtr(RegisterID shift_amount, RegisterID dest) { -#if PLATFORM(X86_64) - // On x86 we can only shift by ecx; if asked to shift by another register we'll - // need rejig the shift amount into ecx first, and restore the registers afterwards. - if (shift_amount != X86::ecx) { - swap(shift_amount, X86::ecx); - - // E.g. 
transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx" - if (dest == shift_amount) - m_assembler.sarq_CLr(X86::ecx); - // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx" - else if (dest == X86::ecx) - m_assembler.sarq_CLr(shift_amount); - // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx" - else - m_assembler.sarq_CLr(dest); - - swap(shift_amount, X86::ecx); - } else - m_assembler.sarq_CLr(dest); -#else rshift32(shift_amount, dest); -#endif } void rshiftPtr(Imm32 imm, RegisterID dest) { -#if PLATFORM(X86_64) - m_assembler.sarq_i8r(imm.m_value, dest); -#else rshift32(imm, dest); -#endif - } - - void rshift32(RegisterID shift_amount, RegisterID dest) - { - // On x86 we can only shift by ecx; if asked to shift by another register we'll - // need rejig the shift amount into ecx first, and restore the registers afterwards. - if (shift_amount != X86::ecx) { - swap(shift_amount, X86::ecx); - - // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx" - if (dest == shift_amount) - m_assembler.sarl_CLr(X86::ecx); - // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx" - else if (dest == X86::ecx) - m_assembler.sarl_CLr(shift_amount); - // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx" - else - m_assembler.sarl_CLr(dest); - - swap(shift_amount, X86::ecx); - } else - m_assembler.sarl_CLr(dest); - } - - void rshift32(Imm32 imm, RegisterID dest) - { - m_assembler.sarl_i8r(imm.m_value, dest); } void subPtr(RegisterID src, RegisterID dest) { -#if PLATFORM(X86_64) - m_assembler.subq_rr(src, dest); -#else sub32(src, dest); -#endif } void subPtr(Imm32 imm, RegisterID dest) { -#if PLATFORM(X86_64) - m_assembler.subq_ir(imm.m_value, dest); -#else sub32(imm, dest); -#endif } void subPtr(ImmPtr imm, RegisterID dest) { -#if PLATFORM(X86_64) - move(imm, scratchRegister); - m_assembler.subq_rr(scratchRegister, dest); -#else sub32(Imm32(imm), dest); -#endif - } - - void sub32(RegisterID src, RegisterID dest) - { - m_assembler.subl_rr(src, dest); - } - - void sub32(Imm32 imm, RegisterID dest) - { - m_assembler.subl_ir(imm.m_value, dest); - } - - void sub32(Imm32 imm, Address address) - { - m_assembler.subl_im(imm.m_value, address.offset, address.base); - } - - void sub32(Imm32 imm, AbsoluteAddress address) - { -#if PLATFORM(X86_64) - move(ImmPtr(address.m_ptr), scratchRegister); - sub32(imm, Address(scratchRegister)); -#else - m_assembler.subl_im(imm.m_value, address.m_ptr); -#endif - } - - void sub32(Address src, RegisterID dest) - { - m_assembler.subl_mr(src.offset, src.base, dest); } void xorPtr(RegisterID src, RegisterID dest) { -#if PLATFORM(X86_64) - m_assembler.xorq_rr(src, dest); -#else xor32(src, dest); -#endif } void xorPtr(Imm32 imm, RegisterID srcDest) { -#if PLATFORM(X86_64) - m_assembler.xorq_ir(imm.m_value, srcDest); -#else xor32(imm, srcDest); -#endif } - void xor32(RegisterID src, RegisterID dest) - { - m_assembler.xorl_rr(src, dest); - } - - void xor32(Imm32 imm, RegisterID srcDest) - { - m_assembler.xorl_ir(imm.m_value, srcDest); - } - - - // Memory access operations: - // - // Loads are of the form load(address, destination) and stores of the form - // store(source, address). The source for a store may be an Imm32. Address - // operand objects to loads and store will be implicitly constructed if a - // register is passed. 
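The register contract documented on the removed mod32 above mirrors x86's cdq/idivl pair. What it computes, modeled in plain C++ (eax carries the dividend in and the quotient out; edx receives the remainder; like idivl, this is not defined for INT32_MIN divided by -1):

```cpp
#include <cstdint>

void mod32Model(int32_t divisor, int32_t& eax, int32_t& edx)
{
    int64_t wide = eax;                           // cdq: sign-extend eax into edx:eax
    edx = static_cast<int32_t>(wide % divisor);   // idivl: remainder -> edx
    eax = static_cast<int32_t>(wide / divisor);   // idivl: quotient  -> eax
}
```

The memory-access loadPtr/storePtr forms described next follow the same Ptr-to-32-bit mapping on x86.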
void loadPtr(ImplicitAddress address, RegisterID dest) { -#if PLATFORM(X86_64) - m_assembler.movq_mr(address.offset, address.base, dest); -#else load32(address, dest); -#endif - } - - DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest) - { -#if PLATFORM(X86_64) - m_assembler.movq_mr_disp32(address.offset, address.base, dest); - return DataLabel32(this); -#else - m_assembler.movl_mr_disp32(address.offset, address.base, dest); - return DataLabel32(this); -#endif } void loadPtr(BaseIndex address, RegisterID dest) { -#if PLATFORM(X86_64) - m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest); -#else load32(address, dest); -#endif } void loadPtr(void* address, RegisterID dest) { -#if PLATFORM(X86_64) - if (dest == X86::eax) - m_assembler.movq_mEAX(address); - else { - move(X86::eax, dest); - m_assembler.movq_mEAX(address); - swap(X86::eax, dest); - } -#else load32(address, dest); -#endif - } - - void load32(ImplicitAddress address, RegisterID dest) - { - m_assembler.movl_mr(address.offset, address.base, dest); } - void load32(BaseIndex address, RegisterID dest) + DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest) { - m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest); + return load32WithAddressOffsetPatch(address, dest); } - void load32(void* address, RegisterID dest) + void setPtr(Condition cond, RegisterID left, Imm32 right, RegisterID dest) { -#if PLATFORM(X86_64) - if (dest == X86::eax) - m_assembler.movl_mEAX(address); - else { - move(X86::eax, dest); - m_assembler.movl_mEAX(address); - swap(X86::eax, dest); - } -#else - m_assembler.movl_mr(address, dest); -#endif - } - - void load16(BaseIndex address, RegisterID dest) - { - m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest); + set32(cond, left, right, dest); } void storePtr(RegisterID src, ImplicitAddress address) { -#if PLATFORM(X86_64) - m_assembler.movq_rm(src, address.offset, address.base); -#else store32(src, address); -#endif - } - - DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address) - { -#if PLATFORM(X86_64) - m_assembler.movq_rm_disp32(src, address.offset, address.base); - return DataLabel32(this); -#else - m_assembler.movl_rm_disp32(src, address.offset, address.base); - return DataLabel32(this); -#endif } void storePtr(RegisterID src, BaseIndex address) { -#if PLATFORM(X86_64) - m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale); -#else store32(src, address); -#endif } void storePtr(ImmPtr imm, ImplicitAddress address) { -#if PLATFORM(X86_64) - move(imm, scratchRegister); - storePtr(scratchRegister, address); -#else - m_assembler.movl_i32m(imm.asIntptr(), address.offset, address.base); -#endif + store32(Imm32(imm), address); } -#if !PLATFORM(X86_64) void storePtr(ImmPtr imm, void* address) { store32(Imm32(imm), address); } -#endif - - DataLabelPtr storePtrWithPatch(Address address) - { -#if PLATFORM(X86_64) - m_assembler.movq_i64r(0, scratchRegister); - DataLabelPtr label(this); - storePtr(scratchRegister, address); - return label; -#else - m_assembler.movl_i32m(0, address.offset, address.base); - return DataLabelPtr(this); -#endif - } - - void store32(RegisterID src, ImplicitAddress address) - { - m_assembler.movl_rm(src, address.offset, address.base); - } - - void store32(RegisterID src, BaseIndex address) - { - m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale); - } - - void 
store32(Imm32 imm, ImplicitAddress address) - { - m_assembler.movl_i32m(imm.m_value, address.offset, address.base); - } - - void store32(Imm32 imm, void* address) - { -#if PLATFORM(X86_64) - move(X86::eax, scratchRegister); - move(imm, X86::eax); - m_assembler.movl_EAXm(address); - move(scratchRegister, X86::eax); -#else - m_assembler.movl_i32m(imm.m_value, address); -#endif - } - - - // Stack manipulation operations: - // - // The ABI is assumed to provide a stack abstraction to memory, - // containing machine word sized units of data. Push and pop - // operations add and remove a single register sized unit of data - // to or from the stack. Peek and poke operations read or write - // values on the stack, without moving the current stack position. - - void pop(RegisterID dest) - { - m_assembler.pop_r(dest); - } - - void push(RegisterID src) - { - m_assembler.push_r(src); - } - - void push(Address address) - { - m_assembler.push_m(address.offset, address.base); - } - - void push(Imm32 imm) - { - m_assembler.push_i32(imm.m_value); - } - - void pop() - { - addPtr(Imm32(sizeof(void*)), X86::esp); - } - - void peek(RegisterID dest, int index = 0) - { - loadPtr(Address(X86::esp, (index * sizeof(void *))), dest); - } - - void poke(RegisterID src, int index = 0) - { - storePtr(src, Address(X86::esp, (index * sizeof(void *)))); - } - - void poke(Imm32 value, int index = 0) - { - store32(value, Address(X86::esp, (index * sizeof(void *)))); - } - - void poke(ImmPtr imm, int index = 0) - { - storePtr(imm, Address(X86::esp, (index * sizeof(void *)))); - } - - // Register move operations: - // - // Move values in registers. - - void move(Imm32 imm, RegisterID dest) - { - // Note: on 64-bit the Imm32 value is zero extended into the register, it - // may be useful to have a separate version that sign extends the value? - if (!imm.m_value) - m_assembler.xorl_rr(dest, dest); - else - m_assembler.movl_i32r(imm.m_value, dest); - } - - void move(RegisterID src, RegisterID dest) - { - // Note: on 64-bit this is is a full register move; perhaps it would be - // useful to have separate move32 & movePtr, with move32 zero extending? -#if PLATFORM(X86_64) - m_assembler.movq_rr(src, dest); -#else - m_assembler.movl_rr(src, dest); -#endif - } - - void move(ImmPtr imm, RegisterID dest) - { -#if PLATFORM(X86_64) - if (CAN_SIGN_EXTEND_U32_64(imm.asIntptr())) - m_assembler.movl_i32r(static_cast<int32_t>(imm.asIntptr()), dest); - else - m_assembler.movq_i64r(imm.asIntptr(), dest); -#else - m_assembler.movl_i32r(imm.asIntptr(), dest); -#endif - } - - void swap(RegisterID reg1, RegisterID reg2) - { -#if PLATFORM(X86_64) - m_assembler.xchgq_rr(reg1, reg2); -#else - m_assembler.xchgl_rr(reg1, reg2); -#endif - } - - void signExtend32ToPtr(RegisterID src, RegisterID dest) - { -#if PLATFORM(X86_64) - m_assembler.movsxd_rr(src, dest); -#else - if (src != dest) - move(src, dest); -#endif - } - - void zeroExtend32ToPtr(RegisterID src, RegisterID dest) - { -#if PLATFORM(X86_64) - m_assembler.movl_rr(src, dest); -#else - if (src != dest) - move(src, dest); -#endif - } - - - // Forwards / external control flow operations: - // - // This set of jump and conditional branch operations return a Jump - // object which may linked at a later point, allow forwards jump, - // or jumps that will require external linkage (after the code has been - // relocated). 
- // - // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge - // respecitvely, for unsigned comparisons the names b, a, be, and ae are - // used (representing the names 'below' and 'above'). - // - // Operands to the comparision are provided in the expected order, e.g. - // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when - // treated as a signed 32bit value, is less than or equal to 5. - // - // jz and jnz test whether the first operand is equal to zero, and take - // an optional second operand of a mask under which to perform the test. - -private: - void compareImm32ForBranch(RegisterID left, int32_t right) - { - m_assembler.cmpl_ir(right, left); - } - - void compareImm32ForBranchEquality(RegisterID reg, int32_t imm) - { - if (!imm) - m_assembler.testl_rr(reg, reg); - else - m_assembler.cmpl_ir(imm, reg); - } - - void compareImm32ForBranchEquality(Address address, int32_t imm) - { - m_assembler.cmpl_im(imm, address.offset, address.base); - } - - void testImm32(RegisterID reg, Imm32 mask) - { - // if we are only interested in the low seven bits, this can be tested with a testb - if (mask.m_value == -1) - m_assembler.testl_rr(reg, reg); - else if ((mask.m_value & ~0x7f) == 0) - m_assembler.testb_i8r(mask.m_value, reg); - else - m_assembler.testl_i32r(mask.m_value, reg); - } - - void testImm32(Address address, Imm32 mask) - { - if (mask.m_value == -1) - m_assembler.cmpl_im(0, address.offset, address.base); - else - m_assembler.testl_i32m(mask.m_value, address.offset, address.base); - } - - void testImm32(BaseIndex address, Imm32 mask) - { - if (mask.m_value == -1) - m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale); - else - m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale); - } - -#if PLATFORM(X86_64) - void compareImm64ForBranch(RegisterID left, int32_t right) - { - m_assembler.cmpq_ir(right, left); - } - - void compareImm64ForBranchEquality(RegisterID reg, int32_t imm) - { - if (!imm) - m_assembler.testq_rr(reg, reg); - else - m_assembler.cmpq_ir(imm, reg); - } - - void testImm64(RegisterID reg, Imm32 mask) - { - // if we are only interested in the low seven bits, this can be tested with a testb - if (mask.m_value == -1) - m_assembler.testq_rr(reg, reg); - else if ((mask.m_value & ~0x7f) == 0) - m_assembler.testb_i8r(mask.m_value, reg); - else - m_assembler.testq_i32r(mask.m_value, reg); - } - - void testImm64(Address address, Imm32 mask) - { - if (mask.m_value == -1) - m_assembler.cmpq_im(0, address.offset, address.base); - else - m_assembler.testq_i32m(mask.m_value, address.offset, address.base); - } - - void testImm64(BaseIndex address, Imm32 mask) - { - if (mask.m_value == -1) - m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale); - else - m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale); - } -#endif - -public: - Jump ja32(RegisterID left, Imm32 right) - { - compareImm32ForBranch(left, right.m_value); - return Jump(m_assembler.ja()); - } - - Jump jaePtr(RegisterID left, RegisterID right) - { -#if PLATFORM(X86_64) - m_assembler.cmpq_rr(right, left); - return Jump(m_assembler.jae()); -#else - return jae32(left, right); -#endif - } - - Jump jaePtr(RegisterID reg, ImmPtr ptr) - { -#if PLATFORM(X86_64) - intptr_t imm = ptr.asIntptr(); - if (CAN_SIGN_EXTEND_32_64(imm)) { - compareImm64ForBranch(reg, imm); - return Jump(m_assembler.jae()); - } else { - move(ptr, scratchRegister); - return 
jaePtr(reg, scratchRegister); - } -#else - return jae32(reg, Imm32(ptr)); -#endif - } - - Jump jae32(RegisterID left, RegisterID right) - { - m_assembler.cmpl_rr(right, left); - return Jump(m_assembler.jae()); - } - - Jump jae32(RegisterID left, Imm32 right) - { - compareImm32ForBranch(left, right.m_value); - return Jump(m_assembler.jae()); - } - - Jump jae32(RegisterID left, Address right) - { - m_assembler.cmpl_mr(right.offset, right.base, left); - return Jump(m_assembler.jae()); - } - - Jump jae32(Address left, RegisterID right) - { - m_assembler.cmpl_rm(right, left.offset, left.base); - return Jump(m_assembler.jae()); - } - - Jump jbPtr(RegisterID left, RegisterID right) - { -#if PLATFORM(X86_64) - m_assembler.cmpq_rr(right, left); - return Jump(m_assembler.jb()); -#else - return jb32(left, right); -#endif - } - - Jump jbPtr(RegisterID reg, ImmPtr ptr) - { -#if PLATFORM(X86_64) - intptr_t imm = ptr.asIntptr(); - if (CAN_SIGN_EXTEND_32_64(imm)) { - compareImm64ForBranch(reg, imm); - return Jump(m_assembler.jb()); - } else { - move(ptr, scratchRegister); - return jbPtr(reg, scratchRegister); - } -#else - return jb32(reg, Imm32(ptr)); -#endif - } - - Jump jb32(RegisterID left, RegisterID right) - { - m_assembler.cmpl_rr(right, left); - return Jump(m_assembler.jb()); - } - - Jump jb32(RegisterID left, Imm32 right) - { - compareImm32ForBranch(left, right.m_value); - return Jump(m_assembler.jb()); - } - - Jump jb32(RegisterID left, Address right) - { - m_assembler.cmpl_mr(right.offset, right.base, left); - return Jump(m_assembler.jb()); - } - - Jump jePtr(RegisterID op1, RegisterID op2) - { -#if PLATFORM(X86_64) - m_assembler.cmpq_rr(op1, op2); - return Jump(m_assembler.je()); -#else - return je32(op1, op2); -#endif - } - - Jump jePtr(RegisterID reg, Address address) - { -#if PLATFORM(X86_64) - m_assembler.cmpq_rm(reg, address.offset, address.base); -#else - m_assembler.cmpl_rm(reg, address.offset, address.base); -#endif - return Jump(m_assembler.je()); - } - - Jump jePtr(RegisterID reg, ImmPtr ptr) - { -#if PLATFORM(X86_64) - intptr_t imm = ptr.asIntptr(); - if (CAN_SIGN_EXTEND_32_64(imm)) { - compareImm64ForBranchEquality(reg, imm); - return Jump(m_assembler.je()); - } else { - move(ptr, scratchRegister); - return jePtr(scratchRegister, reg); - } -#else - return je32(reg, Imm32(ptr)); -#endif - } - - Jump jePtr(Address address, ImmPtr imm) - { -#if PLATFORM(X86_64) - move(imm, scratchRegister); - return jePtr(scratchRegister, address); -#else - return je32(address, Imm32(imm)); -#endif - } - - Jump je32(RegisterID op1, RegisterID op2) - { - m_assembler.cmpl_rr(op1, op2); - return Jump(m_assembler.je()); - } - - Jump je32(Address op1, RegisterID op2) - { - m_assembler.cmpl_mr(op1.offset, op1.base, op2); - return Jump(m_assembler.je()); - } - - Jump je32(RegisterID reg, Imm32 imm) - { - compareImm32ForBranchEquality(reg, imm.m_value); - return Jump(m_assembler.je()); - } - - Jump je32(Address address, Imm32 imm) - { - compareImm32ForBranchEquality(address, imm.m_value); - return Jump(m_assembler.je()); - } - - Jump je16(RegisterID op1, BaseIndex op2) - { - m_assembler.cmpw_rm(op1, op2.offset, op2.base, op2.index, op2.scale); - return Jump(m_assembler.je()); - } - - Jump jg32(RegisterID left, RegisterID right) - { - m_assembler.cmpl_rr(right, left); - return Jump(m_assembler.jg()); - } - - Jump jg32(RegisterID reg, Address address) - { - m_assembler.cmpl_mr(address.offset, address.base, reg); - return Jump(m_assembler.jg()); - } - - Jump jgePtr(RegisterID left, RegisterID right) - { -#if 
PLATFORM(X86_64) - m_assembler.cmpq_rr(right, left); - return Jump(m_assembler.jge()); -#else - return jge32(left, right); -#endif - } - - Jump jgePtr(RegisterID reg, ImmPtr ptr) - { -#if PLATFORM(X86_64) - intptr_t imm = ptr.asIntptr(); - if (CAN_SIGN_EXTEND_32_64(imm)) { - compareImm64ForBranch(reg, imm); - return Jump(m_assembler.jge()); - } else { - move(ptr, scratchRegister); - return jgePtr(reg, scratchRegister); - } -#else - return jge32(reg, Imm32(ptr)); -#endif - } - - Jump jge32(RegisterID left, RegisterID right) - { - m_assembler.cmpl_rr(right, left); - return Jump(m_assembler.jge()); - } - - Jump jge32(RegisterID left, Imm32 right) - { - compareImm32ForBranch(left, right.m_value); - return Jump(m_assembler.jge()); - } - - Jump jlPtr(RegisterID left, RegisterID right) - { -#if PLATFORM(X86_64) - m_assembler.cmpq_rr(right, left); - return Jump(m_assembler.jl()); -#else - return jl32(left, right); -#endif - } - - Jump jlPtr(RegisterID reg, ImmPtr ptr) - { -#if PLATFORM(X86_64) - intptr_t imm = ptr.asIntptr(); - if (CAN_SIGN_EXTEND_32_64(imm)) { - compareImm64ForBranch(reg, imm); - return Jump(m_assembler.jl()); - } else { - move(ptr, scratchRegister); - return jlPtr(reg, scratchRegister); - } -#else - return jl32(reg, Imm32(ptr)); -#endif - } - - Jump jl32(RegisterID left, RegisterID right) - { - m_assembler.cmpl_rr(right, left); - return Jump(m_assembler.jl()); - } - - Jump jl32(RegisterID left, Imm32 right) - { - compareImm32ForBranch(left, right.m_value); - return Jump(m_assembler.jl()); - } - - Jump jlePtr(RegisterID left, RegisterID right) - { -#if PLATFORM(X86_64) - m_assembler.cmpq_rr(right, left); - return Jump(m_assembler.jle()); -#else - return jle32(left, right); -#endif - } - - Jump jlePtr(RegisterID reg, ImmPtr ptr) - { -#if PLATFORM(X86_64) - intptr_t imm = ptr.asIntptr(); - if (CAN_SIGN_EXTEND_32_64(imm)) { - compareImm64ForBranch(reg, imm); - return Jump(m_assembler.jle()); - } else { - move(ptr, scratchRegister); - return jlePtr(reg, scratchRegister); - } -#else - return jle32(reg, Imm32(ptr)); -#endif - } - - Jump jle32(RegisterID left, RegisterID right) - { - m_assembler.cmpl_rr(right, left); - return Jump(m_assembler.jle()); - } - - Jump jle32(RegisterID left, Imm32 right) - { - compareImm32ForBranch(left, right.m_value); - return Jump(m_assembler.jle()); - } - - Jump jnePtr(RegisterID op1, RegisterID op2) - { -#if PLATFORM(X86_64) - m_assembler.cmpq_rr(op1, op2); - return Jump(m_assembler.jne()); -#else - return jne32(op1, op2); -#endif - } - Jump jnePtr(RegisterID reg, Address address) - { -#if PLATFORM(X86_64) - m_assembler.cmpq_rm(reg, address.offset, address.base); -#else - m_assembler.cmpl_rm(reg, address.offset, address.base); -#endif - return Jump(m_assembler.jne()); - } - - Jump jnePtr(RegisterID reg, AbsoluteAddress address) - { -#if PLATFORM(X86_64) - move(ImmPtr(address.m_ptr), scratchRegister); - return jnePtr(reg, Address(scratchRegister)); -#else - m_assembler.cmpl_rm(reg, address.m_ptr); - return Jump(m_assembler.jne()); -#endif - } - - Jump jnePtr(RegisterID reg, ImmPtr ptr) - { -#if PLATFORM(X86_64) - intptr_t imm = ptr.asIntptr(); - if (CAN_SIGN_EXTEND_32_64(imm)) { - compareImm64ForBranchEquality(reg, imm); - return Jump(m_assembler.jne()); - } else { - move(ptr, scratchRegister); - return jnePtr(scratchRegister, reg); - } -#else - return jne32(reg, Imm32(ptr)); -#endif - } - - Jump jnePtr(Address address, ImmPtr imm) - { -#if PLATFORM(X86_64) - move(imm, scratchRegister); - return jnePtr(scratchRegister, address); -#else - return 
jne32(address, Imm32(imm)); -#endif - } - -#if !PLATFORM(X86_64) - Jump jnePtr(AbsoluteAddress address, ImmPtr imm) + DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address) { - m_assembler.cmpl_im(imm.asIntptr(), address.m_ptr); - return Jump(m_assembler.jne()); + return store32WithAddressOffsetPatch(src, address); } -#endif - Jump jnePtrWithPatch(RegisterID reg, DataLabelPtr& dataLabel, ImmPtr initialValue = ImmPtr(0)) - { -#if PLATFORM(X86_64) - m_assembler.movq_i64r(initialValue.asIntptr(), scratchRegister); - dataLabel = DataLabelPtr(this); - return jnePtr(scratchRegister, reg); -#else - m_assembler.cmpl_ir_force32(initialValue.asIntptr(), reg); - dataLabel = DataLabelPtr(this); - return Jump(m_assembler.jne()); -#endif - } - Jump jnePtrWithPatch(Address address, DataLabelPtr& dataLabel, ImmPtr initialValue = ImmPtr(0)) + Jump branchPtr(Condition cond, RegisterID left, RegisterID right) { -#if PLATFORM(X86_64) - m_assembler.movq_i64r(initialValue.asIntptr(), scratchRegister); - dataLabel = DataLabelPtr(this); - return jnePtr(scratchRegister, address); -#else - m_assembler.cmpl_im_force32(initialValue.asIntptr(), address.offset, address.base); - dataLabel = DataLabelPtr(this); - return Jump(m_assembler.jne()); -#endif + return branch32(cond, left, right); } - Jump jne32(RegisterID op1, RegisterID op2) + Jump branchPtr(Condition cond, RegisterID left, ImmPtr right) { - m_assembler.cmpl_rr(op1, op2); - return Jump(m_assembler.jne()); + return branch32(cond, left, Imm32(right)); } - Jump jne32(RegisterID reg, Imm32 imm) + Jump branchPtr(Condition cond, RegisterID left, Address right) { - compareImm32ForBranchEquality(reg, imm.m_value); - return Jump(m_assembler.jne()); + return branch32(cond, left, right); } - Jump jne32(Address address, Imm32 imm) - { - compareImm32ForBranchEquality(address, imm.m_value); - return Jump(m_assembler.jne()); - } - - Jump jne32(Address address, RegisterID reg) + Jump branchPtr(Condition cond, Address left, RegisterID right) { - m_assembler.cmpl_rm(reg, address.offset, address.base); - return Jump(m_assembler.jne()); - } - - Jump jnzPtr(RegisterID reg, RegisterID mask) - { -#if PLATFORM(X86_64) - m_assembler.testq_rr(reg, mask); - return Jump(m_assembler.jne()); -#else - return jnz32(reg, mask); -#endif + return branch32(cond, left, right); } - Jump jnzPtr(RegisterID reg, Imm32 mask = Imm32(-1)) + Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right) { -#if PLATFORM(X86_64) - testImm64(reg, mask); - return Jump(m_assembler.jne()); -#else - return jnz32(reg, mask); -#endif + return branch32(cond, left, right); } - Jump jnzPtr(RegisterID reg, ImmPtr mask) + Jump branchPtr(Condition cond, Address left, ImmPtr right) { -#if PLATFORM(X86_64) - move(mask, scratchRegister); - m_assembler.testq_rr(scratchRegister, reg); - return Jump(m_assembler.jne()); -#else - return jnz32(reg, Imm32(mask)); -#endif + return branch32(cond, left, Imm32(right)); } - Jump jnzPtr(Address address, Imm32 mask = Imm32(-1)) + Jump branchPtr(Condition cond, AbsoluteAddress left, ImmPtr right) { -#if PLATFORM(X86_64) - testImm64(address, mask); - return Jump(m_assembler.jne()); -#else - return jnz32(address, mask); -#endif + return branch32(cond, left, Imm32(right)); } - Jump jnz32(RegisterID reg, RegisterID mask) + Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask) { - m_assembler.testl_rr(reg, mask); - return Jump(m_assembler.jne()); + return branchTest32(cond, reg, mask); } - Jump jnz32(RegisterID reg, Imm32 mask = Imm32(-1)) + Jump 
branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1)) { - testImm32(reg, mask); - return Jump(m_assembler.jne()); + return branchTest32(cond, reg, mask); } - Jump jnz32(Address address, Imm32 mask = Imm32(-1)) + Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1)) { - testImm32(address, mask); - return Jump(m_assembler.jne()); + return branchTest32(cond, address, mask); } - Jump jzPtr(RegisterID reg, RegisterID mask) + Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1)) { -#if PLATFORM(X86_64) - m_assembler.testq_rr(reg, mask); - return Jump(m_assembler.je()); -#else - return jz32(reg, mask); -#endif + return branchTest32(cond, address, mask); } - Jump jzPtr(RegisterID reg, Imm32 mask = Imm32(-1)) - { -#if PLATFORM(X86_64) - testImm64(reg, mask); - return Jump(m_assembler.je()); -#else - return jz32(reg, mask); -#endif - } - Jump jzPtr(RegisterID reg, ImmPtr mask) + Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest) { -#if PLATFORM(X86_64) - move(mask, scratchRegister); - m_assembler.testq_rr(scratchRegister, reg); - return Jump(m_assembler.je()); -#else - return jz32(reg, Imm32(mask)); -#endif + return branchAdd32(cond, src, dest); } - Jump jzPtr(Address address, Imm32 mask = Imm32(-1)) + Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest) { -#if PLATFORM(X86_64) - testImm64(address, mask); - return Jump(m_assembler.je()); -#else - return jz32(address, mask); -#endif + return branchSub32(cond, imm, dest); } - - Jump jzPtr(BaseIndex address, Imm32 mask = Imm32(-1)) - { -#if PLATFORM(X86_64) - testImm64(address, mask); - return Jump(m_assembler.je()); -#else - return jz32(address, mask); #endif - } - - Jump jz32(RegisterID reg, RegisterID mask) - { - m_assembler.testl_rr(reg, mask); - return Jump(m_assembler.je()); - } - - Jump jz32(RegisterID reg, Imm32 mask = Imm32(-1)) - { - testImm32(reg, mask); - return Jump(m_assembler.je()); - } - - Jump jz32(Address address, Imm32 mask = Imm32(-1)) - { - testImm32(address, mask); - return Jump(m_assembler.je()); - } - - Jump jz32(BaseIndex address, Imm32 mask = Imm32(-1)) - { - testImm32(address, mask); - return Jump(m_assembler.je()); - } - - Jump jump() - { - return Jump(m_assembler.jmp()); - } - - - // Backwards, local control flow operations: - // - // These operations provide a shorter notation for local - // backwards branches, which may be more convenient both - // for the programmer and for the assembler (allowing - // shorter values to be used in relative offsets). - // - // The code sequence: - // - // Label topOfLoop(this); - // ...
- // jne32(reg1, reg2).linkTo(topOfLoop); - - void jae32(RegisterID left, Address right, Label target) - { - jae32(left, right).linkTo(target, this); - } - - void je32(RegisterID op1, Imm32 imm, Label target) - { - je32(op1, imm).linkTo(target, this); - } - - void je16(RegisterID op1, BaseIndex op2, Label target) - { - je16(op1, op2).linkTo(target, this); - } - - void jl32(RegisterID left, Imm32 right, Label target) - { - jl32(left, right).linkTo(target, this); - } - - void jle32(RegisterID left, RegisterID right, Label target) - { - jle32(left, right).linkTo(target, this); - } - - void jnePtr(RegisterID op1, ImmPtr imm, Label target) - { - jnePtr(op1, imm).linkTo(target, this); - } - - void jne32(RegisterID op1, RegisterID op2, Label target) - { - jne32(op1, op2).linkTo(target, this); - } - - void jne32(RegisterID op1, Imm32 imm, Label target) - { - jne32(op1, imm).linkTo(target, this); - } - - void jzPtr(RegisterID reg, Label target) - { - jzPtr(reg).linkTo(target, this); - } - - void jump(Label target) - { - m_assembler.link(m_assembler.jmp(), target.m_label); - } - - void jump(RegisterID target) - { - m_assembler.jmp_r(target); - } - - // Address is a memory location containing the address to jump to - void jump(Address address) - { - m_assembler.jmp_m(address.offset, address.base); - } - - - // Arithmetic control flow operations: - // - // This set of conditional branch operations branch based - // on the result of an arithmetic operation. The operation - // is performed as normal, storing the result. - // - // * jz operations branch if the result is zero. - // * jo operations branch if the (signed) arithmetic - // operation caused an overflow to occur. - - Jump jnzSubPtr(Imm32 imm, RegisterID dest) - { - subPtr(imm, dest); - return Jump(m_assembler.jne()); - } - - Jump jnzSub32(Imm32 imm, RegisterID dest) - { - sub32(imm, dest); - return Jump(m_assembler.jne()); - } - - Jump joAddPtr(RegisterID src, RegisterID dest) - { - addPtr(src, dest); - return Jump(m_assembler.jo()); - } - - Jump joAdd32(RegisterID src, RegisterID dest) - { - add32(src, dest); - return Jump(m_assembler.jo()); - } - - Jump joAdd32(Imm32 imm, RegisterID dest) - { - add32(imm, dest); - return Jump(m_assembler.jo()); - } - - Jump joMul32(RegisterID src, RegisterID dest) - { - mul32(src, dest); - return Jump(m_assembler.jo()); - } - - Jump joMul32(Imm32 imm, RegisterID src, RegisterID dest) - { - mul32(imm, src, dest); - return Jump(m_assembler.jo()); - } - - Jump joSub32(RegisterID src, RegisterID dest) - { - sub32(src, dest); - return Jump(m_assembler.jo()); - } - - Jump joSub32(Imm32 imm, RegisterID dest) - { - sub32(imm, dest); - return Jump(m_assembler.jo()); - } - - Jump jzSubPtr(Imm32 imm, RegisterID dest) - { - subPtr(imm, dest); - return Jump(m_assembler.je()); - } - - Jump jzSub32(Imm32 imm, RegisterID dest) - { - sub32(imm, dest); - return Jump(m_assembler.je()); - } - - // Miscellaneous operations: - - void breakpoint() - { - m_assembler.int3(); - } - - Jump call() - { - return Jump(m_assembler.call()); - } - - // FIXME: why does this return a Jump object? - it can't be linked. - // This may be to get a reference to the return address of the call. - // - // This should probably be handled by a separate label type to a regular - // jump. Todo: add a CallLabel type, for the regular call - can be linked - // like a jump (possibly a subclass of jump?, or possibly casts to a Jump). 
- // Also add a CallReturnLabel type for this to return (just a more JmpDsty - // form of label, can get the void* after the code has been linked, but can't - // try to link it like a Jump object), and let the CallLabel be cast into a - // CallReturnLabel. - Jump call(RegisterID target) - { - return Jump(m_assembler.call(target)); - } - - Label label() - { - return Label(this); - } - - Label align() - { - m_assembler.align(16); - return Label(this); - } - - ptrdiff_t differenceBetween(Label from, Jump to) - { - return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_jmp); - } - - ptrdiff_t differenceBetween(Label from, Label to) - { - return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_label); - } - - ptrdiff_t differenceBetween(Label from, DataLabelPtr to) - { - return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_label); - } - - ptrdiff_t differenceBetween(Label from, DataLabel32 to) - { - return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_label); - } - - ptrdiff_t differenceBetween(DataLabelPtr from, Jump to) - { - return X86Assembler::getDifferenceBetweenLabels(from.m_label, to.m_jmp); - } - - void ret() - { - m_assembler.ret(); - } - - void sete32(RegisterID src, RegisterID srcDest) - { - m_assembler.cmpl_rr(srcDest, src); - m_assembler.sete_r(srcDest); - m_assembler.movzbl_rr(srcDest, srcDest); - } - - void sete32(Imm32 imm, RegisterID srcDest) - { - compareImm32ForBranchEquality(srcDest, imm.m_value); - m_assembler.sete_r(srcDest); - m_assembler.movzbl_rr(srcDest, srcDest); - } - - void setne32(RegisterID src, RegisterID srcDest) - { - m_assembler.cmpl_rr(srcDest, src); - m_assembler.setne_r(srcDest); - m_assembler.movzbl_rr(srcDest, srcDest); - } - - void setne32(Imm32 imm, RegisterID srcDest) - { - compareImm32ForBranchEquality(srcDest, imm.m_value); - m_assembler.setne_r(srcDest); - m_assembler.movzbl_rr(srcDest, srcDest); - } - - // FIXME: - // The mask should be optional... perhaps the argument order should be - // dest-src, operations always have a dest? ... possibly not true, considering - // asm ops like test, or pseudo ops like pop(). - void setnz32(Address address, Imm32 mask, RegisterID dest) - { - testImm32(address, mask); - m_assembler.setnz_r(dest); - m_assembler.movzbl_rr(dest, dest); - } - - void setz32(Address address, Imm32 mask, RegisterID dest) - { - testImm32(address, mask); - m_assembler.setz_r(dest); - m_assembler.movzbl_rr(dest, dest); - } }; } // namespace JSC diff --git a/JavaScriptCore/assembler/MacroAssemblerX86.h b/JavaScriptCore/assembler/MacroAssemblerX86.h new file mode 100644 index 0000000..b85b8b2 --- /dev/null +++ b/JavaScriptCore/assembler/MacroAssemblerX86.h @@ -0,0 +1,126 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef MacroAssemblerX86_h +#define MacroAssemblerX86_h + +#include <wtf/Platform.h> + +#if ENABLE(ASSEMBLER) && PLATFORM(X86) + +#include "MacroAssemblerX86Common.h" + +namespace JSC { + +class MacroAssemblerX86 : public MacroAssemblerX86Common { +public: + static const Scale ScalePtr = TimesFour; + + using MacroAssemblerX86Common::add32; + using MacroAssemblerX86Common::sub32; + using MacroAssemblerX86Common::load32; + using MacroAssemblerX86Common::store32; + using MacroAssemblerX86Common::branch32; + using MacroAssemblerX86Common::call; + + void add32(Imm32 imm, RegisterID src, RegisterID dest) + { + m_assembler.leal_mr(imm.m_value, src, dest); + } + + void add32(Imm32 imm, AbsoluteAddress address) + { + m_assembler.addl_im(imm.m_value, address.m_ptr); + } + + void sub32(Imm32 imm, AbsoluteAddress address) + { + m_assembler.subl_im(imm.m_value, address.m_ptr); + } + + void load32(void* address, RegisterID dest) + { + m_assembler.movl_mr(address, dest); + } + + void store32(Imm32 imm, void* address) + { + m_assembler.movl_i32m(imm.m_value, address); + } + + Jump branch32(Condition cond, AbsoluteAddress left, RegisterID right) + { + m_assembler.cmpl_rm(right, left.m_ptr); + return Jump(m_assembler.jCC(cond)); + } + + Jump branch32(Condition cond, AbsoluteAddress left, Imm32 right) + { + m_assembler.cmpl_im(right.m_value, left.m_ptr); + return Jump(m_assembler.jCC(cond)); + } + + Call call() + { + return Call(m_assembler.call(), Call::Linkable); + } + + Call tailRecursiveCall() + { + return Call::fromTailJump(jump()); + } + + Call makeTailRecursiveCall(Jump oldJump) + { + return Call::fromTailJump(oldJump); + } + + + Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0)) + { + m_assembler.cmpl_ir_force32(initialRightValue.asIntptr(), left); + dataLabel = DataLabelPtr(this); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0)) + { + m_assembler.cmpl_im_force32(initialRightValue.asIntptr(), left.offset, left.base); + dataLabel = DataLabelPtr(this); + return Jump(m_assembler.jCC(cond)); + } + + DataLabelPtr storePtrWithPatch(Address address) + { + m_assembler.movl_i32m(0, address.offset, address.base); + return DataLabelPtr(this); + } +}; + +} // namespace JSC + +#endif // ENABLE(ASSEMBLER) + +#endif // MacroAssemblerX86_h diff --git a/JavaScriptCore/assembler/MacroAssemblerX86Common.h b/JavaScriptCore/assembler/MacroAssemblerX86Common.h new file mode 100644 index 0000000..5fcd25d --- /dev/null +++ b/JavaScriptCore/assembler/MacroAssemblerX86Common.h @@ -0,0 +1,583 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef MacroAssemblerX86Common_h +#define MacroAssemblerX86Common_h + +#include <wtf/Platform.h> + +#if ENABLE(ASSEMBLER) + +#include "X86Assembler.h" +#include "AbstractMacroAssembler.h" + +namespace JSC { + +class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> { +public: + + typedef X86Assembler::Condition Condition; + static const Condition Equal = X86Assembler::ConditionE; + static const Condition NotEqual = X86Assembler::ConditionNE; + static const Condition Above = X86Assembler::ConditionA; + static const Condition AboveOrEqual = X86Assembler::ConditionAE; + static const Condition Below = X86Assembler::ConditionB; + static const Condition BelowOrEqual = X86Assembler::ConditionBE; + static const Condition GreaterThan = X86Assembler::ConditionG; + static const Condition GreaterThanOrEqual = X86Assembler::ConditionGE; + static const Condition LessThan = X86Assembler::ConditionL; + static const Condition LessThanOrEqual = X86Assembler::ConditionLE; + static const Condition Overflow = X86Assembler::ConditionO; + static const Condition Zero = X86Assembler::ConditionE; + static const Condition NonZero = X86Assembler::ConditionNE; + + static const RegisterID stackPointerRegister = X86::esp; + + // Integer arithmetic operations: + // + // Operations are typically two operand - operation(source, srcDst). + // For many operations the source may be an Imm32, and the srcDst operand + // may often be a memory location (explicitly described using an Address + // object). + + void add32(RegisterID src, RegisterID dest) + { + m_assembler.addl_rr(src, dest); + } + + void add32(Imm32 imm, Address address) + { + m_assembler.addl_im(imm.m_value, address.offset, address.base); + } + + void add32(Imm32 imm, RegisterID dest) + { + m_assembler.addl_ir(imm.m_value, dest); + } + + void add32(Address src, RegisterID dest) + { + m_assembler.addl_mr(src.offset, src.base, dest); + } + + void and32(RegisterID src, RegisterID dest) + { + m_assembler.andl_rr(src, dest); + } + + void and32(Imm32 imm, RegisterID dest) + { + m_assembler.andl_ir(imm.m_value, dest); + } + + void lshift32(Imm32 imm, RegisterID dest) + { + m_assembler.shll_i8r(imm.m_value, dest); + } + + void lshift32(RegisterID shift_amount, RegisterID dest) + { + // On x86 we can only shift by ecx; if asked to shift by another register we'll + // need to rejig the shift amount into ecx first, and restore the registers afterwards.
+ if (shift_amount != X86::ecx) { + swap(shift_amount, X86::ecx); + + // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx" + if (dest == shift_amount) + m_assembler.shll_CLr(X86::ecx); + // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx" + else if (dest == X86::ecx) + m_assembler.shll_CLr(shift_amount); + // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx" + else + m_assembler.shll_CLr(dest); + + swap(shift_amount, X86::ecx); + } else + m_assembler.shll_CLr(dest); + } + + void mul32(RegisterID src, RegisterID dest) + { + m_assembler.imull_rr(src, dest); + } + + void mul32(Imm32 imm, RegisterID src, RegisterID dest) + { + m_assembler.imull_i32r(src, imm.m_value, dest); + } + + void not32(RegisterID srcDest) + { + m_assembler.notl_r(srcDest); + } + + void or32(RegisterID src, RegisterID dest) + { + m_assembler.orl_rr(src, dest); + } + + void or32(Imm32 imm, RegisterID dest) + { + m_assembler.orl_ir(imm.m_value, dest); + } + + void rshift32(RegisterID shift_amount, RegisterID dest) + { + // On x86 we can only shift by ecx; if asked to shift by another register we'll + // need to rejig the shift amount into ecx first, and restore the registers afterwards. + if (shift_amount != X86::ecx) { + swap(shift_amount, X86::ecx); + + // E.g. transform "sarl %eax, %eax" -> "xchgl %eax, %ecx; sarl %ecx, %ecx; xchgl %eax, %ecx" + if (dest == shift_amount) + m_assembler.sarl_CLr(X86::ecx); + // E.g. transform "sarl %eax, %ecx" -> "xchgl %eax, %ecx; sarl %ecx, %eax; xchgl %eax, %ecx" + else if (dest == X86::ecx) + m_assembler.sarl_CLr(shift_amount); + // E.g. transform "sarl %eax, %ebx" -> "xchgl %eax, %ecx; sarl %ecx, %ebx; xchgl %eax, %ecx" + else + m_assembler.sarl_CLr(dest); + + swap(shift_amount, X86::ecx); + } else + m_assembler.sarl_CLr(dest); + } + + void rshift32(Imm32 imm, RegisterID dest) + { + m_assembler.sarl_i8r(imm.m_value, dest); + } + + void sub32(RegisterID src, RegisterID dest) + { + m_assembler.subl_rr(src, dest); + } + + void sub32(Imm32 imm, RegisterID dest) + { + m_assembler.subl_ir(imm.m_value, dest); + } + + void sub32(Imm32 imm, Address address) + { + m_assembler.subl_im(imm.m_value, address.offset, address.base); + } + + void sub32(Address src, RegisterID dest) + { + m_assembler.subl_mr(src.offset, src.base, dest); + } + + void xor32(RegisterID src, RegisterID dest) + { + m_assembler.xorl_rr(src, dest); + } + + void xor32(Imm32 imm, RegisterID srcDest) + { + m_assembler.xorl_ir(imm.m_value, srcDest); + } + + + // Memory access operations: + // + // Loads are of the form load(address, destination) and stores of the form + // store(source, address). The source for a store may be an Imm32. Address + // operand objects to loads and stores will be implicitly constructed if a + // register is passed.
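As a usage illustration of the implicit Address conversion described above, here is a short sketch (not part of this patch; the function name and register choices are hypothetical, and nested types are accessed through the concrete class):

    #include "MacroAssemblerX86Common.h"
    using namespace JSC;

    // The first two load32 calls emit identical code: a bare RegisterID is
    // implicitly wrapped as Address(reg, 0) via the ImplicitAddress constructor.
    void loadStoreSketch(MacroAssemblerX86Common& masm)
    {
        typedef MacroAssemblerX86Common M;
        masm.load32(X86::eax, X86::edx);                      // movl (%eax), %edx
        masm.load32(M::Address(X86::eax), X86::edx);          // movl (%eax), %edx
        masm.load32(M::Address(X86::eax, 8), X86::edx);       // movl 8(%eax), %edx
        masm.store32(M::Imm32(42), M::Address(X86::eax, 8));  // movl $42, 8(%eax)
    }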
+ + void load32(ImplicitAddress address, RegisterID dest) + { + m_assembler.movl_mr(address.offset, address.base, dest); + } + + void load32(BaseIndex address, RegisterID dest) + { + m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest); + } + + DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest) + { + m_assembler.movl_mr_disp32(address.offset, address.base, dest); + return DataLabel32(this); + } + + void load16(BaseIndex address, RegisterID dest) + { + m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest); + } + + DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address) + { + m_assembler.movl_rm_disp32(src, address.offset, address.base); + return DataLabel32(this); + } + + void store32(RegisterID src, ImplicitAddress address) + { + m_assembler.movl_rm(src, address.offset, address.base); + } + + void store32(RegisterID src, BaseIndex address) + { + m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale); + } + + void store32(Imm32 imm, ImplicitAddress address) + { + m_assembler.movl_i32m(imm.m_value, address.offset, address.base); + } + + + // Stack manipulation operations: + // + // The ABI is assumed to provide a stack abstraction to memory, + // containing machine word sized units of data. Push and pop + // operations add and remove a single register sized unit of data + // to or from the stack. Peek and poke operations read or write + // values on the stack, without moving the current stack position. + + void pop(RegisterID dest) + { + m_assembler.pop_r(dest); + } + + void push(RegisterID src) + { + m_assembler.push_r(src); + } + + void push(Address address) + { + m_assembler.push_m(address.offset, address.base); + } + + void push(Imm32 imm) + { + m_assembler.push_i32(imm.m_value); + } + + // Register move operations: + // + // Move values in registers. + + void move(Imm32 imm, RegisterID dest) + { + // Note: on 64-bit the Imm32 value is zero extended into the register; it + // may be useful to have a separate version that sign extends the value? + if (!imm.m_value) + m_assembler.xorl_rr(dest, dest); + else + m_assembler.movl_i32r(imm.m_value, dest); + } + +#if PLATFORM(X86_64) + void move(RegisterID src, RegisterID dest) + { + // Note: on 64-bit this is a full register move; perhaps it would be + // useful to have separate move32 & movePtr, with move32 zero extending?
+ m_assembler.movq_rr(src, dest); + } + + void move(ImmPtr imm, RegisterID dest) + { + if (CAN_SIGN_EXTEND_U32_64(imm.asIntptr())) + m_assembler.movl_i32r(static_cast<int32_t>(imm.asIntptr()), dest); + else + m_assembler.movq_i64r(imm.asIntptr(), dest); + } + + void swap(RegisterID reg1, RegisterID reg2) + { + m_assembler.xchgq_rr(reg1, reg2); + } + + void signExtend32ToPtr(RegisterID src, RegisterID dest) + { + m_assembler.movsxd_rr(src, dest); + } + + void zeroExtend32ToPtr(RegisterID src, RegisterID dest) + { + m_assembler.movl_rr(src, dest); + } +#else + void move(RegisterID src, RegisterID dest) + { + m_assembler.movl_rr(src, dest); + } + + void move(ImmPtr imm, RegisterID dest) + { + m_assembler.movl_i32r(imm.asIntptr(), dest); + } + + void swap(RegisterID reg1, RegisterID reg2) + { + m_assembler.xchgl_rr(reg1, reg2); + } + + void signExtend32ToPtr(RegisterID src, RegisterID dest) + { + if (src != dest) + move(src, dest); + } + + void zeroExtend32ToPtr(RegisterID src, RegisterID dest) + { + if (src != dest) + move(src, dest); + } +#endif + + + // Forwards / external control flow operations: + // + // This set of jump and conditional branch operations return a Jump + // object which may be linked at a later point, allowing forwards jumps, + // or jumps that will require external linkage (after the code has been + // relocated). + // + // For branches, the signed <, >, <= and >= conditions are named LessThan, + // GreaterThan, LessThanOrEqual and GreaterThanOrEqual respectively; for + // unsigned comparisons the conditions Below, Above, BelowOrEqual and + // AboveOrEqual are used (representing the names 'below' and 'above'). + // + // Operands to the comparison are provided in the expected order, e.g. + // branch32(LessThanOrEqual, reg1, Imm32(5)) will branch if the value held + // in reg1, when treated as a signed 32-bit value, is less than or equal to 5. + // + // The Zero and NonZero conditions test whether the first operand is equal + // to zero, optionally under a mask provided as a second operand (see the + // branchTest32 family below).
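Below, a hedged sketch of how a client of the macro assembler typically consumes these Jump objects (illustrative only, not from this patch; Jump::link binds the branch target at the current output position):

    #include "MacroAssemblerX86Common.h"
    using namespace JSC;

    // Forward branch over a block: emit the conditional Jump first, generate
    // the fallthrough code, then bind the branch target with link().
    void clampNegativeToZero(MacroAssemblerX86Common& masm)
    {
        typedef MacroAssemblerX86Common M;
        M::Jump nonNegative = masm.branch32(M::GreaterThanOrEqual, X86::eax, M::Imm32(0));
        masm.move(M::Imm32(0), X86::eax);  // reached only when eax < 0
        nonNegative.link(&masm);           // branch target: execution rejoins here
    }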
+ +public: + Jump branch32(Condition cond, RegisterID left, RegisterID right) + { + m_assembler.cmpl_rr(right, left); + return Jump(m_assembler.jCC(cond)); + } + + Jump branch32(Condition cond, RegisterID left, Imm32 right) + { + if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) + m_assembler.testl_rr(left, left); + else + m_assembler.cmpl_ir(right.m_value, left); + return Jump(m_assembler.jCC(cond)); + } + + Jump branch32(Condition cond, RegisterID left, Address right) + { + m_assembler.cmpl_mr(right.offset, right.base, left); + return Jump(m_assembler.jCC(cond)); + } + + Jump branch32(Condition cond, Address left, RegisterID right) + { + m_assembler.cmpl_rm(right, left.offset, left.base); + return Jump(m_assembler.jCC(cond)); + } + + Jump branch32(Condition cond, Address left, Imm32 right) + { + m_assembler.cmpl_im(right.m_value, left.offset, left.base); + return Jump(m_assembler.jCC(cond)); + } + + Jump branch16(Condition cond, BaseIndex left, RegisterID right) + { + m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask) + { + ASSERT((cond == Zero) || (cond == NonZero)); + m_assembler.testl_rr(reg, mask); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1)) + { + ASSERT((cond == Zero) || (cond == NonZero)); + // if we are only interested in the low seven bits, this can be tested with a testb + if (mask.m_value == -1) + m_assembler.testl_rr(reg, reg); + else if ((mask.m_value & ~0x7f) == 0) + m_assembler.testb_i8r(mask.m_value, reg); + else + m_assembler.testl_i32r(mask.m_value, reg); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1)) + { + ASSERT((cond == Zero) || (cond == NonZero)); + if (mask.m_value == -1) + m_assembler.cmpl_im(0, address.offset, address.base); + else + m_assembler.testl_i32m(mask.m_value, address.offset, address.base); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1)) + { + ASSERT((cond == Zero) || (cond == NonZero)); + if (mask.m_value == -1) + m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale); + else + m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale); + return Jump(m_assembler.jCC(cond)); + } + + Jump jump() + { + return Jump(m_assembler.jmp()); + } + + void jump(RegisterID target) + { + m_assembler.jmp_r(target); + } + + // Address is a memory location containing the address to jump to + void jump(Address address) + { + m_assembler.jmp_m(address.offset, address.base); + } + + + // Arithmetic control flow operations: + // + // This set of conditional branch operations branch based + // on the result of an arithmetic operation. The operation + // is performed as normal, storing the result. + // + // * the Zero and NonZero conditions branch if the result is zero. + // * the Overflow condition branches if the (signed) arithmetic + // operation caused an overflow to occur.
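To make the overflow idiom concrete, a minimal sketch (slow-path generation elided; the function name is hypothetical, not from this patch):

    #include "MacroAssemblerX86Common.h"
    using namespace JSC;

    // Speculative integer add: the add32 executes normally and the returned
    // Jump is taken only if the signed addition overflowed, so the caller can
    // link it to a slow path that redoes the operation on boxed values.
    MacroAssemblerX86Common::Jump emitCheckedAdd(MacroAssemblerX86Common& masm)
    {
        typedef MacroAssemblerX86Common M;
        return masm.branchAdd32(M::Overflow, X86::edx, X86::eax); // eax += edx
    }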
+ + Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest) + { + ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero)); + add32(src, dest); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest) + { + ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero)); + add32(imm, dest); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchMul32(Condition cond, RegisterID src, RegisterID dest) + { + ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero)); + mul32(src, dest); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest) + { + ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero)); + mul32(imm, src, dest); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchSub32(Condition cond, RegisterID src, RegisterID dest) + { + ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero)); + sub32(src, dest); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest) + { + ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero)); + sub32(imm, dest); + return Jump(m_assembler.jCC(cond)); + } + + + // Miscellaneous operations: + + void breakpoint() + { + m_assembler.int3(); + } + + Call nearCall() + { + return Call(m_assembler.call(), Call::LinkableNear); + } + + Call call(RegisterID target) + { + return Call(m_assembler.call(target), Call::None); + } + + void ret() + { + m_assembler.ret(); + } + + void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest) + { + m_assembler.cmpl_rr(right, left); + m_assembler.setCC_r(cond, dest); + m_assembler.movzbl_rr(dest, dest); + } + + void set32(Condition cond, RegisterID left, Imm32 right, RegisterID dest) + { + if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) + m_assembler.testl_rr(left, left); + else + m_assembler.cmpl_ir(right.m_value, left); + m_assembler.setCC_r(cond, dest); + m_assembler.movzbl_rr(dest, dest); + } + + // FIXME: + // The mask should be optional... perhaps the argument order should be + // dest-src, operations always have a dest? ... possibly not true, considering + // asm ops like test, or pseudo ops like pop(). + void setTest32(Condition cond, Address address, Imm32 mask, RegisterID dest) + { + if (mask.m_value == -1) + m_assembler.cmpl_im(0, address.offset, address.base); + else + m_assembler.testl_i32m(mask.m_value, address.offset, address.base); + m_assembler.setCC_r(cond, dest); + m_assembler.movzbl_rr(dest, dest); + } +}; + +} // namespace JSC + +#endif // ENABLE(ASSEMBLER) + +#endif // MacroAssemblerX86Common_h diff --git a/JavaScriptCore/assembler/MacroAssemblerX86_64.h b/JavaScriptCore/assembler/MacroAssemblerX86_64.h new file mode 100644 index 0000000..971787b --- /dev/null +++ b/JavaScriptCore/assembler/MacroAssemblerX86_64.h @@ -0,0 +1,398 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC.
``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef MacroAssemblerX86_64_h +#define MacroAssemblerX86_64_h + +#include <wtf/Platform.h> + +#if ENABLE(ASSEMBLER) && PLATFORM(X86_64) + +#include "MacroAssemblerX86Common.h" + +namespace JSC { + +class MacroAssemblerX86_64 : public MacroAssemblerX86Common { +protected: + static const X86::RegisterID scratchRegister = X86::r11; + +public: + static const Scale ScalePtr = TimesEight; + + using MacroAssemblerX86Common::add32; + using MacroAssemblerX86Common::sub32; + using MacroAssemblerX86Common::load32; + using MacroAssemblerX86Common::store32; + using MacroAssemblerX86Common::call; + + void add32(Imm32 imm, AbsoluteAddress address) + { + move(ImmPtr(address.m_ptr), scratchRegister); + add32(imm, Address(scratchRegister)); + } + + void sub32(Imm32 imm, AbsoluteAddress address) + { + move(ImmPtr(address.m_ptr), scratchRegister); + sub32(imm, Address(scratchRegister)); + } + + void load32(void* address, RegisterID dest) + { + if (dest == X86::eax) + m_assembler.movl_mEAX(address); + else { + move(X86::eax, dest); + m_assembler.movl_mEAX(address); + swap(X86::eax, dest); + } + } + + void store32(Imm32 imm, void* address) + { + move(X86::eax, scratchRegister); + move(imm, X86::eax); + m_assembler.movl_EAXm(address); + move(scratchRegister, X86::eax); + } + + Call call() + { + DataLabelPtr label = moveWithPatch(ImmPtr(0), scratchRegister); + Call result = Call(m_assembler.call(scratchRegister), Call::Linkable); + ASSERT(differenceBetween(label, result) == REPTACH_OFFSET_CALL_R11); + return result; + } + + Call tailRecursiveCall() + { + DataLabelPtr label = moveWithPatch(ImmPtr(0), scratchRegister); + Jump newJump = Jump(m_assembler.jmp_r(scratchRegister)); + ASSERT(differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11); + return Call::fromTailJump(newJump); + } + + Call makeTailRecursiveCall(Jump oldJump) + { + oldJump.link(this); + DataLabelPtr label = moveWithPatch(ImmPtr(0), scratchRegister); + Jump newJump = Jump(m_assembler.jmp_r(scratchRegister)); + ASSERT(differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11); + return Call::fromTailJump(newJump); + } + + + void addPtr(RegisterID src, RegisterID dest) + { + m_assembler.addq_rr(src, dest); + } + + void addPtr(Imm32 imm, RegisterID srcDest) + { + m_assembler.addq_ir(imm.m_value, srcDest); + } + + void addPtr(ImmPtr imm, RegisterID dest) + { + move(imm, scratchRegister); + m_assembler.addq_rr(scratchRegister, dest); + } + + void addPtr(Imm32 imm, RegisterID src, RegisterID dest) + { + m_assembler.leal_mr(imm.m_value, src, dest); + } + + void andPtr(RegisterID src, RegisterID dest) + { + m_assembler.andq_rr(src, dest); + } + + void andPtr(Imm32 imm, RegisterID srcDest) + { + m_assembler.andq_ir(imm.m_value, srcDest); + } + + void orPtr(RegisterID src, RegisterID dest) + { + m_assembler.orq_rr(src, dest); + } + + 
void orPtr(ImmPtr imm, RegisterID dest) + { + move(imm, scratchRegister); + m_assembler.orq_rr(scratchRegister, dest); + } + + void orPtr(Imm32 imm, RegisterID dest) + { + m_assembler.orq_ir(imm.m_value, dest); + } + + void rshiftPtr(RegisterID shift_amount, RegisterID dest) + { + // On x86 we can only shift by ecx; if asked to shift by another register we'll + // need to rejig the shift amount into ecx first, and restore the registers afterwards. + if (shift_amount != X86::ecx) { + swap(shift_amount, X86::ecx); + + // E.g. transform "sarq %rax, %rax" -> "xchgq %rax, %rcx; sarq %rcx, %rcx; xchgq %rax, %rcx" + if (dest == shift_amount) + m_assembler.sarq_CLr(X86::ecx); + // E.g. transform "sarq %rax, %rcx" -> "xchgq %rax, %rcx; sarq %rcx, %rax; xchgq %rax, %rcx" + else if (dest == X86::ecx) + m_assembler.sarq_CLr(shift_amount); + // E.g. transform "sarq %rax, %rbx" -> "xchgq %rax, %rcx; sarq %rcx, %rbx; xchgq %rax, %rcx" + else + m_assembler.sarq_CLr(dest); + + swap(shift_amount, X86::ecx); + } else + m_assembler.sarq_CLr(dest); + } + + void rshiftPtr(Imm32 imm, RegisterID dest) + { + m_assembler.sarq_i8r(imm.m_value, dest); + } + + void subPtr(RegisterID src, RegisterID dest) + { + m_assembler.subq_rr(src, dest); + } + + void subPtr(Imm32 imm, RegisterID dest) + { + m_assembler.subq_ir(imm.m_value, dest); + } + + void subPtr(ImmPtr imm, RegisterID dest) + { + move(imm, scratchRegister); + m_assembler.subq_rr(scratchRegister, dest); + } + + void xorPtr(RegisterID src, RegisterID dest) + { + m_assembler.xorq_rr(src, dest); + } + + void xorPtr(Imm32 imm, RegisterID srcDest) + { + m_assembler.xorq_ir(imm.m_value, srcDest); + } + + + void loadPtr(ImplicitAddress address, RegisterID dest) + { + m_assembler.movq_mr(address.offset, address.base, dest); + } + + void loadPtr(BaseIndex address, RegisterID dest) + { + m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest); + } + + void loadPtr(void* address, RegisterID dest) + { + if (dest == X86::eax) + m_assembler.movq_mEAX(address); + else { + move(X86::eax, dest); + m_assembler.movq_mEAX(address); + swap(X86::eax, dest); + } + } + + DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest) + { + m_assembler.movq_mr_disp32(address.offset, address.base, dest); + return DataLabel32(this); + } + + void storePtr(RegisterID src, ImplicitAddress address) + { + m_assembler.movq_rm(src, address.offset, address.base); + } + + void storePtr(RegisterID src, BaseIndex address) + { + m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale); + } + + void storePtr(ImmPtr imm, ImplicitAddress address) + { + intptr_t ptr = imm.asIntptr(); + if (CAN_SIGN_EXTEND_32_64(ptr)) + m_assembler.movq_i32m(static_cast<int>(ptr), address.offset, address.base); + else { + move(imm, scratchRegister); + storePtr(scratchRegister, address); + } + } + + DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address) + { + m_assembler.movq_rm_disp32(src, address.offset, address.base); + return DataLabel32(this); + } + + void setPtr(Condition cond, RegisterID left, Imm32 right, RegisterID dest) + { + if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) + m_assembler.testq_rr(left, left); + else + m_assembler.cmpq_ir(right.m_value, left); + m_assembler.setCC_r(cond, dest); + m_assembler.movzbl_rr(dest, dest); + } + + Jump branchPtr(Condition cond, RegisterID left, RegisterID right) + { + m_assembler.cmpq_rr(right, left); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchPtr(Condition
cond, RegisterID left, ImmPtr right) + { + intptr_t imm = right.asIntptr(); + if (CAN_SIGN_EXTEND_32_64(imm)) { + if (!imm) + m_assembler.testq_rr(left, left); + else + m_assembler.cmpq_ir(imm, left); + return Jump(m_assembler.jCC(cond)); + } else { + move(right, scratchRegister); + return branchPtr(cond, left, scratchRegister); + } + } + + Jump branchPtr(Condition cond, RegisterID left, Address right) + { + m_assembler.cmpq_mr(right.offset, right.base, left); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right) + { + move(ImmPtr(left.m_ptr), scratchRegister); + return branchPtr(cond, Address(scratchRegister), right); + } + + Jump branchPtr(Condition cond, Address left, RegisterID right) + { + m_assembler.cmpq_rm(right, left.offset, left.base); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchPtr(Condition cond, Address left, ImmPtr right) + { + move(right, scratchRegister); + return branchPtr(cond, left, scratchRegister); + } + + Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask) + { + m_assembler.testq_rr(reg, mask); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1)) + { + // if we are only interested in the low seven bits, this can be tested with a testb + if (mask.m_value == -1) + m_assembler.testq_rr(reg, reg); + else if ((mask.m_value & ~0x7f) == 0) + m_assembler.testb_i8r(mask.m_value, reg); + else + m_assembler.testq_i32r(mask.m_value, reg); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1)) + { + if (mask.m_value == -1) + m_assembler.cmpq_im(0, address.offset, address.base); + else + m_assembler.testq_i32m(mask.m_value, address.offset, address.base); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1)) + { + if (mask.m_value == -1) + m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale); + else + m_assembler.testq_i32m(mask.m_value, address.offset, address.base, address.index, address.scale); + return Jump(m_assembler.jCC(cond)); + } + + + Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest) + { + ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero)); + addPtr(src, dest); + return Jump(m_assembler.jCC(cond)); + } + + Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest) + { + ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero)); + subPtr(imm, dest); + return Jump(m_assembler.jCC(cond)); + } + + DataLabelPtr moveWithPatch(ImmPtr initialValue, RegisterID dest) + { + m_assembler.movq_i64r(initialValue.asIntptr(), dest); + return DataLabelPtr(this); + } + + Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0)) + { + dataLabel = moveWithPatch(initialRightValue, scratchRegister); + return branchPtr(cond, left, scratchRegister); + } + + Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0)) + { + dataLabel = moveWithPatch(initialRightValue, scratchRegister); + return branchPtr(cond, left, scratchRegister); + } + + DataLabelPtr storePtrWithPatch(Address address) + { + DataLabelPtr label = moveWithPatch(ImmPtr(0), scratchRegister); + storePtr(scratchRegister, address); + return label; + } +}; + +} // namespace JSC + +#endif // ENABLE(ASSEMBLER) + +#endif // MacroAssemblerX86_64_h diff --git 
a/JavaScriptCore/assembler/X86Assembler.h b/JavaScriptCore/assembler/X86Assembler.h index de23e45..bcafda1 100644 --- a/JavaScriptCore/assembler/X86Assembler.h +++ b/JavaScriptCore/assembler/X86Assembler.h @@ -41,6 +41,8 @@ inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(sign #if PLATFORM(X86_64) inline bool CAN_SIGN_EXTEND_32_64(intptr_t value) { return value == (intptr_t)(int32_t)value; } inline bool CAN_SIGN_EXTEND_U32_64(intptr_t value) { return value == (intptr_t)(uint32_t)value; } + +#define REPTACH_OFFSET_CALL_R11 3 #endif namespace X86 { @@ -84,6 +86,29 @@ public: typedef X86::XMMRegisterID XMMRegisterID; typedef enum { + ConditionO, + ConditionNO, + ConditionB, + ConditionAE, + ConditionE, + ConditionNE, + ConditionBE, + ConditionA, + ConditionS, + ConditionNS, + ConditionP, + ConditionNP, + ConditionL, + ConditionGE, + ConditionLE, + ConditionG, + + ConditionC = ConditionB, + ConditionNC = ConditionAE, + } Condition; + +private: + typedef enum { OP_ADD_EvGv = 0x01, OP_ADD_GvEv = 0x03, OP_OR_EvGv = 0x09, @@ -147,27 +172,24 @@ public: OP2_SUBSD_VsdWsd = 0x5C, OP2_MOVD_VdEd = 0x6E, OP2_MOVD_EdVd = 0x7E, - OP2_JO_rel32 = 0x80, - OP2_JB_rel32 = 0x82, - OP2_JAE_rel32 = 0x83, - OP2_JE_rel32 = 0x84, - OP2_JNE_rel32 = 0x85, - OP2_JBE_rel32 = 0x86, - OP2_JA_rel32 = 0x87, - OP2_JS_rel32 = 0x88, - OP2_JP_rel32 = 0x8A, - OP2_JL_rel32 = 0x8C, - OP2_JGE_rel32 = 0x8D, - OP2_JLE_rel32 = 0x8E, - OP2_JG_rel32 = 0x8F, - OP_SETE = 0x94, - OP_SETNE = 0x95, + OP2_JCC_rel32 = 0x80, + OP_SETCC = 0x90, OP2_IMUL_GvEv = 0xAF, OP2_MOVZX_GvEb = 0xB6, OP2_MOVZX_GvEw = 0xB7, OP2_PEXTRW_GdUdIb = 0xC5, } TwoByteOpcodeID; + TwoByteOpcodeID jccRel32(Condition cond) + { + return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond); + } + + TwoByteOpcodeID setccOpcode(Condition cond) + { + return (TwoByteOpcodeID)(OP_SETCC + cond); + } + typedef enum { GROUP1_OP_ADD = 0, GROUP1_OP_OR = 1, @@ -192,9 +214,6 @@ public: GROUP11_MOV = 0, } GroupOpcodeID; - // Opaque label types - -private: class X86InstructionFormatter; public: @@ -222,16 +241,22 @@ public: public: JmpDst() : m_offset(-1) + , m_used(false) { } + bool isUsed() const { return m_used; } + void used() { m_used = true; } private: JmpDst(int offset) : m_offset(offset) + , m_used(false) { + ASSERT(m_offset == offset); } - int m_offset; + int m_offset : 31; + bool m_used : 1; }; X86Assembler() @@ -640,6 +665,11 @@ public: m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset); } + void cmpq_mr(int offset, RegisterID base, RegisterID src) + { + m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset); + } + void cmpq_ir(int imm, RegisterID dst) { if (CAN_SIGN_EXTEND_8_32(imm)) { @@ -750,9 +780,14 @@ public: m_formatter.immediate8(imm); } + void setCC_r(Condition cond, RegisterID dst) + { + m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst); + } + void sete_r(RegisterID dst) { - m_formatter.twoByteOp8(OP_SETE, (GroupOpcodeID)0, dst); + m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst); } void setz_r(RegisterID dst) @@ -762,7 +797,7 @@ public: void setne_r(RegisterID dst) { - m_formatter.twoByteOp8(OP_SETNE, (GroupOpcodeID)0, dst); + m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst); } void setnz_r(RegisterID dst) @@ -898,6 +933,12 @@ public: m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset); } + void movq_i32m(int imm, int offset, RegisterID base) + { + m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset); + m_formatter.immediate32(imm); + } + void 
movq_i64r(int64_t imm, RegisterID dst) { m_formatter.oneByteOp64(OP_MOV_EAXIv, dst); @@ -969,9 +1010,13 @@ public: return m_formatter.immediateRel32(); } - void jmp_r(RegisterID dst) + // Return a JmpSrc so we have a label to the jump, so we can use this + // to make a tail recursive call on x86-64. The MacroAssembler + // really shouldn't wrap this as a Jump, since it can't be linked. :-/ + JmpSrc jmp_r(RegisterID dst) { m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst); + return JmpSrc(m_formatter.size()); } void jmp_m(int offset, RegisterID base) @@ -981,7 +1026,7 @@ JmpSrc jne() { - m_formatter.twoByteOp(OP2_JNE_rel32); + m_formatter.twoByteOp(jccRel32(ConditionNE)); return m_formatter.immediateRel32(); } @@ -992,73 +1037,79 @@ public: JmpSrc je() { - m_formatter.twoByteOp(OP2_JE_rel32); + m_formatter.twoByteOp(jccRel32(ConditionE)); return m_formatter.immediateRel32(); } JmpSrc jl() { - m_formatter.twoByteOp(OP2_JL_rel32); + m_formatter.twoByteOp(jccRel32(ConditionL)); return m_formatter.immediateRel32(); } JmpSrc jb() { - m_formatter.twoByteOp(OP2_JB_rel32); + m_formatter.twoByteOp(jccRel32(ConditionB)); return m_formatter.immediateRel32(); } JmpSrc jle() { - m_formatter.twoByteOp(OP2_JLE_rel32); + m_formatter.twoByteOp(jccRel32(ConditionLE)); return m_formatter.immediateRel32(); } JmpSrc jbe() { - m_formatter.twoByteOp(OP2_JBE_rel32); + m_formatter.twoByteOp(jccRel32(ConditionBE)); return m_formatter.immediateRel32(); } JmpSrc jge() { - m_formatter.twoByteOp(OP2_JGE_rel32); + m_formatter.twoByteOp(jccRel32(ConditionGE)); return m_formatter.immediateRel32(); } JmpSrc jg() { - m_formatter.twoByteOp(OP2_JG_rel32); + m_formatter.twoByteOp(jccRel32(ConditionG)); return m_formatter.immediateRel32(); } JmpSrc ja() { - m_formatter.twoByteOp(OP2_JA_rel32); + m_formatter.twoByteOp(jccRel32(ConditionA)); return m_formatter.immediateRel32(); } JmpSrc jae() { - m_formatter.twoByteOp(OP2_JAE_rel32); + m_formatter.twoByteOp(jccRel32(ConditionAE)); return m_formatter.immediateRel32(); } JmpSrc jo() { - m_formatter.twoByteOp(OP2_JO_rel32); + m_formatter.twoByteOp(jccRel32(ConditionO)); return m_formatter.immediateRel32(); } JmpSrc jp() { - m_formatter.twoByteOp(OP2_JP_rel32); + m_formatter.twoByteOp(jccRel32(ConditionP)); return m_formatter.immediateRel32(); } JmpSrc js() { - m_formatter.twoByteOp(OP2_JS_rel32); + m_formatter.twoByteOp(jccRel32(ConditionS)); + return m_formatter.immediateRel32(); + } + + JmpSrc jCC(Condition cond) + { + m_formatter.twoByteOp(jccRel32(cond)); return m_formatter.immediateRel32(); } @@ -1191,7 +1242,7 @@ public: // Linking & patching: - void link(JmpSrc from, JmpDst to) + void linkJump(JmpSrc from, JmpDst to) { ASSERT(to.m_offset != -1); ASSERT(from.m_offset != -1); @@ -1199,20 +1250,73 @@ public: reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(m_formatter.data()) + from.m_offset)[-1] = to.m_offset - from.m_offset; } - static void patchAddress(void* code, JmpDst position, void* value) + static void linkJump(void* code, JmpSrc from, void* to) { - ASSERT(position.m_offset != -1); + ASSERT(from.m_offset != -1); + ptrdiff_t linkOffset = reinterpret_cast<ptrdiff_t>(to) - (reinterpret_cast<ptrdiff_t>(code) + from.m_offset); + ASSERT(linkOffset == static_cast<int>(linkOffset)); + reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(code) + from.m_offset)[-1] = linkOffset; + } + + static void patchJump(intptr_t where, void* destination) + { + intptr_t offset = reinterpret_cast<intptr_t>(destination) - where; + ASSERT(offset == static_cast<int32_t>(offset)); +
reinterpret_cast<int32_t*>(where)[-1] = static_cast<int32_t>(offset); + } + +#if PLATFORM(X86_64) + // FIXME: transition these functions out of here - the assembler + // shouldn't know that this is a mov/call pair using r11. :-/ + static void patchMacroAssemblerCall(intptr_t where, void* destination) + { + patchAddress(reinterpret_cast<void*>(where - REPTACH_OFFSET_CALL_R11), JmpDst(0), destination); + } +#else + static void patchMacroAssemblerCall(intptr_t where, void* destination) + { + intptr_t offset = reinterpret_cast<intptr_t>(destination) - where; + ASSERT(offset == static_cast<int32_t>(offset)); + reinterpret_cast<int32_t*>(where)[-1] = static_cast<int32_t>(offset); + } +#endif + + void linkCall(JmpSrc from, JmpDst to) + { + ASSERT(to.m_offset != -1); + ASSERT(from.m_offset != -1); - reinterpret_cast<void**>(reinterpret_cast<ptrdiff_t>(code) + position.m_offset)[-1] = value; + reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(m_formatter.data()) + from.m_offset)[-1] = to.m_offset - from.m_offset; } - static void link(void* code, JmpSrc from, void* to) + static void linkCall(void* code, JmpSrc from, void* to) { ASSERT(from.m_offset != -1); + ptrdiff_t linkOffset = reinterpret_cast<ptrdiff_t>(to) - (reinterpret_cast<ptrdiff_t>(code) + from.m_offset); + ASSERT(linkOffset == static_cast<int>(linkOffset)); + reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(code) + from.m_offset)[-1] = linkOffset; + } + + static void patchCall(intptr_t where, void* destination) + { + intptr_t offset = reinterpret_cast<intptr_t>(destination) - where; + ASSERT(offset == static_cast<int32_t>(offset)); + reinterpret_cast<int32_t*>(where)[-1] = static_cast<int32_t>(offset); + } + + static void patchAddress(void* code, JmpDst position, void* value) + { + ASSERT(position.m_offset != -1); - reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(code) + from.m_offset)[-1] = reinterpret_cast<ptrdiff_t>(to) - (reinterpret_cast<ptrdiff_t>(code) + from.m_offset); + reinterpret_cast<void**>(reinterpret_cast<ptrdiff_t>(code) + position.m_offset)[-1] = value; } + static unsigned getCallReturnOffset(JmpSrc call) + { + ASSERT(call.m_offset >= 0); + return call.m_offset; + } + static void* getRelocatedAddress(void* code, JmpSrc jump) { return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset); @@ -1250,13 +1354,6 @@ public: reinterpret_cast<intptr_t*>(where)[-1] = value; } - static void patchBranchOffset(intptr_t where, void* destination) - { - intptr_t offset = reinterpret_cast<intptr_t>(destination) - where; - ASSERT(offset == static_cast<int32_t>(offset)); - reinterpret_cast<int32_t*>(where)[-1] = static_cast<int32_t>(offset); - } - void* executableCopy(ExecutablePool* allocator) { void* copy = m_formatter.executableCopy(allocator); @@ -1601,13 +1698,8 @@ private: { ASSERT(mode != ModRmRegister); - // Encode scale of (1,2,4,8) -> (0,1,2,3) - int shift = 0; - while (scale >>= 1) - shift++; - putModRm(mode, reg, hasSib); - m_buffer.putByteUnchecked((shift << 6) | ((index & 7) << 3) | (base & 7)); + m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7)); } void registerModRM(int reg, RegisterID rm)
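The final hunk drops the old scale-to-shift conversion loop because the MacroAssembler's Scale enum (TimesOne through TimesEight) is already defined as the shift amount (0 through 3) rather than the byte multiplier, so the value can be written into the SIB byte directly. A minimal sketch of the SIB layout this relies on (standard x86 encoding; the helper name is hypothetical, not from this patch):

    #include <stdint.h>

    // SIB byte: bits [7:6] = scale shift, [5:3] = index register, [2:0] = base.
    // With Scale already stored as a shift (TimesOne = 0 .. TimesEight = 3),
    // no conversion loop is needed before emitting the byte.
    static inline uint8_t sibByte(int scaleShift, int index, int base)
    {
        return static_cast<uint8_t>((scaleShift << 6) | ((index & 7) << 3) | (base & 7));
    }

    // e.g. sibByte(2 /* TimesFour */, 1 /* ecx */, 0 /* eax */) == 0x88,
    // the SIB byte for an (%eax,%ecx,4) memory operand.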