Diffstat (limited to 'libpixelflinger/codeflinger')
-rw-r--r--  libpixelflinger/codeflinger/ARMAssemblerInterface.h | 2
-rw-r--r--  libpixelflinger/codeflinger/Arm64Assembler.cpp (renamed from libpixelflinger/codeflinger/Aarch64Assembler.cpp) | 240
-rw-r--r--  libpixelflinger/codeflinger/Arm64Assembler.h (renamed from libpixelflinger/codeflinger/Aarch64Assembler.h) | 20
-rw-r--r--  libpixelflinger/codeflinger/Arm64Disassembler.cpp (renamed from libpixelflinger/codeflinger/Aarch64Disassembler.cpp) | 2
-rw-r--r--  libpixelflinger/codeflinger/Arm64Disassembler.h (renamed from libpixelflinger/codeflinger/Aarch64Disassembler.h) | 8
-rw-r--r--  libpixelflinger/codeflinger/GGLAssembler.cpp | 2
6 files changed, 137 insertions, 137 deletions
diff --git a/libpixelflinger/codeflinger/ARMAssemblerInterface.h b/libpixelflinger/codeflinger/ARMAssemblerInterface.h
index 6e0d7c6..40cbfcf 100644
--- a/libpixelflinger/codeflinger/ARMAssemblerInterface.h
+++ b/libpixelflinger/codeflinger/ARMAssemblerInterface.h
@@ -63,7 +63,7 @@ public:
};
enum {
- CODEGEN_ARCH_ARM = 1, CODEGEN_ARCH_MIPS, CODEGEN_ARCH_AARCH64
+ CODEGEN_ARCH_ARM = 1, CODEGEN_ARCH_MIPS, CODEGEN_ARCH_ARM64
};
// -----------------------------------------------------------------------
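
The renamed CODEGEN_ARCH_ARM64 constant is what backend selection keys on. A minimal factory sketch under that assumption; the makeAssembler() helper itself is hypothetical, only the enum value and the raw-buffer constructor (see the Arm64Assembler.h diff below) come from this change:

    #include "codeflinger/ARMAssemblerInterface.h"
    #include "codeflinger/Arm64Assembler.h"
    using namespace android;

    // Hypothetical factory, not part of this change; only the Arm64 arm is
    // grounded in the diffs on this page.
    ARMAssemblerInterface* makeAssembler(int arch, void* base)
    {
        switch (arch) {
        case ARMAssemblerInterface::CODEGEN_ARCH_ARM64:
            return new ArmToArm64Assembler(base);  // raw-buffer constructor
        default:
            return NULL;  // ARM and MIPS backends omitted from this sketch
        }
    }
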
diff --git a/libpixelflinger/codeflinger/Aarch64Assembler.cpp b/libpixelflinger/codeflinger/Arm64Assembler.cpp
index 0e4f7df..f37072a 100644
--- a/libpixelflinger/codeflinger/Aarch64Assembler.cpp
+++ b/libpixelflinger/codeflinger/Arm64Assembler.cpp
@@ -26,7 +26,7 @@
* SUCH DAMAGE.
*/
-#define LOG_TAG "ArmToAarch64Assembler"
+#define LOG_TAG "ArmToArm64Assembler"
#include <stdio.h>
#include <stdlib.h>
@@ -36,45 +36,45 @@
#include <cutils/properties.h>
#include <private/pixelflinger/ggl_context.h>
-#include "codeflinger/Aarch64Assembler.h"
+#include "codeflinger/Arm64Assembler.h"
#include "codeflinger/CodeCache.h"
-#include "codeflinger/Aarch64Disassembler.h"
+#include "codeflinger/Arm64Disassembler.h"
/*
** --------------------------------------------
-** Support for Aarch64 in GGLAssembler JIT
+** Support for Arm64 in GGLAssembler JIT
** --------------------------------------------
**
** Approach
** - GGLAssembler and associated files are largely un-changed.
** - A translator class maps ArmAssemblerInterface calls to
-** generate AArch64 instructions.
+** generate Arm64 instructions.
**
** ----------------------
-** ArmToAarch64Assembler
+** ArmToArm64Assembler
** ----------------------
**
** - Subclassed from ArmAssemblerInterface
**
** - Translates each ArmAssemblerInterface call to generate
-** one or more Aarch64 instructions as necessary.
+** one or more Arm64 instructions as necessary.
**
** - Does not implement ArmAssemblerInterface portions unused by GGLAssembler
** It calls NOT_IMPLEMENTED() for such cases, which in turn logs
** a fatal message.
**
** - Uses A64_.. series of functions to generate instruction machine code
-** for Aarch64 instructions. These functions also log the instruction
-** to LOG, if AARCH64_ASM_DEBUG define is set to 1
+** for Arm64 instructions. These functions also log the instruction
+** to LOG, if ARM64_ASM_DEBUG define is set to 1
**
** - Dumps machine code and eqvt assembly if "debug.pf.disasm" option is set
-** It uses aarch64_disassemble to perform disassembly
+** It uses arm64_disassemble to perform disassembly
**
** - Uses register 13 (SP in ARM), 15 (PC in ARM), 16, 17 for storing
** intermediate results. GGLAssembler does not use SP and PC as these
** registers are marked as reserved. The temporary registers are not
-** saved/restored on stack as these are caller-saved registers in Aarch64
+** saved/restored on stack as these are caller-saved registers in Arm64
**
** - Uses CSEL instruction to support conditional execution. The result is
** stored in a temporary register and then copied to the target register
@@ -89,10 +89,10 @@
** move immediate instructions followed by register-register instruction.
**
** --------------------------------------------
-** ArmToAarch64Assembler unit test bench
+** ArmToArm64Assembler unit test bench
** --------------------------------------------
**
-** - Tests ArmToAarch64Assembler interface for all the possible
+** - Tests ArmToArm64Assembler interface for all the possible
** ways in which GGLAssembler uses ArmAssemblerInterface interface.
**
** - Uses test jacket (written in assembly) to set the registers,
@@ -105,10 +105,10 @@
** (ii) data transfer tests and (iii) LDM/STM tests.
**
** ----------------------
-** Aarch64 disassembler
+** Arm64 disassembler
** ----------------------
** - This disassembler disassembles only those machine codes which can be
-** generated by ArmToAarch64Assembler. It has a unit testbench which
+** generated by ArmToArm64Assembler. It has a unit testbench which
** tests all the instructions supported by the disassembler.
**
** ------------------------------------------------------------------
@@ -122,13 +122,13 @@
** These are ADDR_LDR, ADDR_STR, ADDR_ADD, ADDR_SUB and they map to
** default 32 bit implementations in ARMAssemblerInterface.
**
-** - ArmToAarch64Assembler maps these functions to appropriate 64 bit
+** - ArmToArm64Assembler maps these functions to appropriate 64 bit
** functions.
**
** ----------------------
** GGLAssembler changes
** ----------------------
-** - Since ArmToAarch64Assembler can generate 4 Aarch64 instructions for
+** - Since ArmToArm64Assembler can generate 4 Arm64 instructions for
** each call in worst case, the memory required is set to 4 times
** ARM memory
**
@@ -140,9 +140,9 @@
#define NOT_IMPLEMENTED() LOG_FATAL("Arm instruction %s not yet implemented\n", __func__)
-#define AARCH64_ASM_DEBUG 0
+#define ARM64_ASM_DEBUG 0
-#if AARCH64_ASM_DEBUG
+#if ARM64_ASM_DEBUG
#define LOG_INSTR(...) ALOGD("\t" __VA_ARGS__)
#define LOG_LABEL(...) ALOGD(__VA_ARGS__)
#else
@@ -163,7 +163,7 @@ static const char *cc_codes[] =
"GE", "LT", "GT", "LE", "AL", "NV"
};
-ArmToAarch64Assembler::ArmToAarch64Assembler(const sp<Assembly>& assembly)
+ArmToArm64Assembler::ArmToArm64Assembler(const sp<Assembly>& assembly)
: ARMAssemblerInterface(),
mAssembly(assembly)
{
@@ -175,7 +175,7 @@ ArmToAarch64Assembler::ArmToAarch64Assembler(const sp<Assembly>& assembly)
mTmpReg3 = 17;
}
-ArmToAarch64Assembler::ArmToAarch64Assembler(void *base)
+ArmToArm64Assembler::ArmToArm64Assembler(void *base)
: ARMAssemblerInterface(), mAssembly(NULL)
{
mBase = mPC = (uint32_t *)base;
@@ -187,21 +187,21 @@ ArmToAarch64Assembler::ArmToAarch64Assembler(void *base)
mTmpReg3 = 17;
}
-ArmToAarch64Assembler::~ArmToAarch64Assembler()
+ArmToArm64Assembler::~ArmToArm64Assembler()
{
}
-uint32_t* ArmToAarch64Assembler::pc() const
+uint32_t* ArmToArm64Assembler::pc() const
{
return mPC;
}
-uint32_t* ArmToAarch64Assembler::base() const
+uint32_t* ArmToArm64Assembler::base() const
{
return mBase;
}
-void ArmToAarch64Assembler::reset()
+void ArmToArm64Assembler::reset()
{
if(mAssembly == NULL)
mPC = mBase;
@@ -211,19 +211,19 @@ void ArmToAarch64Assembler::reset()
mLabels.clear();
mLabelsInverseMapping.clear();
mComments.clear();
-#if AARCH64_ASM_DEBUG
+#if ARM64_ASM_DEBUG
ALOGI("RESET\n");
#endif
}
-int ArmToAarch64Assembler::getCodegenArch()
+int ArmToArm64Assembler::getCodegenArch()
{
- return CODEGEN_ARCH_AARCH64;
+ return CODEGEN_ARCH_ARM64;
}
// ----------------------------------------------------------------------------
-void ArmToAarch64Assembler::disassemble(const char* name)
+void ArmToArm64Assembler::disassemble(const char* name)
{
if(name)
{
@@ -246,34 +246,34 @@ void ArmToAarch64Assembler::disassemble(const char* name)
printf("%p: %08x ", i, uint32_t(i[0]));
{
char instr[256];
- ::aarch64_disassemble(*i, instr);
+ ::arm64_disassemble(*i, instr);
printf("%s\n", instr);
}
i++;
}
}
-void ArmToAarch64Assembler::comment(const char* string)
+void ArmToArm64Assembler::comment(const char* string)
{
mComments.add(mPC, string);
LOG_INSTR("//%s\n", string);
}
-void ArmToAarch64Assembler::label(const char* theLabel)
+void ArmToArm64Assembler::label(const char* theLabel)
{
mLabels.add(theLabel, mPC);
mLabelsInverseMapping.add(mPC, theLabel);
LOG_LABEL("%s:\n", theLabel);
}
-void ArmToAarch64Assembler::B(int cc, const char* label)
+void ArmToArm64Assembler::B(int cc, const char* label)
{
mBranchTargets.add(branch_target_t(label, mPC));
LOG_INSTR("B%s %s\n", cc_codes[cc], label );
*mPC++ = (0x54 << 24) | cc;
}
-void ArmToAarch64Assembler::BL(int cc, const char* label)
+void ArmToArm64Assembler::BL(int cc, const char* label)
{
NOT_IMPLEMENTED(); //Not Required
}
@@ -282,21 +282,21 @@ void ArmToAarch64Assembler::BL(int cc, const char* label)
//Prolog/Epilog & Generate...
// ----------------------------------------------------------------------------
-void ArmToAarch64Assembler::prolog()
+void ArmToArm64Assembler::prolog()
{
// write prolog code
mPrologPC = mPC;
*mPC++ = A64_MOVZ_X(mZeroReg,0,0);
}
-void ArmToAarch64Assembler::epilog(uint32_t touched)
+void ArmToArm64Assembler::epilog(uint32_t touched)
{
// write epilog code
static const int XLR = 30;
*mPC++ = A64_RET(XLR);
}
-int ArmToAarch64Assembler::generate(const char* name)
+int ArmToArm64Assembler::generate(const char* name)
{
// fixup all the branches
size_t count = mBranchTargets.size();
@@ -329,7 +329,7 @@ int ArmToAarch64Assembler::generate(const char* name)
return NO_ERROR;
}
-uint32_t* ArmToAarch64Assembler::pcForLabel(const char* label)
+uint32_t* ArmToArm64Assembler::pcForLabel(const char* label)
{
return mLabels.valueFor(label);
}
@@ -337,7 +337,7 @@ uint32_t* ArmToAarch64Assembler::pcForLabel(const char* label)
// ----------------------------------------------------------------------------
// Data Processing...
// ----------------------------------------------------------------------------
-void ArmToAarch64Assembler::dataProcessingCommon(int opcode,
+void ArmToArm64Assembler::dataProcessingCommon(int opcode,
int s, int Rd, int Rn, uint32_t Op2)
{
if(opcode != opSUB && s == 1)
@@ -405,7 +405,7 @@ void ArmToAarch64Assembler::dataProcessingCommon(int opcode,
}
}
-void ArmToAarch64Assembler::dataProcessing(int opcode, int cc,
+void ArmToArm64Assembler::dataProcessing(int opcode, int cc,
int s, int Rd, int Rn, uint32_t Op2)
{
uint32_t Wd;
@@ -460,7 +460,7 @@ void ArmToAarch64Assembler::dataProcessing(int opcode, int cc,
// Address Processing...
// ----------------------------------------------------------------------------
-void ArmToAarch64Assembler::ADDR_ADD(int cc,
+void ArmToArm64Assembler::ADDR_ADD(int cc,
int s, int Rd, int Rn, uint32_t Op2)
{
if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
@@ -495,7 +495,7 @@ void ArmToAarch64Assembler::ADDR_ADD(int cc,
}
}
-void ArmToAarch64Assembler::ADDR_SUB(int cc,
+void ArmToArm64Assembler::ADDR_SUB(int cc,
int s, int Rd, int Rn, uint32_t Op2)
{
if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
@@ -516,7 +516,7 @@ void ArmToAarch64Assembler::ADDR_SUB(int cc,
// ----------------------------------------------------------------------------
// multiply...
// ----------------------------------------------------------------------------
-void ArmToAarch64Assembler::MLA(int cc, int s,int Rd, int Rm, int Rs, int Rn)
+void ArmToArm64Assembler::MLA(int cc, int s,int Rd, int Rm, int Rs, int Rn)
{
if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
@@ -524,28 +524,28 @@ void ArmToAarch64Assembler::MLA(int cc, int s,int Rd, int Rm, int Rs, int Rn)
if(s == 1)
dataProcessingCommon(opSUB, 1, mTmpReg1, Rd, mZeroReg);
}
-void ArmToAarch64Assembler::MUL(int cc, int s, int Rd, int Rm, int Rs)
+void ArmToArm64Assembler::MUL(int cc, int s, int Rd, int Rm, int Rs)
{
if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
if(s != 0) { NOT_IMPLEMENTED(); return;} //Not required
*mPC++ = A64_MADD_W(Rd, Rm, Rs, mZeroReg);
}
-void ArmToAarch64Assembler::UMULL(int cc, int s,
+void ArmToArm64Assembler::UMULL(int cc, int s,
int RdLo, int RdHi, int Rm, int Rs)
{
NOT_IMPLEMENTED(); //Not required
}
-void ArmToAarch64Assembler::UMUAL(int cc, int s,
+void ArmToArm64Assembler::UMUAL(int cc, int s,
int RdLo, int RdHi, int Rm, int Rs)
{
NOT_IMPLEMENTED(); //Not required
}
-void ArmToAarch64Assembler::SMULL(int cc, int s,
+void ArmToArm64Assembler::SMULL(int cc, int s,
int RdLo, int RdHi, int Rm, int Rs)
{
NOT_IMPLEMENTED(); //Not required
}
-void ArmToAarch64Assembler::SMUAL(int cc, int s,
+void ArmToArm64Assembler::SMUAL(int cc, int s,
int RdLo, int RdHi, int Rm, int Rs)
{
NOT_IMPLEMENTED(); //Not required
@@ -554,15 +554,15 @@ void ArmToAarch64Assembler::SMUAL(int cc, int s,
// ----------------------------------------------------------------------------
// branches relative to PC...
// ----------------------------------------------------------------------------
-void ArmToAarch64Assembler::B(int cc, uint32_t* pc){
+void ArmToArm64Assembler::B(int cc, uint32_t* pc){
NOT_IMPLEMENTED(); //Not required
}
-void ArmToAarch64Assembler::BL(int cc, uint32_t* pc){
+void ArmToArm64Assembler::BL(int cc, uint32_t* pc){
NOT_IMPLEMENTED(); //Not required
}
-void ArmToAarch64Assembler::BX(int cc, int Rn){
+void ArmToArm64Assembler::BX(int cc, int Rn){
NOT_IMPLEMENTED(); //Not required
}
@@ -574,7 +574,7 @@ enum dataTransferOp
opLDR,opLDRB,opLDRH,opSTR,opSTRB,opSTRH
};
-void ArmToAarch64Assembler::dataTransfer(int op, int cc,
+void ArmToArm64Assembler::dataTransfer(int op, int cc,
int Rd, int Rn, uint32_t op_type, uint32_t size)
{
const int XSP = 31;
@@ -631,46 +631,46 @@ void ArmToAarch64Assembler::dataTransfer(int op, int cc,
return;
}
-void ArmToAarch64Assembler::ADDR_LDR(int cc, int Rd, int Rn, uint32_t op_type)
+void ArmToArm64Assembler::ADDR_LDR(int cc, int Rd, int Rn, uint32_t op_type)
{
return dataTransfer(opLDR, cc, Rd, Rn, op_type, 64);
}
-void ArmToAarch64Assembler::ADDR_STR(int cc, int Rd, int Rn, uint32_t op_type)
+void ArmToArm64Assembler::ADDR_STR(int cc, int Rd, int Rn, uint32_t op_type)
{
return dataTransfer(opSTR, cc, Rd, Rn, op_type, 64);
}
-void ArmToAarch64Assembler::LDR(int cc, int Rd, int Rn, uint32_t op_type)
+void ArmToArm64Assembler::LDR(int cc, int Rd, int Rn, uint32_t op_type)
{
return dataTransfer(opLDR, cc, Rd, Rn, op_type);
}
-void ArmToAarch64Assembler::LDRB(int cc, int Rd, int Rn, uint32_t op_type)
+void ArmToArm64Assembler::LDRB(int cc, int Rd, int Rn, uint32_t op_type)
{
return dataTransfer(opLDRB, cc, Rd, Rn, op_type);
}
-void ArmToAarch64Assembler::STR(int cc, int Rd, int Rn, uint32_t op_type)
+void ArmToArm64Assembler::STR(int cc, int Rd, int Rn, uint32_t op_type)
{
return dataTransfer(opSTR, cc, Rd, Rn, op_type);
}
-void ArmToAarch64Assembler::STRB(int cc, int Rd, int Rn, uint32_t op_type)
+void ArmToArm64Assembler::STRB(int cc, int Rd, int Rn, uint32_t op_type)
{
return dataTransfer(opSTRB, cc, Rd, Rn, op_type);
}
-void ArmToAarch64Assembler::LDRH(int cc, int Rd, int Rn, uint32_t op_type)
+void ArmToArm64Assembler::LDRH(int cc, int Rd, int Rn, uint32_t op_type)
{
return dataTransfer(opLDRH, cc, Rd, Rn, op_type);
}
-void ArmToAarch64Assembler::LDRSB(int cc, int Rd, int Rn, uint32_t offset)
+void ArmToArm64Assembler::LDRSB(int cc, int Rd, int Rn, uint32_t offset)
{
NOT_IMPLEMENTED(); //Not required
}
-void ArmToAarch64Assembler::LDRSH(int cc, int Rd, int Rn, uint32_t offset)
+void ArmToArm64Assembler::LDRSH(int cc, int Rd, int Rn, uint32_t offset)
{
NOT_IMPLEMENTED(); //Not required
}
-void ArmToAarch64Assembler::STRH(int cc, int Rd, int Rn, uint32_t op_type)
+void ArmToArm64Assembler::STRH(int cc, int Rd, int Rn, uint32_t op_type)
{
return dataTransfer(opSTRH, cc, Rd, Rn, op_type);
}
@@ -678,7 +678,7 @@ void ArmToAarch64Assembler::STRH(int cc, int Rd, int Rn, uint32_t op_type)
// ----------------------------------------------------------------------------
// block data transfer...
// ----------------------------------------------------------------------------
-void ArmToAarch64Assembler::LDM(int cc, int dir,
+void ArmToArm64Assembler::LDM(int cc, int dir,
int Rn, int W, uint32_t reg_list)
{
const int XSP = 31;
@@ -699,7 +699,7 @@ void ArmToAarch64Assembler::LDM(int cc, int dir,
}
}
-void ArmToAarch64Assembler::STM(int cc, int dir,
+void ArmToArm64Assembler::STM(int cc, int dir,
int Rn, int W, uint32_t reg_list)
{
const int XSP = 31;
@@ -723,15 +723,15 @@ void ArmToAarch64Assembler::STM(int cc, int dir,
// ----------------------------------------------------------------------------
// special...
// ----------------------------------------------------------------------------
-void ArmToAarch64Assembler::SWP(int cc, int Rn, int Rd, int Rm)
+void ArmToArm64Assembler::SWP(int cc, int Rn, int Rd, int Rm)
{
NOT_IMPLEMENTED(); //Not required
}
-void ArmToAarch64Assembler::SWPB(int cc, int Rn, int Rd, int Rm)
+void ArmToArm64Assembler::SWPB(int cc, int Rn, int Rd, int Rm)
{
NOT_IMPLEMENTED(); //Not required
}
-void ArmToAarch64Assembler::SWI(int cc, uint32_t comment)
+void ArmToArm64Assembler::SWI(int cc, uint32_t comment)
{
NOT_IMPLEMENTED(); //Not required
}
@@ -739,31 +739,31 @@ void ArmToAarch64Assembler::SWI(int cc, uint32_t comment)
// ----------------------------------------------------------------------------
// DSP instructions...
// ----------------------------------------------------------------------------
-void ArmToAarch64Assembler::PLD(int Rn, uint32_t offset) {
+void ArmToArm64Assembler::PLD(int Rn, uint32_t offset) {
NOT_IMPLEMENTED(); //Not required
}
-void ArmToAarch64Assembler::CLZ(int cc, int Rd, int Rm)
+void ArmToArm64Assembler::CLZ(int cc, int Rd, int Rm)
{
NOT_IMPLEMENTED(); //Not required
}
-void ArmToAarch64Assembler::QADD(int cc, int Rd, int Rm, int Rn)
+void ArmToArm64Assembler::QADD(int cc, int Rd, int Rm, int Rn)
{
NOT_IMPLEMENTED(); //Not required
}
-void ArmToAarch64Assembler::QDADD(int cc, int Rd, int Rm, int Rn)
+void ArmToArm64Assembler::QDADD(int cc, int Rd, int Rm, int Rn)
{
NOT_IMPLEMENTED(); //Not required
}
-void ArmToAarch64Assembler::QSUB(int cc, int Rd, int Rm, int Rn)
+void ArmToArm64Assembler::QSUB(int cc, int Rd, int Rm, int Rn)
{
NOT_IMPLEMENTED(); //Not required
}
-void ArmToAarch64Assembler::QDSUB(int cc, int Rd, int Rm, int Rn)
+void ArmToArm64Assembler::QDSUB(int cc, int Rd, int Rm, int Rn)
{
NOT_IMPLEMENTED(); //Not required
}
@@ -771,7 +771,7 @@ void ArmToAarch64Assembler::QDSUB(int cc, int Rd, int Rm, int Rn)
// ----------------------------------------------------------------------------
// 16 x 16 multiplication
// ----------------------------------------------------------------------------
-void ArmToAarch64Assembler::SMUL(int cc, int xy,
+void ArmToArm64Assembler::SMUL(int cc, int xy,
int Rd, int Rm, int Rs)
{
if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
@@ -791,7 +791,7 @@ void ArmToAarch64Assembler::SMUL(int cc, int xy,
// ----------------------------------------------------------------------------
// 32 x 16 multiplication
// ----------------------------------------------------------------------------
-void ArmToAarch64Assembler::SMULW(int cc, int y, int Rd, int Rm, int Rs)
+void ArmToArm64Assembler::SMULW(int cc, int y, int Rd, int Rm, int Rs)
{
if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
@@ -807,7 +807,7 @@ void ArmToAarch64Assembler::SMULW(int cc, int y, int Rd, int Rm, int Rs)
// ----------------------------------------------------------------------------
// 16 x 16 multiplication and accumulate
// ----------------------------------------------------------------------------
-void ArmToAarch64Assembler::SMLA(int cc, int xy, int Rd, int Rm, int Rs, int Rn)
+void ArmToArm64Assembler::SMLA(int cc, int xy, int Rd, int Rm, int Rs, int Rn)
{
if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
if(xy != xyBB) { NOT_IMPLEMENTED(); return;} //Not required
@@ -817,14 +817,14 @@ void ArmToAarch64Assembler::SMLA(int cc, int xy, int Rd, int Rm, int Rs, int Rn)
*mPC++ = A64_MADD_W(Rd, mTmpReg1, mTmpReg2, Rn);
}
-void ArmToAarch64Assembler::SMLAL(int cc, int xy,
+void ArmToArm64Assembler::SMLAL(int cc, int xy,
int RdHi, int RdLo, int Rs, int Rm)
{
NOT_IMPLEMENTED(); //Not required
return;
}
-void ArmToAarch64Assembler::SMLAW(int cc, int y,
+void ArmToArm64Assembler::SMLAW(int cc, int y,
int Rd, int Rm, int Rs, int Rn)
{
NOT_IMPLEMENTED(); //Not required
@@ -834,7 +834,7 @@ void ArmToAarch64Assembler::SMLAW(int cc, int y,
// ----------------------------------------------------------------------------
// Byte/half word extract and extend
// ----------------------------------------------------------------------------
-void ArmToAarch64Assembler::UXTB16(int cc, int Rd, int Rm, int rotate)
+void ArmToArm64Assembler::UXTB16(int cc, int Rd, int Rm, int rotate)
{
if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
@@ -849,7 +849,7 @@ void ArmToAarch64Assembler::UXTB16(int cc, int Rd, int Rm, int rotate)
// ----------------------------------------------------------------------------
// Bit manipulation
// ----------------------------------------------------------------------------
-void ArmToAarch64Assembler::UBFX(int cc, int Rd, int Rn, int lsb, int width)
+void ArmToArm64Assembler::UBFX(int cc, int Rd, int Rn, int lsb, int width)
{
if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
*mPC++ = A64_UBFM_W(Rd, Rn, lsb, lsb + width - 1);
@@ -857,7 +857,7 @@ void ArmToAarch64Assembler::UBFX(int cc, int Rd, int Rn, int lsb, int width)
// ----------------------------------------------------------------------------
// Shifters...
// ----------------------------------------------------------------------------
-int ArmToAarch64Assembler::buildImmediate(
+int ArmToArm64Assembler::buildImmediate(
uint32_t immediate, uint32_t& rot, uint32_t& imm)
{
rot = 0;
@@ -866,13 +866,13 @@ int ArmToAarch64Assembler::buildImmediate(
}
-bool ArmToAarch64Assembler::isValidImmediate(uint32_t immediate)
+bool ArmToArm64Assembler::isValidImmediate(uint32_t immediate)
{
uint32_t rot, imm;
return buildImmediate(immediate, rot, imm) == 0;
}
-uint32_t ArmToAarch64Assembler::imm(uint32_t immediate)
+uint32_t ArmToArm64Assembler::imm(uint32_t immediate)
{
mAddrMode.immediate = immediate;
mAddrMode.writeback = false;
@@ -882,7 +882,7 @@ uint32_t ArmToAarch64Assembler::imm(uint32_t immediate)
}
-uint32_t ArmToAarch64Assembler::reg_imm(int Rm, int type, uint32_t shift)
+uint32_t ArmToArm64Assembler::reg_imm(int Rm, int type, uint32_t shift)
{
mAddrMode.reg_imm_Rm = Rm;
mAddrMode.reg_imm_type = type;
@@ -890,13 +890,13 @@ uint32_t ArmToAarch64Assembler::reg_imm(int Rm, int type, uint32_t shift)
return OPERAND_REG_IMM;
}
-uint32_t ArmToAarch64Assembler::reg_rrx(int Rm)
+uint32_t ArmToArm64Assembler::reg_rrx(int Rm)
{
NOT_IMPLEMENTED();
return OPERAND_UNSUPPORTED;
}
-uint32_t ArmToAarch64Assembler::reg_reg(int Rm, int type, int Rs)
+uint32_t ArmToArm64Assembler::reg_reg(int Rm, int type, int Rs)
{
NOT_IMPLEMENTED(); //Not required
return OPERAND_UNSUPPORTED;
@@ -904,7 +904,7 @@ uint32_t ArmToAarch64Assembler::reg_reg(int Rm, int type, int Rs)
// ----------------------------------------------------------------------------
// Addressing modes...
// ----------------------------------------------------------------------------
-uint32_t ArmToAarch64Assembler::immed12_pre(int32_t immed12, int W)
+uint32_t ArmToArm64Assembler::immed12_pre(int32_t immed12, int W)
{
mAddrMode.immediate = immed12;
mAddrMode.writeback = W;
@@ -913,7 +913,7 @@ uint32_t ArmToAarch64Assembler::immed12_pre(int32_t immed12, int W)
return OPERAND_IMM;
}
-uint32_t ArmToAarch64Assembler::immed12_post(int32_t immed12)
+uint32_t ArmToArm64Assembler::immed12_post(int32_t immed12)
{
mAddrMode.immediate = immed12;
mAddrMode.writeback = true;
@@ -922,7 +922,7 @@ uint32_t ArmToAarch64Assembler::immed12_post(int32_t immed12)
return OPERAND_IMM;
}
-uint32_t ArmToAarch64Assembler::reg_scale_pre(int Rm, int type,
+uint32_t ArmToArm64Assembler::reg_scale_pre(int Rm, int type,
uint32_t shift, int W)
{
if(type != 0 || shift != 0 || W != 0)
@@ -937,13 +937,13 @@ uint32_t ArmToAarch64Assembler::reg_scale_pre(int Rm, int type,
}
}
-uint32_t ArmToAarch64Assembler::reg_scale_post(int Rm, int type, uint32_t shift)
+uint32_t ArmToArm64Assembler::reg_scale_post(int Rm, int type, uint32_t shift)
{
NOT_IMPLEMENTED(); //Not required
return OPERAND_UNSUPPORTED;
}
-uint32_t ArmToAarch64Assembler::immed8_pre(int32_t immed8, int W)
+uint32_t ArmToArm64Assembler::immed8_pre(int32_t immed8, int W)
{
mAddrMode.immediate = immed8;
mAddrMode.writeback = W;
@@ -952,7 +952,7 @@ uint32_t ArmToAarch64Assembler::immed8_pre(int32_t immed8, int W)
return OPERAND_IMM;
}
-uint32_t ArmToAarch64Assembler::immed8_post(int32_t immed8)
+uint32_t ArmToArm64Assembler::immed8_post(int32_t immed8)
{
mAddrMode.immediate = immed8;
mAddrMode.writeback = true;
@@ -961,7 +961,7 @@ uint32_t ArmToAarch64Assembler::immed8_post(int32_t immed8)
return OPERAND_IMM;
}
-uint32_t ArmToAarch64Assembler::reg_pre(int Rm, int W)
+uint32_t ArmToArm64Assembler::reg_pre(int Rm, int W)
{
if(W != 0)
{
@@ -975,7 +975,7 @@ uint32_t ArmToAarch64Assembler::reg_pre(int Rm, int W)
}
}
-uint32_t ArmToAarch64Assembler::reg_post(int Rm)
+uint32_t ArmToArm64Assembler::reg_post(int Rm)
{
NOT_IMPLEMENTED(); //Not required
return OPERAND_UNSUPPORTED;
@@ -999,7 +999,7 @@ static const uint32_t dataTransferOpCode [] =
((0x38u << 24) | (0x1 << 21) | (0x6 << 13) | (0x1 << 12) |(0x1 << 11)),
((0x78u << 24) | (0x1 << 21) | (0x6 << 13) | (0x0 << 12) |(0x1 << 11))
};
-uint32_t ArmToAarch64Assembler::A64_LDRSTR_Wm_SXTW_0(uint32_t op,
+uint32_t ArmToArm64Assembler::A64_LDRSTR_Wm_SXTW_0(uint32_t op,
uint32_t size, uint32_t Rt,
uint32_t Rn, uint32_t Rm)
{
@@ -1017,7 +1017,7 @@ uint32_t ArmToAarch64Assembler::A64_LDRSTR_Wm_SXTW_0(uint32_t op,
}
}
-uint32_t ArmToAarch64Assembler::A64_STR_IMM_PreIndex(uint32_t Rt,
+uint32_t ArmToArm64Assembler::A64_STR_IMM_PreIndex(uint32_t Rt,
uint32_t Rn, int32_t simm)
{
if(Rn == 31)
@@ -1029,7 +1029,7 @@ uint32_t ArmToAarch64Assembler::A64_STR_IMM_PreIndex(uint32_t Rt,
return (0xB8 << 24) | (imm9 << 12) | (0x3 << 10) | (Rn << 5) | Rt;
}
-uint32_t ArmToAarch64Assembler::A64_LDR_IMM_PostIndex(uint32_t Rt,
+uint32_t ArmToArm64Assembler::A64_LDR_IMM_PostIndex(uint32_t Rt,
uint32_t Rn, int32_t simm)
{
if(Rn == 31)
@@ -1042,7 +1042,7 @@ uint32_t ArmToAarch64Assembler::A64_LDR_IMM_PostIndex(uint32_t Rt,
(imm9 << 12) | (0x1 << 10) | (Rn << 5) | Rt;
}
-uint32_t ArmToAarch64Assembler::A64_ADD_X_Wm_SXTW(uint32_t Rd,
+uint32_t ArmToArm64Assembler::A64_ADD_X_Wm_SXTW(uint32_t Rd,
uint32_t Rn,
uint32_t Rm,
uint32_t amount)
@@ -1053,7 +1053,7 @@ uint32_t ArmToAarch64Assembler::A64_ADD_X_Wm_SXTW(uint32_t Rd,
}
-uint32_t ArmToAarch64Assembler::A64_SUB_X_Wm_SXTW(uint32_t Rd,
+uint32_t ArmToArm64Assembler::A64_SUB_X_Wm_SXTW(uint32_t Rd,
uint32_t Rn,
uint32_t Rm,
uint32_t amount)
@@ -1064,13 +1064,13 @@ uint32_t ArmToAarch64Assembler::A64_SUB_X_Wm_SXTW(uint32_t Rd,
}
-uint32_t ArmToAarch64Assembler::A64_B_COND(uint32_t cc, uint32_t offset)
+uint32_t ArmToArm64Assembler::A64_B_COND(uint32_t cc, uint32_t offset)
{
LOG_INSTR("B.%s #.+%d\n", cc_codes[cc], offset);
return (0x54 << 24) | ((offset/4) << 5) | (cc);
}
-uint32_t ArmToAarch64Assembler::A64_ADD_X(uint32_t Rd, uint32_t Rn,
+uint32_t ArmToArm64Assembler::A64_ADD_X(uint32_t Rd, uint32_t Rn,
uint32_t Rm, uint32_t shift,
uint32_t amount)
{
@@ -1079,21 +1079,21 @@ uint32_t ArmToAarch64Assembler::A64_ADD_X(uint32_t Rd, uint32_t Rn,
return ((0x8B << 24) | (shift << 22) | ( Rm << 16) |
(amount << 10) |(Rn << 5) | Rd);
}
-uint32_t ArmToAarch64Assembler::A64_ADD_IMM_X(uint32_t Rd, uint32_t Rn,
+uint32_t ArmToArm64Assembler::A64_ADD_IMM_X(uint32_t Rd, uint32_t Rn,
uint32_t imm, uint32_t shift)
{
LOG_INSTR("ADD X%d, X%d, #%d, LSL #%d\n", Rd, Rn, imm, shift);
return (0x91 << 24) | ((shift/12) << 22) | (imm << 10) | (Rn << 5) | Rd;
}
-uint32_t ArmToAarch64Assembler::A64_SUB_IMM_X(uint32_t Rd, uint32_t Rn,
+uint32_t ArmToArm64Assembler::A64_SUB_IMM_X(uint32_t Rd, uint32_t Rn,
uint32_t imm, uint32_t shift)
{
LOG_INSTR("SUB X%d, X%d, #%d, LSL #%d\n", Rd, Rn, imm, shift);
return (0xD1 << 24) | ((shift/12) << 22) | (imm << 10) | (Rn << 5) | Rd;
}
-uint32_t ArmToAarch64Assembler::A64_ADD_W(uint32_t Rd, uint32_t Rn,
+uint32_t ArmToArm64Assembler::A64_ADD_W(uint32_t Rd, uint32_t Rn,
uint32_t Rm, uint32_t shift,
uint32_t amount)
{
@@ -1103,7 +1103,7 @@ uint32_t ArmToAarch64Assembler::A64_ADD_W(uint32_t Rd, uint32_t Rn,
(amount << 10) |(Rn << 5) | Rd);
}
-uint32_t ArmToAarch64Assembler::A64_SUB_W(uint32_t Rd, uint32_t Rn,
+uint32_t ArmToArm64Assembler::A64_SUB_W(uint32_t Rd, uint32_t Rn,
uint32_t Rm, uint32_t shift,
uint32_t amount,
uint32_t setflag)
@@ -1124,7 +1124,7 @@ uint32_t ArmToAarch64Assembler::A64_SUB_W(uint32_t Rd, uint32_t Rn,
}
}
-uint32_t ArmToAarch64Assembler::A64_AND_W(uint32_t Rd, uint32_t Rn,
+uint32_t ArmToArm64Assembler::A64_AND_W(uint32_t Rd, uint32_t Rn,
uint32_t Rm, uint32_t shift,
uint32_t amount)
{
@@ -1134,7 +1134,7 @@ uint32_t ArmToAarch64Assembler::A64_AND_W(uint32_t Rd, uint32_t Rn,
(amount << 10) |(Rn << 5) | Rd);
}
-uint32_t ArmToAarch64Assembler::A64_ORR_W(uint32_t Rd, uint32_t Rn,
+uint32_t ArmToArm64Assembler::A64_ORR_W(uint32_t Rd, uint32_t Rn,
uint32_t Rm, uint32_t shift,
uint32_t amount)
{
@@ -1144,7 +1144,7 @@ uint32_t ArmToAarch64Assembler::A64_ORR_W(uint32_t Rd, uint32_t Rn,
(amount << 10) |(Rn << 5) | Rd);
}
-uint32_t ArmToAarch64Assembler::A64_ORN_W(uint32_t Rd, uint32_t Rn,
+uint32_t ArmToArm64Assembler::A64_ORN_W(uint32_t Rd, uint32_t Rn,
uint32_t Rm, uint32_t shift,
uint32_t amount)
{
@@ -1154,76 +1154,76 @@ uint32_t ArmToAarch64Assembler::A64_ORN_W(uint32_t Rd, uint32_t Rn,
(amount << 10) |(Rn << 5) | Rd);
}
-uint32_t ArmToAarch64Assembler::A64_CSEL_X(uint32_t Rd, uint32_t Rn,
+uint32_t ArmToArm64Assembler::A64_CSEL_X(uint32_t Rd, uint32_t Rn,
uint32_t Rm, uint32_t cond)
{
LOG_INSTR("CSEL X%d, X%d, X%d, %s\n", Rd, Rn, Rm, cc_codes[cond]);
return ((0x9A << 24)|(0x1 << 23)|(Rm << 16) |(cond << 12)| (Rn << 5) | Rd);
}
-uint32_t ArmToAarch64Assembler::A64_CSEL_W(uint32_t Rd, uint32_t Rn,
+uint32_t ArmToArm64Assembler::A64_CSEL_W(uint32_t Rd, uint32_t Rn,
uint32_t Rm, uint32_t cond)
{
LOG_INSTR("CSEL W%d, W%d, W%d, %s\n", Rd, Rn, Rm, cc_codes[cond]);
return ((0x1A << 24)|(0x1 << 23)|(Rm << 16) |(cond << 12)| (Rn << 5) | Rd);
}
-uint32_t ArmToAarch64Assembler::A64_RET(uint32_t Rn)
+uint32_t ArmToArm64Assembler::A64_RET(uint32_t Rn)
{
LOG_INSTR("RET X%d\n", Rn);
return ((0xD6 << 24) | (0x1 << 22) | (0x1F << 16) | (Rn << 5));
}
-uint32_t ArmToAarch64Assembler::A64_MOVZ_X(uint32_t Rd, uint32_t imm,
+uint32_t ArmToArm64Assembler::A64_MOVZ_X(uint32_t Rd, uint32_t imm,
uint32_t shift)
{
LOG_INSTR("MOVZ X%d, #0x%x, LSL #%d\n", Rd, imm, shift);
return(0xD2 << 24) | (0x1 << 23) | ((shift/16) << 21) | (imm << 5) | Rd;
}
-uint32_t ArmToAarch64Assembler::A64_MOVK_W(uint32_t Rd, uint32_t imm,
+uint32_t ArmToArm64Assembler::A64_MOVK_W(uint32_t Rd, uint32_t imm,
uint32_t shift)
{
LOG_INSTR("MOVK W%d, #0x%x, LSL #%d\n", Rd, imm, shift);
return (0x72 << 24) | (0x1 << 23) | ((shift/16) << 21) | (imm << 5) | Rd;
}
-uint32_t ArmToAarch64Assembler::A64_MOVZ_W(uint32_t Rd, uint32_t imm,
+uint32_t ArmToArm64Assembler::A64_MOVZ_W(uint32_t Rd, uint32_t imm,
uint32_t shift)
{
LOG_INSTR("MOVZ W%d, #0x%x, LSL #%d\n", Rd, imm, shift);
return(0x52 << 24) | (0x1 << 23) | ((shift/16) << 21) | (imm << 5) | Rd;
}
-uint32_t ArmToAarch64Assembler::A64_SMADDL(uint32_t Rd, uint32_t Rn,
+uint32_t ArmToArm64Assembler::A64_SMADDL(uint32_t Rd, uint32_t Rn,
uint32_t Rm, uint32_t Ra)
{
LOG_INSTR("SMADDL X%d, W%d, W%d, X%d\n",Rd, Rn, Rm, Ra);
return ((0x9B << 24) | (0x1 << 21) | (Rm << 16)|(Ra << 10)|(Rn << 5) | Rd);
}
-uint32_t ArmToAarch64Assembler::A64_MADD_W(uint32_t Rd, uint32_t Rn,
+uint32_t ArmToArm64Assembler::A64_MADD_W(uint32_t Rd, uint32_t Rn,
uint32_t Rm, uint32_t Ra)
{
LOG_INSTR("MADD W%d, W%d, W%d, W%d\n",Rd, Rn, Rm, Ra);
return ((0x1B << 24) | (Rm << 16) | (Ra << 10) |(Rn << 5) | Rd);
}
-uint32_t ArmToAarch64Assembler::A64_SBFM_W(uint32_t Rd, uint32_t Rn,
+uint32_t ArmToArm64Assembler::A64_SBFM_W(uint32_t Rd, uint32_t Rn,
uint32_t immr, uint32_t imms)
{
LOG_INSTR("SBFM W%d, W%d, #%d, #%d\n", Rd, Rn, immr, imms);
return ((0x13 << 24) | (immr << 16) | (imms << 10) | (Rn << 5) | Rd);
}
-uint32_t ArmToAarch64Assembler::A64_UBFM_W(uint32_t Rd, uint32_t Rn,
+uint32_t ArmToArm64Assembler::A64_UBFM_W(uint32_t Rd, uint32_t Rn,
uint32_t immr, uint32_t imms)
{
LOG_INSTR("UBFM W%d, W%d, #%d, #%d\n", Rd, Rn, immr, imms);
return ((0x53 << 24) | (immr << 16) | (imms << 10) | (Rn << 5) | Rd);
}
-uint32_t ArmToAarch64Assembler::A64_UBFM_X(uint32_t Rd, uint32_t Rn,
+uint32_t ArmToArm64Assembler::A64_UBFM_X(uint32_t Rd, uint32_t Rn,
uint32_t immr, uint32_t imms)
{
LOG_INSTR("UBFM X%d, X%d, #%d, #%d\n", Rd, Rn, immr, imms);
@@ -1231,7 +1231,7 @@ uint32_t ArmToAarch64Assembler::A64_UBFM_X(uint32_t Rd, uint32_t Rn,
(immr << 16) | (imms << 10) | (Rn << 5) | Rd);
}
-uint32_t ArmToAarch64Assembler::A64_EXTR_W(uint32_t Rd, uint32_t Rn,
+uint32_t ArmToArm64Assembler::A64_EXTR_W(uint32_t Rd, uint32_t Rn,
uint32_t Rm, uint32_t lsb)
{
LOG_INSTR("EXTR W%d, W%d, W%d, #%d\n", Rd, Rn, Rm, lsb);
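
Each A64_* emitter above packs fixed bit fields into one 32-bit instruction word. A standalone check of the A64_ADD_IMM_X layout against a hand-assembled ADD X0, X1, #4; the expected word is derived from the ARMv8 encoding, not taken from this page:

    #include <assert.h>
    #include <stdint.h>

    /* Same field layout as A64_ADD_IMM_X above: opcode 0x91 in bits [31:24],
     * the shift flag (shift/12, so only LSL #0 or LSL #12) at bit 22,
     * imm12 in bits [21:10], Rn in [9:5], Rd in [4:0]. */
    static uint32_t add_imm_x(uint32_t Rd, uint32_t Rn, uint32_t imm, uint32_t shift)
    {
        return (0x91u << 24) | ((shift / 12) << 22) | (imm << 10) | (Rn << 5) | Rd;
    }

    int main(void)
    {
        assert(add_imm_x(0, 1, 4, 0) == 0x91001020u); /* ADD X0, X1, #4 */
        return 0;
    }
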
diff --git a/libpixelflinger/codeflinger/Aarch64Assembler.h b/libpixelflinger/codeflinger/Arm64Assembler.h
index 79c912b..8479270 100644
--- a/libpixelflinger/codeflinger/Aarch64Assembler.h
+++ b/libpixelflinger/codeflinger/Arm64Assembler.h
@@ -26,8 +26,8 @@
* SUCH DAMAGE.
*/
-#ifndef ANDROID_ARMTOAARCH64ASSEMBLER_H
-#define ANDROID_ARMTOAARCH64ASSEMBLER_H
+#ifndef ANDROID_ARMTOARM64ASSEMBLER_H
+#define ANDROID_ARMTOARM64ASSEMBLER_H
#include <stdint.h>
#include <sys/types.h>
@@ -44,12 +44,12 @@ namespace android {
// ----------------------------------------------------------------------------
-class ArmToAarch64Assembler : public ARMAssemblerInterface
+class ArmToArm64Assembler : public ARMAssemblerInterface
{
public:
- ArmToAarch64Assembler(const sp<Assembly>& assembly);
- ArmToAarch64Assembler(void *base);
- virtual ~ArmToAarch64Assembler();
+ ArmToArm64Assembler(const sp<Assembly>& assembly);
+ ArmToArm64Assembler(void *base);
+ virtual ~ArmToArm64Assembler();
uint32_t* base() const;
uint32_t* pc() const;
@@ -176,8 +176,8 @@ public:
virtual void UBFX(int cc, int Rd, int Rn, int lsb, int width);
private:
- ArmToAarch64Assembler(const ArmToAarch64Assembler& rhs);
- ArmToAarch64Assembler& operator = (const ArmToAarch64Assembler& rhs);
+ ArmToArm64Assembler(const ArmToArm64Assembler& rhs);
+ ArmToArm64Assembler& operator = (const ArmToArm64Assembler& rhs);
// -----------------------------------------------------------------------
// helper functions
@@ -189,7 +189,7 @@ private:
int Rd, int Rn, uint32_t Op2);
// -----------------------------------------------------------------------
- // Aarch64 instructions
+ // Arm64 instructions
// -----------------------------------------------------------------------
uint32_t A64_B_COND(uint32_t cc, uint32_t offset);
uint32_t A64_RET(uint32_t Rn);
@@ -287,4 +287,4 @@ private:
}; // namespace android
-#endif //ANDROID_AARCH64ASSEMBLER_H
+#endif //ANDROID_ARMTOARM64ASSEMBLER_H
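
The header keeps ARM's conditional interface, so opcodes still carry a cc argument; per the notes in Arm64Assembler.cpp these lower to a temp-register result plus CSEL. A minimal usage sketch, assuming buf is writable and large enough and using the MOV shortcut inherited from ARMAssemblerInterface:

    #include "codeflinger/Arm64Assembler.h"
    using namespace android;

    void emit_conditional_move(void* buf)
    {
        ArmToArm64Assembler a(buf);  // raw-buffer constructor from the header above
        a.prolog();
        // ARM "MOVNE r0, r1"; lowered as described in the design notes:
        // the result goes into a temp register, then CSEL W0, Wtmp, W0, NE
        // so W0 changes only when the NE condition holds.
        a.MOV(ArmToArm64Assembler::NE, 0, /*Rd=*/0, /*Op2=*/1);
        a.epilog(0);
    }
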
diff --git a/libpixelflinger/codeflinger/Aarch64Disassembler.cpp b/libpixelflinger/codeflinger/Arm64Disassembler.cpp
index 4bb97b4..70f1ff1 100644
--- a/libpixelflinger/codeflinger/Aarch64Disassembler.cpp
+++ b/libpixelflinger/codeflinger/Arm64Disassembler.cpp
@@ -267,7 +267,7 @@ static void decode_token(uint32_t code, char *token, char *instr_part)
return;
}
-int aarch64_disassemble(uint32_t code, char* instr)
+int arm64_disassemble(uint32_t code, char* instr)
{
uint32_t i;
char token[256];
diff --git a/libpixelflinger/codeflinger/Aarch64Disassembler.h b/libpixelflinger/codeflinger/Arm64Disassembler.h
index 177d692..86f3aba 100644
--- a/libpixelflinger/codeflinger/Aarch64Disassembler.h
+++ b/libpixelflinger/codeflinger/Arm64Disassembler.h
@@ -26,10 +26,10 @@
* SUCH DAMAGE.
*/
-#ifndef ANDROID_AARCH64DISASSEMBLER_H
-#define ANDROID_AARCH64DISASSEMBLER_H
+#ifndef ANDROID_ARM64DISASSEMBLER_H
+#define ANDROID_ARM64DISASSEMBLER_H
#include <inttypes.h>
-int aarch64_disassemble(uint32_t code, char* instr);
+int arm64_disassemble(uint32_t code, char* instr);
-#endif //ANDROID_AARCH64ASSEMBLER_H
+#endif //ANDROID_ARM64DISASSEMBLER_H
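
The renamed entry point keeps the one-word-in, text-out contract. A small driver sketch mirroring the loop in ArmToArm64Assembler::disassemble() above:

    #include <stdint.h>
    #include <stdio.h>
    #include "codeflinger/Arm64Disassembler.h"

    void dump(uint32_t* begin, uint32_t* end)
    {
        for (uint32_t* i = begin; i < end; i++) {
            char instr[256];               // same buffer size the JIT uses
            ::arm64_disassemble(*i, instr);
            printf("%p: %08x %s\n", (void*)i, *i, instr);
        }
    }
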
diff --git a/libpixelflinger/codeflinger/GGLAssembler.cpp b/libpixelflinger/codeflinger/GGLAssembler.cpp
index 7f088db..2422d7b 100644
--- a/libpixelflinger/codeflinger/GGLAssembler.cpp
+++ b/libpixelflinger/codeflinger/GGLAssembler.cpp
@@ -901,7 +901,7 @@ void GGLAssembler::build_and_immediate(int d, int s, uint32_t mask, int bits)
AND( AL, 0, d, s, imm(mask) );
return;
}
- else if (getCodegenArch() == CODEGEN_ARCH_AARCH64) {
+ else if (getCodegenArch() == CODEGEN_ARCH_ARM64) {
AND( AL, 0, d, s, imm(mask) );
return;
}
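
The ARM64 branch can hand AND any 32-bit mask because, per the notes in Arm64Assembler.cpp, the translator rewrites immediates it cannot encode as move-immediate instructions followed by a register-register operation. A standalone sketch of that expansion reusing the A64_MOVZ_W/A64_MOVK_W encodings from this page; W17 matches mTmpReg3, and the example mask is arbitrary:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t movz_w(uint32_t Rd, uint32_t imm, uint32_t shift)
    { return (0x52u << 24) | (0x1 << 23) | ((shift / 16) << 21) | (imm << 5) | Rd; }

    static uint32_t movk_w(uint32_t Rd, uint32_t imm, uint32_t shift)
    { return (0x72u << 24) | (0x1 << 23) | ((shift / 16) << 21) | (imm << 5) | Rd; }

    int main(void)
    {
        uint32_t mask = 0x00ffff00;  // arbitrary AND mask from GGLAssembler
        printf("%08x  MOVZ W17, #0x%04x\n",          movz_w(17, mask & 0xffff, 0), mask & 0xffff);
        printf("%08x  MOVK W17, #0x%04x, LSL #16\n", movk_w(17, mask >> 16, 16),   mask >> 16);
        /* ...followed by a plain register-register AND Wd, Wn, W17 */
        return 0;
    }
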