am 4800e20e: Merge "system/core: rename aarch64 target to arm64"

* commit '4800e20eafacabe76e56183aa82797ac5c4057c1':
  system/core: rename aarch64 target to arm64
Authored by Colin Cross on 2014-01-24 12:38:55 -08:00; committed by Android Git Automerger as commit 02f68c4b5f
24 changed files with 177 additions and 177 deletions

@ -44,7 +44,7 @@ extern "C" {
#endif #endif
#if defined(__aarch64__) #if defined(__aarch64__)
#include <cutils/atomic-aarch64.h> #include <cutils/atomic-arm64.h>
#elif defined(__arm__) #elif defined(__arm__)
#include <cutils/atomic-arm.h> #include <cutils/atomic-arm.h>
#elif defined(__i386__) || defined(__x86_64__) #elif defined(__i386__) || defined(__x86_64__)
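
Only the build-system spelling changes here: the compiler still predefines __aarch64__, so the #if chain keeps keying off that macro while the included header is renamed to atomic-arm64.h. A minimal, hypothetical probe (not part of the change) illustrating the distinction:

    #include <cstdio>

    // Hypothetical check: __aarch64__ is the compiler predefine; "arm64" is
    // only the build-system/target name introduced by this rename.
    int main() {
    #if defined(__aarch64__)
        std::printf("arm64 build: __aarch64__ defined, %zu-byte pointers\n", sizeof(void*));
    #elif defined(__arm__)
        std::printf("32-bit ARM build\n");
    #else
        std::printf("other architecture\n");
    #endif
        return 0;
    }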

@ -148,10 +148,10 @@ LOCAL_CFLAGS += \
-DGTEST_OS_LINUX_ANDROID \ -DGTEST_OS_LINUX_ANDROID \
-DGTEST_HAS_STD_STRING \ -DGTEST_HAS_STD_STRING \
ifeq ($(TARGET_ARCH),aarch64) ifeq ($(TARGET_ARCH),arm64)
$(info TODO: $(LOCAL_PATH)/Android.mk -fstack-protector not yet available for the AArch64 toolchain) $(info TODO: $(LOCAL_PATH)/Android.mk -fstack-protector not yet available for the AArch64 toolchain)
LOCAL_CFLAGS += -fno-stack-protector LOCAL_CFLAGS += -fno-stack-protector
endif # aarch64 endif # arm64
LOCAL_CONLYFLAGS += \ LOCAL_CONLYFLAGS += \
$(common_conlyflags) \ $(common_conlyflags) \

@ -52,11 +52,11 @@ endif
LOCAL_SHARED_LIBRARIES := libcutils liblog LOCAL_SHARED_LIBRARIES := libcutils liblog
ifeq ($(TARGET_ARCH),aarch64) ifeq ($(TARGET_ARCH),arm64)
PIXELFLINGER_SRC_FILES += arch-aarch64/t32cb16blend.S PIXELFLINGER_SRC_FILES += arch-arm64/t32cb16blend.S
PIXELFLINGER_SRC_FILES += arch-aarch64/col32cb16blend.S PIXELFLINGER_SRC_FILES += arch-arm64/col32cb16blend.S
PIXELFLINGER_SRC_FILES += codeflinger/Aarch64Assembler.cpp PIXELFLINGER_SRC_FILES += codeflinger/Arm64Assembler.cpp
PIXELFLINGER_SRC_FILES += codeflinger/Aarch64Disassembler.cpp PIXELFLINGER_SRC_FILES += codeflinger/Arm64Disassembler.cpp
PIXELFLINGER_CFLAGS += -fstrict-aliasing -fomit-frame-pointer PIXELFLINGER_CFLAGS += -fstrict-aliasing -fomit-frame-pointer
endif endif

@ -28,7 +28,7 @@
.text .text
.align .align
.global scanline_col32cb16blend_aarch64 .global scanline_col32cb16blend_arm64
// //
// This function alpha blends a fixed color into a destination scanline, using // This function alpha blends a fixed color into a destination scanline, using
@ -46,7 +46,7 @@
// w2 = count // w2 = count
scanline_col32cb16blend_aarch64: scanline_col32cb16blend_arm64:
lsr w5, w1, #24 // shift down alpha lsr w5, w1, #24 // shift down alpha
mov w9, #0xff // create mask mov w9, #0xff // create mask
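
The routine above, renamed to scanline_col32cb16blend_arm64, blends one fixed 32-bit color into an RGB565 scanline using the color's alpha. A hedged scalar sketch of the same arithmetic, assuming the usual GGL packing (alpha in the top byte, red in the low byte); the function name and test values are illustrative, not part of the change:

    #include <cstddef>
    #include <cstdint>

    // Scalar reference for the fixed-color 32->16 bit blend: scale each RGB565
    // destination channel by (1 - alpha) and add the source channel.
    static void col32cb16blend_ref(uint16_t* dst, uint32_t col, size_t count) {
        const int a = col >> 24;
        const int f = 0x100 - (a + (a >> 7));            // destination scale factor, 0..256
        while (count--) {
            const uint16_t d = *dst;
            int r = ((col >>  3) & 0x1f) + ((f * ((d >> 11) & 0x1f)) >> 8);
            int g = ((col >> 10) & 0x3f) + ((f * ((d >>  5) & 0x3f)) >> 8);
            int b = ((col >> 19) & 0x1f) + ((f * ( d        & 0x1f)) >> 8);
            *dst++ = static_cast<uint16_t>((r << 11) | (g << 5) | b);
        }
    }

    int main() {
        uint16_t line[4] = {0xFFFF, 0x0000, 0xF800, 0x07E0};
        col32cb16blend_ref(line, 0x80FF0000u, 4);        // ~50% alpha, blue-ish color under the assumed packing
        return 0;
    }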

@ -28,7 +28,7 @@
.text .text
.align .align
.global scanline_t32cb16blend_aarch64 .global scanline_t32cb16blend_arm64
/* /*
* .macro pixel * .macro pixel
@ -155,7 +155,7 @@
// w12: scratch // w12: scratch
// w14: pixel // w14: pixel
scanline_t32cb16blend_aarch64: scanline_t32cb16blend_arm64:
// align DST to 32 bits // align DST to 32 bits
tst x0, #0x3 tst x0, #0x3

@ -63,7 +63,7 @@ public:
}; };
enum { enum {
CODEGEN_ARCH_ARM = 1, CODEGEN_ARCH_MIPS, CODEGEN_ARCH_AARCH64 CODEGEN_ARCH_ARM = 1, CODEGEN_ARCH_MIPS, CODEGEN_ARCH_ARM64
}; };
// ----------------------------------------------------------------------- // -----------------------------------------------------------------------

@ -26,7 +26,7 @@
* SUCH DAMAGE. * SUCH DAMAGE.
*/ */
#define LOG_TAG "ArmToAarch64Assembler" #define LOG_TAG "ArmToArm64Assembler"
#include <stdio.h> #include <stdio.h>
#include <stdlib.h> #include <stdlib.h>
@ -36,45 +36,45 @@
#include <cutils/properties.h> #include <cutils/properties.h>
#include <private/pixelflinger/ggl_context.h> #include <private/pixelflinger/ggl_context.h>
#include "codeflinger/Aarch64Assembler.h" #include "codeflinger/Arm64Assembler.h"
#include "codeflinger/CodeCache.h" #include "codeflinger/CodeCache.h"
#include "codeflinger/Aarch64Disassembler.h" #include "codeflinger/Arm64Disassembler.h"
/* /*
** -------------------------------------------- ** --------------------------------------------
** Support for Aarch64 in GGLAssembler JIT ** Support for Arm64 in GGLAssembler JIT
** -------------------------------------------- ** --------------------------------------------
** **
** Approach ** Approach
** - GGLAssembler and associated files are largely un-changed. ** - GGLAssembler and associated files are largely un-changed.
** - A translator class maps ArmAssemblerInterface calls to ** - A translator class maps ArmAssemblerInterface calls to
** generate AArch64 instructions. ** generate Arm64 instructions.
** **
** ---------------------- ** ----------------------
** ArmToAarch64Assembler ** ArmToArm64Assembler
** ---------------------- ** ----------------------
** **
** - Subclassed from ArmAssemblerInterface ** - Subclassed from ArmAssemblerInterface
** **
** - Translates each ArmAssemblerInterface call to generate ** - Translates each ArmAssemblerInterface call to generate
** one or more Aarch64 instructions as necessary. ** one or more Arm64 instructions as necessary.
** **
** - Does not implement ArmAssemblerInterface portions unused by GGLAssembler ** - Does not implement ArmAssemblerInterface portions unused by GGLAssembler
** It calls NOT_IMPLEMENTED() for such cases, which in turn logs ** It calls NOT_IMPLEMENTED() for such cases, which in turn logs
** a fatal message. ** a fatal message.
** **
** - Uses A64_.. series of functions to generate instruction machine code ** - Uses A64_.. series of functions to generate instruction machine code
** for Aarch64 instructions. These functions also log the instruction ** for Arm64 instructions. These functions also log the instruction
** to LOG, if AARCH64_ASM_DEBUG define is set to 1 ** to LOG, if ARM64_ASM_DEBUG define is set to 1
** **
** - Dumps machine code and eqvt assembly if "debug.pf.disasm" option is set ** - Dumps machine code and eqvt assembly if "debug.pf.disasm" option is set
** It uses aarch64_disassemble to perform disassembly ** It uses arm64_disassemble to perform disassembly
** **
** - Uses register 13 (SP in ARM), 15 (PC in ARM), 16, 17 for storing ** - Uses register 13 (SP in ARM), 15 (PC in ARM), 16, 17 for storing
** intermediate results. GGLAssembler does not use SP and PC as these ** intermediate results. GGLAssembler does not use SP and PC as these
** registers are marked as reserved. The temporary registers are not ** registers are marked as reserved. The temporary registers are not
** saved/restored on stack as these are caller-saved registers in Aarch64 ** saved/restored on stack as these are caller-saved registers in Arm64
** **
** - Uses CSEL instruction to support conditional execution. The result is ** - Uses CSEL instruction to support conditional execution. The result is
** stored in a temporary register and then copied to the target register ** stored in a temporary register and then copied to the target register
@ -89,10 +89,10 @@
** move immediate instructions followed by register-register instruction. ** move immediate instructions followed by register-register instruction.
** **
** -------------------------------------------- ** --------------------------------------------
** ArmToAarch64Assembler unit test bench ** ArmToArm64Assembler unit test bench
** -------------------------------------------- ** --------------------------------------------
** **
** - Tests ArmToAarch64Assembler interface for all the possible ** - Tests ArmToArm64Assembler interface for all the possible
** ways in which GGLAssembler uses ArmAssemblerInterface interface. ** ways in which GGLAssembler uses ArmAssemblerInterface interface.
** **
** - Uses test jacket (written in assembly) to set the registers, ** - Uses test jacket (written in assembly) to set the registers,
@ -105,10 +105,10 @@
** (ii) data transfer tests and (iii) LDM/STM tests. ** (ii) data transfer tests and (iii) LDM/STM tests.
** **
** ---------------------- ** ----------------------
** Aarch64 disassembler ** Arm64 disassembler
** ---------------------- ** ----------------------
** - This disassembler disassembles only those machine codes which can be ** - This disassembler disassembles only those machine codes which can be
** generated by ArmToAarch64Assembler. It has a unit testbench which ** generated by ArmToArm64Assembler. It has a unit testbench which
** tests all the instructions supported by the disassembler. ** tests all the instructions supported by the disassembler.
** **
** ------------------------------------------------------------------ ** ------------------------------------------------------------------
@ -122,13 +122,13 @@
** These are ADDR_LDR, ADDR_STR, ADDR_ADD, ADDR_SUB and they map to ** These are ADDR_LDR, ADDR_STR, ADDR_ADD, ADDR_SUB and they map to
** default 32 bit implementations in ARMAssemblerInterface. ** default 32 bit implementations in ARMAssemblerInterface.
** **
** - ArmToAarch64Assembler maps these functions to appropriate 64 bit ** - ArmToArm64Assembler maps these functions to appropriate 64 bit
** functions. ** functions.
** **
** ---------------------- ** ----------------------
** GGLAssembler changes ** GGLAssembler changes
** ---------------------- ** ----------------------
** - Since ArmToAarch64Assembler can generate 4 Aarch64 instructions for ** - Since ArmToArm64Assembler can generate 4 Arm64 instructions for
** each call in worst case, the memory required is set to 4 times ** each call in worst case, the memory required is set to 4 times
** ARM memory ** ARM memory
** **
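
The comment above describes the conditional-execution strategy: compute the result unconditionally into a temporary, then use CSEL to copy it into the destination only when the condition holds. A minimal sketch of that idea, with hypothetical emitAdd/emitCsel helpers standing in for the class's A64_* encoders and register 16 standing in for one of the temporaries mentioned above:

    #include <cstdio>

    enum { EQ = 0, NE = 1, AL = 14 };                    // ARM condition codes (subset)

    // Stand-ins for the A64_* encoder helpers; here they only log.
    static void emitAdd(int Rd, int Rn, int Rm)          { std::printf("ADD  W%d, W%d, W%d\n", Rd, Rn, Rm); }
    static void emitCsel(int Rd, int Rn, int Rm, int cc) { std::printf("CSEL W%d, W%d, W%d, cc=%d\n", Rd, Rn, Rm, cc); }

    // Lowering a conditional ARM "ADD<cc> Rd, Rn, Rm": compute into a scratch
    // register unconditionally, then select the result into Rd only when cc holds.
    static void lowerConditionalAdd(int cc, int Rd, int Rn, int Rm) {
        if (cc == AL) { emitAdd(Rd, Rn, Rm); return; }   // unconditional: emit directly
        const int tmp = 16;                              // caller-saved scratch on arm64
        emitAdd(tmp, Rn, Rm);
        emitCsel(Rd, tmp, Rd, cc);                       // Rd = cc ? tmp : Rd
    }

    int main() { lowerConditionalAdd(NE, 0, 1, 2); return 0; }
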
@ -140,9 +140,9 @@
#define NOT_IMPLEMENTED() LOG_FATAL("Arm instruction %s not yet implemented\n", __func__) #define NOT_IMPLEMENTED() LOG_FATAL("Arm instruction %s not yet implemented\n", __func__)
#define AARCH64_ASM_DEBUG 0 #define ARM64_ASM_DEBUG 0
#if AARCH64_ASM_DEBUG #if ARM64_ASM_DEBUG
#define LOG_INSTR(...) ALOGD("\t" __VA_ARGS__) #define LOG_INSTR(...) ALOGD("\t" __VA_ARGS__)
#define LOG_LABEL(...) ALOGD(__VA_ARGS__) #define LOG_LABEL(...) ALOGD(__VA_ARGS__)
#else #else
@ -163,7 +163,7 @@ static const char *cc_codes[] =
"GE", "LT", "GT", "LE", "AL", "NV" "GE", "LT", "GT", "LE", "AL", "NV"
}; };
ArmToAarch64Assembler::ArmToAarch64Assembler(const sp<Assembly>& assembly) ArmToArm64Assembler::ArmToArm64Assembler(const sp<Assembly>& assembly)
: ARMAssemblerInterface(), : ARMAssemblerInterface(),
mAssembly(assembly) mAssembly(assembly)
{ {
@ -175,7 +175,7 @@ ArmToAarch64Assembler::ArmToAarch64Assembler(const sp<Assembly>& assembly)
mTmpReg3 = 17; mTmpReg3 = 17;
} }
ArmToAarch64Assembler::ArmToAarch64Assembler(void *base) ArmToArm64Assembler::ArmToArm64Assembler(void *base)
: ARMAssemblerInterface(), mAssembly(NULL) : ARMAssemblerInterface(), mAssembly(NULL)
{ {
mBase = mPC = (uint32_t *)base; mBase = mPC = (uint32_t *)base;
@ -187,21 +187,21 @@ ArmToAarch64Assembler::ArmToAarch64Assembler(void *base)
mTmpReg3 = 17; mTmpReg3 = 17;
} }
ArmToAarch64Assembler::~ArmToAarch64Assembler() ArmToArm64Assembler::~ArmToArm64Assembler()
{ {
} }
uint32_t* ArmToAarch64Assembler::pc() const uint32_t* ArmToArm64Assembler::pc() const
{ {
return mPC; return mPC;
} }
uint32_t* ArmToAarch64Assembler::base() const uint32_t* ArmToArm64Assembler::base() const
{ {
return mBase; return mBase;
} }
void ArmToAarch64Assembler::reset() void ArmToArm64Assembler::reset()
{ {
if(mAssembly == NULL) if(mAssembly == NULL)
mPC = mBase; mPC = mBase;
@ -211,19 +211,19 @@ void ArmToAarch64Assembler::reset()
mLabels.clear(); mLabels.clear();
mLabelsInverseMapping.clear(); mLabelsInverseMapping.clear();
mComments.clear(); mComments.clear();
#if AARCH64_ASM_DEBUG #if ARM64_ASM_DEBUG
ALOGI("RESET\n"); ALOGI("RESET\n");
#endif #endif
} }
int ArmToAarch64Assembler::getCodegenArch() int ArmToArm64Assembler::getCodegenArch()
{ {
return CODEGEN_ARCH_AARCH64; return CODEGEN_ARCH_ARM64;
} }
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
void ArmToAarch64Assembler::disassemble(const char* name) void ArmToArm64Assembler::disassemble(const char* name)
{ {
if(name) if(name)
{ {
@ -246,34 +246,34 @@ void ArmToAarch64Assembler::disassemble(const char* name)
printf("%p: %08x ", i, uint32_t(i[0])); printf("%p: %08x ", i, uint32_t(i[0]));
{ {
char instr[256]; char instr[256];
::aarch64_disassemble(*i, instr); ::arm64_disassemble(*i, instr);
printf("%s\n", instr); printf("%s\n", instr);
} }
i++; i++;
} }
} }
void ArmToAarch64Assembler::comment(const char* string) void ArmToArm64Assembler::comment(const char* string)
{ {
mComments.add(mPC, string); mComments.add(mPC, string);
LOG_INSTR("//%s\n", string); LOG_INSTR("//%s\n", string);
} }
void ArmToAarch64Assembler::label(const char* theLabel) void ArmToArm64Assembler::label(const char* theLabel)
{ {
mLabels.add(theLabel, mPC); mLabels.add(theLabel, mPC);
mLabelsInverseMapping.add(mPC, theLabel); mLabelsInverseMapping.add(mPC, theLabel);
LOG_LABEL("%s:\n", theLabel); LOG_LABEL("%s:\n", theLabel);
} }
void ArmToAarch64Assembler::B(int cc, const char* label) void ArmToArm64Assembler::B(int cc, const char* label)
{ {
mBranchTargets.add(branch_target_t(label, mPC)); mBranchTargets.add(branch_target_t(label, mPC));
LOG_INSTR("B%s %s\n", cc_codes[cc], label ); LOG_INSTR("B%s %s\n", cc_codes[cc], label );
*mPC++ = (0x54 << 24) | cc; *mPC++ = (0x54 << 24) | cc;
} }
void ArmToAarch64Assembler::BL(int cc, const char* label) void ArmToArm64Assembler::BL(int cc, const char* label)
{ {
NOT_IMPLEMENTED(); //Not Required NOT_IMPLEMENTED(); //Not Required
} }
@ -282,21 +282,21 @@ void ArmToAarch64Assembler::BL(int cc, const char* label)
//Prolog/Epilog & Generate... //Prolog/Epilog & Generate...
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
void ArmToAarch64Assembler::prolog() void ArmToArm64Assembler::prolog()
{ {
// write prolog code // write prolog code
mPrologPC = mPC; mPrologPC = mPC;
*mPC++ = A64_MOVZ_X(mZeroReg,0,0); *mPC++ = A64_MOVZ_X(mZeroReg,0,0);
} }
void ArmToAarch64Assembler::epilog(uint32_t touched) void ArmToArm64Assembler::epilog(uint32_t touched)
{ {
// write epilog code // write epilog code
static const int XLR = 30; static const int XLR = 30;
*mPC++ = A64_RET(XLR); *mPC++ = A64_RET(XLR);
} }
int ArmToAarch64Assembler::generate(const char* name) int ArmToArm64Assembler::generate(const char* name)
{ {
// fixup all the branches // fixup all the branches
size_t count = mBranchTargets.size(); size_t count = mBranchTargets.size();
@ -329,7 +329,7 @@ int ArmToAarch64Assembler::generate(const char* name)
return NO_ERROR; return NO_ERROR;
} }
uint32_t* ArmToAarch64Assembler::pcForLabel(const char* label) uint32_t* ArmToArm64Assembler::pcForLabel(const char* label)
{ {
return mLabels.valueFor(label); return mLabels.valueFor(label);
} }
@ -337,7 +337,7 @@ uint32_t* ArmToAarch64Assembler::pcForLabel(const char* label)
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// Data Processing... // Data Processing...
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
void ArmToAarch64Assembler::dataProcessingCommon(int opcode, void ArmToArm64Assembler::dataProcessingCommon(int opcode,
int s, int Rd, int Rn, uint32_t Op2) int s, int Rd, int Rn, uint32_t Op2)
{ {
if(opcode != opSUB && s == 1) if(opcode != opSUB && s == 1)
@ -405,7 +405,7 @@ void ArmToAarch64Assembler::dataProcessingCommon(int opcode,
} }
} }
void ArmToAarch64Assembler::dataProcessing(int opcode, int cc, void ArmToArm64Assembler::dataProcessing(int opcode, int cc,
int s, int Rd, int Rn, uint32_t Op2) int s, int Rd, int Rn, uint32_t Op2)
{ {
uint32_t Wd; uint32_t Wd;
@ -460,7 +460,7 @@ void ArmToAarch64Assembler::dataProcessing(int opcode, int cc,
// Address Processing... // Address Processing...
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
void ArmToAarch64Assembler::ADDR_ADD(int cc, void ArmToArm64Assembler::ADDR_ADD(int cc,
int s, int Rd, int Rn, uint32_t Op2) int s, int Rd, int Rn, uint32_t Op2)
{ {
if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
@ -495,7 +495,7 @@ void ArmToAarch64Assembler::ADDR_ADD(int cc,
} }
} }
void ArmToAarch64Assembler::ADDR_SUB(int cc, void ArmToArm64Assembler::ADDR_SUB(int cc,
int s, int Rd, int Rn, uint32_t Op2) int s, int Rd, int Rn, uint32_t Op2)
{ {
if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
@ -516,7 +516,7 @@ void ArmToAarch64Assembler::ADDR_SUB(int cc,
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// multiply... // multiply...
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
void ArmToAarch64Assembler::MLA(int cc, int s,int Rd, int Rm, int Rs, int Rn) void ArmToArm64Assembler::MLA(int cc, int s,int Rd, int Rm, int Rs, int Rn)
{ {
if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
@ -524,28 +524,28 @@ void ArmToAarch64Assembler::MLA(int cc, int s,int Rd, int Rm, int Rs, int Rn)
if(s == 1) if(s == 1)
dataProcessingCommon(opSUB, 1, mTmpReg1, Rd, mZeroReg); dataProcessingCommon(opSUB, 1, mTmpReg1, Rd, mZeroReg);
} }
void ArmToAarch64Assembler::MUL(int cc, int s, int Rd, int Rm, int Rs) void ArmToArm64Assembler::MUL(int cc, int s, int Rd, int Rm, int Rs)
{ {
if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
if(s != 0) { NOT_IMPLEMENTED(); return;} //Not required if(s != 0) { NOT_IMPLEMENTED(); return;} //Not required
*mPC++ = A64_MADD_W(Rd, Rm, Rs, mZeroReg); *mPC++ = A64_MADD_W(Rd, Rm, Rs, mZeroReg);
} }
void ArmToAarch64Assembler::UMULL(int cc, int s, void ArmToArm64Assembler::UMULL(int cc, int s,
int RdLo, int RdHi, int Rm, int Rs) int RdLo, int RdHi, int Rm, int Rs)
{ {
NOT_IMPLEMENTED(); //Not required NOT_IMPLEMENTED(); //Not required
} }
void ArmToAarch64Assembler::UMUAL(int cc, int s, void ArmToArm64Assembler::UMUAL(int cc, int s,
int RdLo, int RdHi, int Rm, int Rs) int RdLo, int RdHi, int Rm, int Rs)
{ {
NOT_IMPLEMENTED(); //Not required NOT_IMPLEMENTED(); //Not required
} }
void ArmToAarch64Assembler::SMULL(int cc, int s, void ArmToArm64Assembler::SMULL(int cc, int s,
int RdLo, int RdHi, int Rm, int Rs) int RdLo, int RdHi, int Rm, int Rs)
{ {
NOT_IMPLEMENTED(); //Not required NOT_IMPLEMENTED(); //Not required
} }
void ArmToAarch64Assembler::SMUAL(int cc, int s, void ArmToArm64Assembler::SMUAL(int cc, int s,
int RdLo, int RdHi, int Rm, int Rs) int RdLo, int RdHi, int Rm, int Rs)
{ {
NOT_IMPLEMENTED(); //Not required NOT_IMPLEMENTED(); //Not required
@ -554,15 +554,15 @@ void ArmToAarch64Assembler::SMUAL(int cc, int s,
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// branches relative to PC... // branches relative to PC...
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
void ArmToAarch64Assembler::B(int cc, uint32_t* pc){ void ArmToArm64Assembler::B(int cc, uint32_t* pc){
NOT_IMPLEMENTED(); //Not required NOT_IMPLEMENTED(); //Not required
} }
void ArmToAarch64Assembler::BL(int cc, uint32_t* pc){ void ArmToArm64Assembler::BL(int cc, uint32_t* pc){
NOT_IMPLEMENTED(); //Not required NOT_IMPLEMENTED(); //Not required
} }
void ArmToAarch64Assembler::BX(int cc, int Rn){ void ArmToArm64Assembler::BX(int cc, int Rn){
NOT_IMPLEMENTED(); //Not required NOT_IMPLEMENTED(); //Not required
} }
@ -574,7 +574,7 @@ enum dataTransferOp
opLDR,opLDRB,opLDRH,opSTR,opSTRB,opSTRH opLDR,opLDRB,opLDRH,opSTR,opSTRB,opSTRH
}; };
void ArmToAarch64Assembler::dataTransfer(int op, int cc, void ArmToArm64Assembler::dataTransfer(int op, int cc,
int Rd, int Rn, uint32_t op_type, uint32_t size) int Rd, int Rn, uint32_t op_type, uint32_t size)
{ {
const int XSP = 31; const int XSP = 31;
@ -631,46 +631,46 @@ void ArmToAarch64Assembler::dataTransfer(int op, int cc,
return; return;
} }
void ArmToAarch64Assembler::ADDR_LDR(int cc, int Rd, int Rn, uint32_t op_type) void ArmToArm64Assembler::ADDR_LDR(int cc, int Rd, int Rn, uint32_t op_type)
{ {
return dataTransfer(opLDR, cc, Rd, Rn, op_type, 64); return dataTransfer(opLDR, cc, Rd, Rn, op_type, 64);
} }
void ArmToAarch64Assembler::ADDR_STR(int cc, int Rd, int Rn, uint32_t op_type) void ArmToArm64Assembler::ADDR_STR(int cc, int Rd, int Rn, uint32_t op_type)
{ {
return dataTransfer(opSTR, cc, Rd, Rn, op_type, 64); return dataTransfer(opSTR, cc, Rd, Rn, op_type, 64);
} }
void ArmToAarch64Assembler::LDR(int cc, int Rd, int Rn, uint32_t op_type) void ArmToArm64Assembler::LDR(int cc, int Rd, int Rn, uint32_t op_type)
{ {
return dataTransfer(opLDR, cc, Rd, Rn, op_type); return dataTransfer(opLDR, cc, Rd, Rn, op_type);
} }
void ArmToAarch64Assembler::LDRB(int cc, int Rd, int Rn, uint32_t op_type) void ArmToArm64Assembler::LDRB(int cc, int Rd, int Rn, uint32_t op_type)
{ {
return dataTransfer(opLDRB, cc, Rd, Rn, op_type); return dataTransfer(opLDRB, cc, Rd, Rn, op_type);
} }
void ArmToAarch64Assembler::STR(int cc, int Rd, int Rn, uint32_t op_type) void ArmToArm64Assembler::STR(int cc, int Rd, int Rn, uint32_t op_type)
{ {
return dataTransfer(opSTR, cc, Rd, Rn, op_type); return dataTransfer(opSTR, cc, Rd, Rn, op_type);
} }
void ArmToAarch64Assembler::STRB(int cc, int Rd, int Rn, uint32_t op_type) void ArmToArm64Assembler::STRB(int cc, int Rd, int Rn, uint32_t op_type)
{ {
return dataTransfer(opSTRB, cc, Rd, Rn, op_type); return dataTransfer(opSTRB, cc, Rd, Rn, op_type);
} }
void ArmToAarch64Assembler::LDRH(int cc, int Rd, int Rn, uint32_t op_type) void ArmToArm64Assembler::LDRH(int cc, int Rd, int Rn, uint32_t op_type)
{ {
return dataTransfer(opLDRH, cc, Rd, Rn, op_type); return dataTransfer(opLDRH, cc, Rd, Rn, op_type);
} }
void ArmToAarch64Assembler::LDRSB(int cc, int Rd, int Rn, uint32_t offset) void ArmToArm64Assembler::LDRSB(int cc, int Rd, int Rn, uint32_t offset)
{ {
NOT_IMPLEMENTED(); //Not required NOT_IMPLEMENTED(); //Not required
} }
void ArmToAarch64Assembler::LDRSH(int cc, int Rd, int Rn, uint32_t offset) void ArmToArm64Assembler::LDRSH(int cc, int Rd, int Rn, uint32_t offset)
{ {
NOT_IMPLEMENTED(); //Not required NOT_IMPLEMENTED(); //Not required
} }
void ArmToAarch64Assembler::STRH(int cc, int Rd, int Rn, uint32_t op_type) void ArmToArm64Assembler::STRH(int cc, int Rd, int Rn, uint32_t op_type)
{ {
return dataTransfer(opSTRH, cc, Rd, Rn, op_type); return dataTransfer(opSTRH, cc, Rd, Rn, op_type);
} }
@ -678,7 +678,7 @@ void ArmToAarch64Assembler::STRH(int cc, int Rd, int Rn, uint32_t op_type)
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// block data transfer... // block data transfer...
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
void ArmToAarch64Assembler::LDM(int cc, int dir, void ArmToArm64Assembler::LDM(int cc, int dir,
int Rn, int W, uint32_t reg_list) int Rn, int W, uint32_t reg_list)
{ {
const int XSP = 31; const int XSP = 31;
@ -699,7 +699,7 @@ void ArmToAarch64Assembler::LDM(int cc, int dir,
} }
} }
void ArmToAarch64Assembler::STM(int cc, int dir, void ArmToArm64Assembler::STM(int cc, int dir,
int Rn, int W, uint32_t reg_list) int Rn, int W, uint32_t reg_list)
{ {
const int XSP = 31; const int XSP = 31;
@ -723,15 +723,15 @@ void ArmToAarch64Assembler::STM(int cc, int dir,
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// special... // special...
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
void ArmToAarch64Assembler::SWP(int cc, int Rn, int Rd, int Rm) void ArmToArm64Assembler::SWP(int cc, int Rn, int Rd, int Rm)
{ {
NOT_IMPLEMENTED(); //Not required NOT_IMPLEMENTED(); //Not required
} }
void ArmToAarch64Assembler::SWPB(int cc, int Rn, int Rd, int Rm) void ArmToArm64Assembler::SWPB(int cc, int Rn, int Rd, int Rm)
{ {
NOT_IMPLEMENTED(); //Not required NOT_IMPLEMENTED(); //Not required
} }
void ArmToAarch64Assembler::SWI(int cc, uint32_t comment) void ArmToArm64Assembler::SWI(int cc, uint32_t comment)
{ {
NOT_IMPLEMENTED(); //Not required NOT_IMPLEMENTED(); //Not required
} }
@ -739,31 +739,31 @@ void ArmToAarch64Assembler::SWI(int cc, uint32_t comment)
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// DSP instructions... // DSP instructions...
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
void ArmToAarch64Assembler::PLD(int Rn, uint32_t offset) { void ArmToArm64Assembler::PLD(int Rn, uint32_t offset) {
NOT_IMPLEMENTED(); //Not required NOT_IMPLEMENTED(); //Not required
} }
void ArmToAarch64Assembler::CLZ(int cc, int Rd, int Rm) void ArmToArm64Assembler::CLZ(int cc, int Rd, int Rm)
{ {
NOT_IMPLEMENTED(); //Not required NOT_IMPLEMENTED(); //Not required
} }
void ArmToAarch64Assembler::QADD(int cc, int Rd, int Rm, int Rn) void ArmToArm64Assembler::QADD(int cc, int Rd, int Rm, int Rn)
{ {
NOT_IMPLEMENTED(); //Not required NOT_IMPLEMENTED(); //Not required
} }
void ArmToAarch64Assembler::QDADD(int cc, int Rd, int Rm, int Rn) void ArmToArm64Assembler::QDADD(int cc, int Rd, int Rm, int Rn)
{ {
NOT_IMPLEMENTED(); //Not required NOT_IMPLEMENTED(); //Not required
} }
void ArmToAarch64Assembler::QSUB(int cc, int Rd, int Rm, int Rn) void ArmToArm64Assembler::QSUB(int cc, int Rd, int Rm, int Rn)
{ {
NOT_IMPLEMENTED(); //Not required NOT_IMPLEMENTED(); //Not required
} }
void ArmToAarch64Assembler::QDSUB(int cc, int Rd, int Rm, int Rn) void ArmToArm64Assembler::QDSUB(int cc, int Rd, int Rm, int Rn)
{ {
NOT_IMPLEMENTED(); //Not required NOT_IMPLEMENTED(); //Not required
} }
@ -771,7 +771,7 @@ void ArmToAarch64Assembler::QDSUB(int cc, int Rd, int Rm, int Rn)
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// 16 x 16 multiplication // 16 x 16 multiplication
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
void ArmToAarch64Assembler::SMUL(int cc, int xy, void ArmToArm64Assembler::SMUL(int cc, int xy,
int Rd, int Rm, int Rs) int Rd, int Rm, int Rs)
{ {
if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
@ -791,7 +791,7 @@ void ArmToAarch64Assembler::SMUL(int cc, int xy,
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// 32 x 16 multiplication // 32 x 16 multiplication
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
void ArmToAarch64Assembler::SMULW(int cc, int y, int Rd, int Rm, int Rs) void ArmToArm64Assembler::SMULW(int cc, int y, int Rd, int Rm, int Rs)
{ {
if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
@ -807,7 +807,7 @@ void ArmToAarch64Assembler::SMULW(int cc, int y, int Rd, int Rm, int Rs)
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// 16 x 16 multiplication and accumulate // 16 x 16 multiplication and accumulate
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
void ArmToAarch64Assembler::SMLA(int cc, int xy, int Rd, int Rm, int Rs, int Rn) void ArmToArm64Assembler::SMLA(int cc, int xy, int Rd, int Rm, int Rs, int Rn)
{ {
if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
if(xy != xyBB) { NOT_IMPLEMENTED(); return;} //Not required if(xy != xyBB) { NOT_IMPLEMENTED(); return;} //Not required
@ -817,14 +817,14 @@ void ArmToAarch64Assembler::SMLA(int cc, int xy, int Rd, int Rm, int Rs, int Rn)
*mPC++ = A64_MADD_W(Rd, mTmpReg1, mTmpReg2, Rn); *mPC++ = A64_MADD_W(Rd, mTmpReg1, mTmpReg2, Rn);
} }
void ArmToAarch64Assembler::SMLAL(int cc, int xy, void ArmToArm64Assembler::SMLAL(int cc, int xy,
int RdHi, int RdLo, int Rs, int Rm) int RdHi, int RdLo, int Rs, int Rm)
{ {
NOT_IMPLEMENTED(); //Not required NOT_IMPLEMENTED(); //Not required
return; return;
} }
void ArmToAarch64Assembler::SMLAW(int cc, int y, void ArmToArm64Assembler::SMLAW(int cc, int y,
int Rd, int Rm, int Rs, int Rn) int Rd, int Rm, int Rs, int Rn)
{ {
NOT_IMPLEMENTED(); //Not required NOT_IMPLEMENTED(); //Not required
@ -834,7 +834,7 @@ void ArmToAarch64Assembler::SMLAW(int cc, int y,
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// Byte/half word extract and extend // Byte/half word extract and extend
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
void ArmToAarch64Assembler::UXTB16(int cc, int Rd, int Rm, int rotate) void ArmToArm64Assembler::UXTB16(int cc, int Rd, int Rm, int rotate)
{ {
if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
@ -849,7 +849,7 @@ void ArmToAarch64Assembler::UXTB16(int cc, int Rd, int Rm, int rotate)
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// Bit manipulation // Bit manipulation
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
void ArmToAarch64Assembler::UBFX(int cc, int Rd, int Rn, int lsb, int width) void ArmToArm64Assembler::UBFX(int cc, int Rd, int Rn, int lsb, int width)
{ {
if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
*mPC++ = A64_UBFM_W(Rd, Rn, lsb, lsb + width - 1); *mPC++ = A64_UBFM_W(Rd, Rn, lsb, lsb + width - 1);
@ -857,7 +857,7 @@ void ArmToAarch64Assembler::UBFX(int cc, int Rd, int Rn, int lsb, int width)
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// Shifters... // Shifters...
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
int ArmToAarch64Assembler::buildImmediate( int ArmToArm64Assembler::buildImmediate(
uint32_t immediate, uint32_t& rot, uint32_t& imm) uint32_t immediate, uint32_t& rot, uint32_t& imm)
{ {
rot = 0; rot = 0;
@ -866,13 +866,13 @@ int ArmToAarch64Assembler::buildImmediate(
} }
bool ArmToAarch64Assembler::isValidImmediate(uint32_t immediate) bool ArmToArm64Assembler::isValidImmediate(uint32_t immediate)
{ {
uint32_t rot, imm; uint32_t rot, imm;
return buildImmediate(immediate, rot, imm) == 0; return buildImmediate(immediate, rot, imm) == 0;
} }
uint32_t ArmToAarch64Assembler::imm(uint32_t immediate) uint32_t ArmToArm64Assembler::imm(uint32_t immediate)
{ {
mAddrMode.immediate = immediate; mAddrMode.immediate = immediate;
mAddrMode.writeback = false; mAddrMode.writeback = false;
@ -882,7 +882,7 @@ uint32_t ArmToAarch64Assembler::imm(uint32_t immediate)
} }
uint32_t ArmToAarch64Assembler::reg_imm(int Rm, int type, uint32_t shift) uint32_t ArmToArm64Assembler::reg_imm(int Rm, int type, uint32_t shift)
{ {
mAddrMode.reg_imm_Rm = Rm; mAddrMode.reg_imm_Rm = Rm;
mAddrMode.reg_imm_type = type; mAddrMode.reg_imm_type = type;
@ -890,13 +890,13 @@ uint32_t ArmToAarch64Assembler::reg_imm(int Rm, int type, uint32_t shift)
return OPERAND_REG_IMM; return OPERAND_REG_IMM;
} }
uint32_t ArmToAarch64Assembler::reg_rrx(int Rm) uint32_t ArmToArm64Assembler::reg_rrx(int Rm)
{ {
NOT_IMPLEMENTED(); NOT_IMPLEMENTED();
return OPERAND_UNSUPPORTED; return OPERAND_UNSUPPORTED;
} }
uint32_t ArmToAarch64Assembler::reg_reg(int Rm, int type, int Rs) uint32_t ArmToArm64Assembler::reg_reg(int Rm, int type, int Rs)
{ {
NOT_IMPLEMENTED(); //Not required NOT_IMPLEMENTED(); //Not required
return OPERAND_UNSUPPORTED; return OPERAND_UNSUPPORTED;
@ -904,7 +904,7 @@ uint32_t ArmToAarch64Assembler::reg_reg(int Rm, int type, int Rs)
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// Addressing modes... // Addressing modes...
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
uint32_t ArmToAarch64Assembler::immed12_pre(int32_t immed12, int W) uint32_t ArmToArm64Assembler::immed12_pre(int32_t immed12, int W)
{ {
mAddrMode.immediate = immed12; mAddrMode.immediate = immed12;
mAddrMode.writeback = W; mAddrMode.writeback = W;
@ -913,7 +913,7 @@ uint32_t ArmToAarch64Assembler::immed12_pre(int32_t immed12, int W)
return OPERAND_IMM; return OPERAND_IMM;
} }
uint32_t ArmToAarch64Assembler::immed12_post(int32_t immed12) uint32_t ArmToArm64Assembler::immed12_post(int32_t immed12)
{ {
mAddrMode.immediate = immed12; mAddrMode.immediate = immed12;
mAddrMode.writeback = true; mAddrMode.writeback = true;
@ -922,7 +922,7 @@ uint32_t ArmToAarch64Assembler::immed12_post(int32_t immed12)
return OPERAND_IMM; return OPERAND_IMM;
} }
uint32_t ArmToAarch64Assembler::reg_scale_pre(int Rm, int type, uint32_t ArmToArm64Assembler::reg_scale_pre(int Rm, int type,
uint32_t shift, int W) uint32_t shift, int W)
{ {
if(type != 0 || shift != 0 || W != 0) if(type != 0 || shift != 0 || W != 0)
@ -937,13 +937,13 @@ uint32_t ArmToAarch64Assembler::reg_scale_pre(int Rm, int type,
} }
} }
uint32_t ArmToAarch64Assembler::reg_scale_post(int Rm, int type, uint32_t shift) uint32_t ArmToArm64Assembler::reg_scale_post(int Rm, int type, uint32_t shift)
{ {
NOT_IMPLEMENTED(); //Not required NOT_IMPLEMENTED(); //Not required
return OPERAND_UNSUPPORTED; return OPERAND_UNSUPPORTED;
} }
uint32_t ArmToAarch64Assembler::immed8_pre(int32_t immed8, int W) uint32_t ArmToArm64Assembler::immed8_pre(int32_t immed8, int W)
{ {
mAddrMode.immediate = immed8; mAddrMode.immediate = immed8;
mAddrMode.writeback = W; mAddrMode.writeback = W;
@ -952,7 +952,7 @@ uint32_t ArmToAarch64Assembler::immed8_pre(int32_t immed8, int W)
return OPERAND_IMM; return OPERAND_IMM;
} }
uint32_t ArmToAarch64Assembler::immed8_post(int32_t immed8) uint32_t ArmToArm64Assembler::immed8_post(int32_t immed8)
{ {
mAddrMode.immediate = immed8; mAddrMode.immediate = immed8;
mAddrMode.writeback = true; mAddrMode.writeback = true;
@ -961,7 +961,7 @@ uint32_t ArmToAarch64Assembler::immed8_post(int32_t immed8)
return OPERAND_IMM; return OPERAND_IMM;
} }
uint32_t ArmToAarch64Assembler::reg_pre(int Rm, int W) uint32_t ArmToArm64Assembler::reg_pre(int Rm, int W)
{ {
if(W != 0) if(W != 0)
{ {
@ -975,7 +975,7 @@ uint32_t ArmToAarch64Assembler::reg_pre(int Rm, int W)
} }
} }
uint32_t ArmToAarch64Assembler::reg_post(int Rm) uint32_t ArmToArm64Assembler::reg_post(int Rm)
{ {
NOT_IMPLEMENTED(); //Not required NOT_IMPLEMENTED(); //Not required
return OPERAND_UNSUPPORTED; return OPERAND_UNSUPPORTED;
@ -999,7 +999,7 @@ static const uint32_t dataTransferOpCode [] =
((0x38u << 24) | (0x1 << 21) | (0x6 << 13) | (0x1 << 12) |(0x1 << 11)), ((0x38u << 24) | (0x1 << 21) | (0x6 << 13) | (0x1 << 12) |(0x1 << 11)),
((0x78u << 24) | (0x1 << 21) | (0x6 << 13) | (0x0 << 12) |(0x1 << 11)) ((0x78u << 24) | (0x1 << 21) | (0x6 << 13) | (0x0 << 12) |(0x1 << 11))
}; };
uint32_t ArmToAarch64Assembler::A64_LDRSTR_Wm_SXTW_0(uint32_t op, uint32_t ArmToArm64Assembler::A64_LDRSTR_Wm_SXTW_0(uint32_t op,
uint32_t size, uint32_t Rt, uint32_t size, uint32_t Rt,
uint32_t Rn, uint32_t Rm) uint32_t Rn, uint32_t Rm)
{ {
@ -1017,7 +1017,7 @@ uint32_t ArmToAarch64Assembler::A64_LDRSTR_Wm_SXTW_0(uint32_t op,
} }
} }
uint32_t ArmToAarch64Assembler::A64_STR_IMM_PreIndex(uint32_t Rt, uint32_t ArmToArm64Assembler::A64_STR_IMM_PreIndex(uint32_t Rt,
uint32_t Rn, int32_t simm) uint32_t Rn, int32_t simm)
{ {
if(Rn == 31) if(Rn == 31)
@ -1029,7 +1029,7 @@ uint32_t ArmToAarch64Assembler::A64_STR_IMM_PreIndex(uint32_t Rt,
return (0xB8 << 24) | (imm9 << 12) | (0x3 << 10) | (Rn << 5) | Rt; return (0xB8 << 24) | (imm9 << 12) | (0x3 << 10) | (Rn << 5) | Rt;
} }
uint32_t ArmToAarch64Assembler::A64_LDR_IMM_PostIndex(uint32_t Rt, uint32_t ArmToArm64Assembler::A64_LDR_IMM_PostIndex(uint32_t Rt,
uint32_t Rn, int32_t simm) uint32_t Rn, int32_t simm)
{ {
if(Rn == 31) if(Rn == 31)
@ -1042,7 +1042,7 @@ uint32_t ArmToAarch64Assembler::A64_LDR_IMM_PostIndex(uint32_t Rt,
(imm9 << 12) | (0x1 << 10) | (Rn << 5) | Rt; (imm9 << 12) | (0x1 << 10) | (Rn << 5) | Rt;
} }
uint32_t ArmToAarch64Assembler::A64_ADD_X_Wm_SXTW(uint32_t Rd, uint32_t ArmToArm64Assembler::A64_ADD_X_Wm_SXTW(uint32_t Rd,
uint32_t Rn, uint32_t Rn,
uint32_t Rm, uint32_t Rm,
uint32_t amount) uint32_t amount)
@ -1053,7 +1053,7 @@ uint32_t ArmToAarch64Assembler::A64_ADD_X_Wm_SXTW(uint32_t Rd,
} }
uint32_t ArmToAarch64Assembler::A64_SUB_X_Wm_SXTW(uint32_t Rd, uint32_t ArmToArm64Assembler::A64_SUB_X_Wm_SXTW(uint32_t Rd,
uint32_t Rn, uint32_t Rn,
uint32_t Rm, uint32_t Rm,
uint32_t amount) uint32_t amount)
@ -1064,13 +1064,13 @@ uint32_t ArmToAarch64Assembler::A64_SUB_X_Wm_SXTW(uint32_t Rd,
} }
uint32_t ArmToAarch64Assembler::A64_B_COND(uint32_t cc, uint32_t offset) uint32_t ArmToArm64Assembler::A64_B_COND(uint32_t cc, uint32_t offset)
{ {
LOG_INSTR("B.%s #.+%d\n", cc_codes[cc], offset); LOG_INSTR("B.%s #.+%d\n", cc_codes[cc], offset);
return (0x54 << 24) | ((offset/4) << 5) | (cc); return (0x54 << 24) | ((offset/4) << 5) | (cc);
} }
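
As a worked example of the B.cond encoding emitted above (0x54 in the top byte, the word offset shifted into bit 5, the condition in the low bits), a branch 8 bytes forward on EQ encodes as 0x54000040:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint32_t cc_eq  = 0;                        // EQ condition code
        const uint32_t offset = 8;                        // bytes ahead; stored as offset/4 words
        const uint32_t insn   = (0x54u << 24) | ((offset / 4) << 5) | cc_eq;
        std::printf("B.EQ #.+8 -> 0x%08x\n", (unsigned)insn);   // prints 0x54000040
        return 0;
    }
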
uint32_t ArmToAarch64Assembler::A64_ADD_X(uint32_t Rd, uint32_t Rn, uint32_t ArmToArm64Assembler::A64_ADD_X(uint32_t Rd, uint32_t Rn,
uint32_t Rm, uint32_t shift, uint32_t Rm, uint32_t shift,
uint32_t amount) uint32_t amount)
{ {
@ -1079,21 +1079,21 @@ uint32_t ArmToAarch64Assembler::A64_ADD_X(uint32_t Rd, uint32_t Rn,
return ((0x8B << 24) | (shift << 22) | ( Rm << 16) | return ((0x8B << 24) | (shift << 22) | ( Rm << 16) |
(amount << 10) |(Rn << 5) | Rd); (amount << 10) |(Rn << 5) | Rd);
} }
uint32_t ArmToAarch64Assembler::A64_ADD_IMM_X(uint32_t Rd, uint32_t Rn, uint32_t ArmToArm64Assembler::A64_ADD_IMM_X(uint32_t Rd, uint32_t Rn,
uint32_t imm, uint32_t shift) uint32_t imm, uint32_t shift)
{ {
LOG_INSTR("ADD X%d, X%d, #%d, LSL #%d\n", Rd, Rn, imm, shift); LOG_INSTR("ADD X%d, X%d, #%d, LSL #%d\n", Rd, Rn, imm, shift);
return (0x91 << 24) | ((shift/12) << 22) | (imm << 10) | (Rn << 5) | Rd; return (0x91 << 24) | ((shift/12) << 22) | (imm << 10) | (Rn << 5) | Rd;
} }
uint32_t ArmToAarch64Assembler::A64_SUB_IMM_X(uint32_t Rd, uint32_t Rn, uint32_t ArmToArm64Assembler::A64_SUB_IMM_X(uint32_t Rd, uint32_t Rn,
uint32_t imm, uint32_t shift) uint32_t imm, uint32_t shift)
{ {
LOG_INSTR("SUB X%d, X%d, #%d, LSL #%d\n", Rd, Rn, imm, shift); LOG_INSTR("SUB X%d, X%d, #%d, LSL #%d\n", Rd, Rn, imm, shift);
return (0xD1 << 24) | ((shift/12) << 22) | (imm << 10) | (Rn << 5) | Rd; return (0xD1 << 24) | ((shift/12) << 22) | (imm << 10) | (Rn << 5) | Rd;
} }
uint32_t ArmToAarch64Assembler::A64_ADD_W(uint32_t Rd, uint32_t Rn, uint32_t ArmToArm64Assembler::A64_ADD_W(uint32_t Rd, uint32_t Rn,
uint32_t Rm, uint32_t shift, uint32_t Rm, uint32_t shift,
uint32_t amount) uint32_t amount)
{ {
@ -1103,7 +1103,7 @@ uint32_t ArmToAarch64Assembler::A64_ADD_W(uint32_t Rd, uint32_t Rn,
(amount << 10) |(Rn << 5) | Rd); (amount << 10) |(Rn << 5) | Rd);
} }
uint32_t ArmToAarch64Assembler::A64_SUB_W(uint32_t Rd, uint32_t Rn, uint32_t ArmToArm64Assembler::A64_SUB_W(uint32_t Rd, uint32_t Rn,
uint32_t Rm, uint32_t shift, uint32_t Rm, uint32_t shift,
uint32_t amount, uint32_t amount,
uint32_t setflag) uint32_t setflag)
@ -1124,7 +1124,7 @@ uint32_t ArmToAarch64Assembler::A64_SUB_W(uint32_t Rd, uint32_t Rn,
} }
} }
uint32_t ArmToAarch64Assembler::A64_AND_W(uint32_t Rd, uint32_t Rn, uint32_t ArmToArm64Assembler::A64_AND_W(uint32_t Rd, uint32_t Rn,
uint32_t Rm, uint32_t shift, uint32_t Rm, uint32_t shift,
uint32_t amount) uint32_t amount)
{ {
@ -1134,7 +1134,7 @@ uint32_t ArmToAarch64Assembler::A64_AND_W(uint32_t Rd, uint32_t Rn,
(amount << 10) |(Rn << 5) | Rd); (amount << 10) |(Rn << 5) | Rd);
} }
uint32_t ArmToAarch64Assembler::A64_ORR_W(uint32_t Rd, uint32_t Rn, uint32_t ArmToArm64Assembler::A64_ORR_W(uint32_t Rd, uint32_t Rn,
uint32_t Rm, uint32_t shift, uint32_t Rm, uint32_t shift,
uint32_t amount) uint32_t amount)
{ {
@ -1144,7 +1144,7 @@ uint32_t ArmToAarch64Assembler::A64_ORR_W(uint32_t Rd, uint32_t Rn,
(amount << 10) |(Rn << 5) | Rd); (amount << 10) |(Rn << 5) | Rd);
} }
uint32_t ArmToAarch64Assembler::A64_ORN_W(uint32_t Rd, uint32_t Rn, uint32_t ArmToArm64Assembler::A64_ORN_W(uint32_t Rd, uint32_t Rn,
uint32_t Rm, uint32_t shift, uint32_t Rm, uint32_t shift,
uint32_t amount) uint32_t amount)
{ {
@ -1154,76 +1154,76 @@ uint32_t ArmToAarch64Assembler::A64_ORN_W(uint32_t Rd, uint32_t Rn,
(amount << 10) |(Rn << 5) | Rd); (amount << 10) |(Rn << 5) | Rd);
} }
uint32_t ArmToAarch64Assembler::A64_CSEL_X(uint32_t Rd, uint32_t Rn, uint32_t ArmToArm64Assembler::A64_CSEL_X(uint32_t Rd, uint32_t Rn,
uint32_t Rm, uint32_t cond) uint32_t Rm, uint32_t cond)
{ {
LOG_INSTR("CSEL X%d, X%d, X%d, %s\n", Rd, Rn, Rm, cc_codes[cond]); LOG_INSTR("CSEL X%d, X%d, X%d, %s\n", Rd, Rn, Rm, cc_codes[cond]);
return ((0x9A << 24)|(0x1 << 23)|(Rm << 16) |(cond << 12)| (Rn << 5) | Rd); return ((0x9A << 24)|(0x1 << 23)|(Rm << 16) |(cond << 12)| (Rn << 5) | Rd);
} }
uint32_t ArmToAarch64Assembler::A64_CSEL_W(uint32_t Rd, uint32_t Rn, uint32_t ArmToArm64Assembler::A64_CSEL_W(uint32_t Rd, uint32_t Rn,
uint32_t Rm, uint32_t cond) uint32_t Rm, uint32_t cond)
{ {
LOG_INSTR("CSEL W%d, W%d, W%d, %s\n", Rd, Rn, Rm, cc_codes[cond]); LOG_INSTR("CSEL W%d, W%d, W%d, %s\n", Rd, Rn, Rm, cc_codes[cond]);
return ((0x1A << 24)|(0x1 << 23)|(Rm << 16) |(cond << 12)| (Rn << 5) | Rd); return ((0x1A << 24)|(0x1 << 23)|(Rm << 16) |(cond << 12)| (Rn << 5) | Rd);
} }
uint32_t ArmToAarch64Assembler::A64_RET(uint32_t Rn) uint32_t ArmToArm64Assembler::A64_RET(uint32_t Rn)
{ {
LOG_INSTR("RET X%d\n", Rn); LOG_INSTR("RET X%d\n", Rn);
return ((0xD6 << 24) | (0x1 << 22) | (0x1F << 16) | (Rn << 5)); return ((0xD6 << 24) | (0x1 << 22) | (0x1F << 16) | (Rn << 5));
} }
uint32_t ArmToAarch64Assembler::A64_MOVZ_X(uint32_t Rd, uint32_t imm, uint32_t ArmToArm64Assembler::A64_MOVZ_X(uint32_t Rd, uint32_t imm,
uint32_t shift) uint32_t shift)
{ {
LOG_INSTR("MOVZ X%d, #0x%x, LSL #%d\n", Rd, imm, shift); LOG_INSTR("MOVZ X%d, #0x%x, LSL #%d\n", Rd, imm, shift);
return(0xD2 << 24) | (0x1 << 23) | ((shift/16) << 21) | (imm << 5) | Rd; return(0xD2 << 24) | (0x1 << 23) | ((shift/16) << 21) | (imm << 5) | Rd;
} }
uint32_t ArmToAarch64Assembler::A64_MOVK_W(uint32_t Rd, uint32_t imm, uint32_t ArmToArm64Assembler::A64_MOVK_W(uint32_t Rd, uint32_t imm,
uint32_t shift) uint32_t shift)
{ {
LOG_INSTR("MOVK W%d, #0x%x, LSL #%d\n", Rd, imm, shift); LOG_INSTR("MOVK W%d, #0x%x, LSL #%d\n", Rd, imm, shift);
return (0x72 << 24) | (0x1 << 23) | ((shift/16) << 21) | (imm << 5) | Rd; return (0x72 << 24) | (0x1 << 23) | ((shift/16) << 21) | (imm << 5) | Rd;
} }
uint32_t ArmToAarch64Assembler::A64_MOVZ_W(uint32_t Rd, uint32_t imm, uint32_t ArmToArm64Assembler::A64_MOVZ_W(uint32_t Rd, uint32_t imm,
uint32_t shift) uint32_t shift)
{ {
LOG_INSTR("MOVZ W%d, #0x%x, LSL #%d\n", Rd, imm, shift); LOG_INSTR("MOVZ W%d, #0x%x, LSL #%d\n", Rd, imm, shift);
return(0x52 << 24) | (0x1 << 23) | ((shift/16) << 21) | (imm << 5) | Rd; return(0x52 << 24) | (0x1 << 23) | ((shift/16) << 21) | (imm << 5) | Rd;
} }
uint32_t ArmToAarch64Assembler::A64_SMADDL(uint32_t Rd, uint32_t Rn, uint32_t ArmToArm64Assembler::A64_SMADDL(uint32_t Rd, uint32_t Rn,
uint32_t Rm, uint32_t Ra) uint32_t Rm, uint32_t Ra)
{ {
LOG_INSTR("SMADDL X%d, W%d, W%d, X%d\n",Rd, Rn, Rm, Ra); LOG_INSTR("SMADDL X%d, W%d, W%d, X%d\n",Rd, Rn, Rm, Ra);
return ((0x9B << 24) | (0x1 << 21) | (Rm << 16)|(Ra << 10)|(Rn << 5) | Rd); return ((0x9B << 24) | (0x1 << 21) | (Rm << 16)|(Ra << 10)|(Rn << 5) | Rd);
} }
uint32_t ArmToAarch64Assembler::A64_MADD_W(uint32_t Rd, uint32_t Rn, uint32_t ArmToArm64Assembler::A64_MADD_W(uint32_t Rd, uint32_t Rn,
uint32_t Rm, uint32_t Ra) uint32_t Rm, uint32_t Ra)
{ {
LOG_INSTR("MADD W%d, W%d, W%d, W%d\n",Rd, Rn, Rm, Ra); LOG_INSTR("MADD W%d, W%d, W%d, W%d\n",Rd, Rn, Rm, Ra);
return ((0x1B << 24) | (Rm << 16) | (Ra << 10) |(Rn << 5) | Rd); return ((0x1B << 24) | (Rm << 16) | (Ra << 10) |(Rn << 5) | Rd);
} }
uint32_t ArmToAarch64Assembler::A64_SBFM_W(uint32_t Rd, uint32_t Rn, uint32_t ArmToArm64Assembler::A64_SBFM_W(uint32_t Rd, uint32_t Rn,
uint32_t immr, uint32_t imms) uint32_t immr, uint32_t imms)
{ {
LOG_INSTR("SBFM W%d, W%d, #%d, #%d\n", Rd, Rn, immr, imms); LOG_INSTR("SBFM W%d, W%d, #%d, #%d\n", Rd, Rn, immr, imms);
return ((0x13 << 24) | (immr << 16) | (imms << 10) | (Rn << 5) | Rd); return ((0x13 << 24) | (immr << 16) | (imms << 10) | (Rn << 5) | Rd);
} }
uint32_t ArmToAarch64Assembler::A64_UBFM_W(uint32_t Rd, uint32_t Rn, uint32_t ArmToArm64Assembler::A64_UBFM_W(uint32_t Rd, uint32_t Rn,
uint32_t immr, uint32_t imms) uint32_t immr, uint32_t imms)
{ {
LOG_INSTR("UBFM W%d, W%d, #%d, #%d\n", Rd, Rn, immr, imms); LOG_INSTR("UBFM W%d, W%d, #%d, #%d\n", Rd, Rn, immr, imms);
return ((0x53 << 24) | (immr << 16) | (imms << 10) | (Rn << 5) | Rd); return ((0x53 << 24) | (immr << 16) | (imms << 10) | (Rn << 5) | Rd);
} }
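
Tying the pieces together: UBFX is mapped earlier in this file to UBFM with immr = lsb and imms = lsb + width - 1, and A64_UBFM_W packs those fields as shown above. A small worked check with arbitrarily chosen values:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint32_t Rd = 0, Rn = 1, lsb = 3, width = 5;   // UBFX W0, W1, #3, #5
        const uint32_t immr = lsb, imms = lsb + width - 1;   // 3 and 7
        const uint32_t insn = (0x53u << 24) | (immr << 16) | (imms << 10) | (Rn << 5) | Rd;
        std::printf("UBFM W0, W1, #3, #7 -> 0x%08x\n", (unsigned)insn);   // 0x53031c20
        return 0;
    }
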
uint32_t ArmToAarch64Assembler::A64_UBFM_X(uint32_t Rd, uint32_t Rn, uint32_t ArmToArm64Assembler::A64_UBFM_X(uint32_t Rd, uint32_t Rn,
uint32_t immr, uint32_t imms) uint32_t immr, uint32_t imms)
{ {
LOG_INSTR("UBFM X%d, X%d, #%d, #%d\n", Rd, Rn, immr, imms); LOG_INSTR("UBFM X%d, X%d, #%d, #%d\n", Rd, Rn, immr, imms);
@ -1231,7 +1231,7 @@ uint32_t ArmToAarch64Assembler::A64_UBFM_X(uint32_t Rd, uint32_t Rn,
(immr << 16) | (imms << 10) | (Rn << 5) | Rd); (immr << 16) | (imms << 10) | (Rn << 5) | Rd);
} }
uint32_t ArmToAarch64Assembler::A64_EXTR_W(uint32_t Rd, uint32_t Rn, uint32_t ArmToArm64Assembler::A64_EXTR_W(uint32_t Rd, uint32_t Rn,
uint32_t Rm, uint32_t lsb) uint32_t Rm, uint32_t lsb)
{ {
LOG_INSTR("EXTR W%d, W%d, W%d, #%d\n", Rd, Rn, Rm, lsb); LOG_INSTR("EXTR W%d, W%d, W%d, #%d\n", Rd, Rn, Rm, lsb);

@ -26,8 +26,8 @@
* SUCH DAMAGE. * SUCH DAMAGE.
*/ */
#ifndef ANDROID_ARMTOAARCH64ASSEMBLER_H #ifndef ANDROID_ARMTOARM64ASSEMBLER_H
#define ANDROID_ARMTOAARCH64ASSEMBLER_H #define ANDROID_ARMTOARM64ASSEMBLER_H
#include <stdint.h> #include <stdint.h>
#include <sys/types.h> #include <sys/types.h>
@ -44,12 +44,12 @@ namespace android {
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
class ArmToAarch64Assembler : public ARMAssemblerInterface class ArmToArm64Assembler : public ARMAssemblerInterface
{ {
public: public:
ArmToAarch64Assembler(const sp<Assembly>& assembly); ArmToArm64Assembler(const sp<Assembly>& assembly);
ArmToAarch64Assembler(void *base); ArmToArm64Assembler(void *base);
virtual ~ArmToAarch64Assembler(); virtual ~ArmToArm64Assembler();
uint32_t* base() const; uint32_t* base() const;
uint32_t* pc() const; uint32_t* pc() const;
@ -176,8 +176,8 @@ public:
virtual void UBFX(int cc, int Rd, int Rn, int lsb, int width); virtual void UBFX(int cc, int Rd, int Rn, int lsb, int width);
private: private:
ArmToAarch64Assembler(const ArmToAarch64Assembler& rhs); ArmToArm64Assembler(const ArmToArm64Assembler& rhs);
ArmToAarch64Assembler& operator = (const ArmToAarch64Assembler& rhs); ArmToArm64Assembler& operator = (const ArmToArm64Assembler& rhs);
// ----------------------------------------------------------------------- // -----------------------------------------------------------------------
// helper functions // helper functions
@ -189,7 +189,7 @@ private:
int Rd, int Rn, uint32_t Op2); int Rd, int Rn, uint32_t Op2);
// ----------------------------------------------------------------------- // -----------------------------------------------------------------------
// Aarch64 instructions // Arm64 instructions
// ----------------------------------------------------------------------- // -----------------------------------------------------------------------
uint32_t A64_B_COND(uint32_t cc, uint32_t offset); uint32_t A64_B_COND(uint32_t cc, uint32_t offset);
uint32_t A64_RET(uint32_t Rn); uint32_t A64_RET(uint32_t Rn);
@ -287,4 +287,4 @@ private:
}; // namespace android }; // namespace android
#endif //ANDROID_AARCH64ASSEMBLER_H #endif //ANDROID_ARM64ASSEMBLER_H

@ -267,7 +267,7 @@ static void decode_token(uint32_t code, char *token, char *instr_part)
return; return;
} }
int aarch64_disassemble(uint32_t code, char* instr) int arm64_disassemble(uint32_t code, char* instr)
{ {
uint32_t i; uint32_t i;
char token[256]; char token[256];

@ -26,10 +26,10 @@
* SUCH DAMAGE. * SUCH DAMAGE.
*/ */
#ifndef ANDROID_AARCH64DISASSEMBLER_H #ifndef ANDROID_ARM64DISASSEMBLER_H
#define ANDROID_AARCH64DISASSEMBLER_H #define ANDROID_ARM64DISASSEMBLER_H
#include <inttypes.h> #include <inttypes.h>
int aarch64_disassemble(uint32_t code, char* instr); int arm64_disassemble(uint32_t code, char* instr);
#endif //ANDROID_AARCH64ASSEMBLER_H #endif //ANDROID_ARM64ASSEMBLER_H
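
A hypothetical caller of the renamed entry point, assuming it is linked against Arm64Disassembler.cpp; the exact text produced depends on the disassembler's own formatting:

    #include <cstdint>
    #include <cstdio>

    int arm64_disassemble(uint32_t code, char* instr);    // declared in Arm64Disassembler.h

    int main() {
        char text[256];
        arm64_disassemble(0x54000040u, text);             // the B.EQ word from the earlier example
        std::printf("%s\n", text);                        // expected to read roughly like "b.eq #.+8"
        return 0;
    }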

@ -901,7 +901,7 @@ void GGLAssembler::build_and_immediate(int d, int s, uint32_t mask, int bits)
AND( AL, 0, d, s, imm(mask) ); AND( AL, 0, d, s, imm(mask) );
return; return;
} }
else if (getCodegenArch() == CODEGEN_ARCH_AARCH64) { else if (getCodegenArch() == CODEGEN_ARCH_ARM64) {
AND( AL, 0, d, s, imm(mask) ); AND( AL, 0, d, s, imm(mask) );
return; return;
} }

@ -34,7 +34,7 @@
#if defined(__arm__) #if defined(__arm__)
#include "codeflinger/ARMAssembler.h" #include "codeflinger/ARMAssembler.h"
#elif defined(__aarch64__) #elif defined(__aarch64__)
#include "codeflinger/Aarch64Assembler.h" #include "codeflinger/Arm64Assembler.h"
#elif defined(__mips__) #elif defined(__mips__)
#include "codeflinger/MIPSAssembler.h" #include "codeflinger/MIPSAssembler.h"
#endif #endif
@ -128,8 +128,8 @@ extern "C" void scanline_t32cb16_arm(uint16_t *dst, uint32_t *src, size_t ct);
extern "C" void scanline_col32cb16blend_neon(uint16_t *dst, uint32_t *col, size_t ct); extern "C" void scanline_col32cb16blend_neon(uint16_t *dst, uint32_t *col, size_t ct);
extern "C" void scanline_col32cb16blend_arm(uint16_t *dst, uint32_t col, size_t ct); extern "C" void scanline_col32cb16blend_arm(uint16_t *dst, uint32_t col, size_t ct);
#elif defined(__aarch64__) #elif defined(__aarch64__)
extern "C" void scanline_t32cb16blend_aarch64(uint16_t*, uint32_t*, size_t); extern "C" void scanline_t32cb16blend_arm64(uint16_t*, uint32_t*, size_t);
extern "C" void scanline_col32cb16blend_aarch64(uint16_t *dst, uint32_t col, size_t ct); extern "C" void scanline_col32cb16blend_arm64(uint16_t *dst, uint32_t col, size_t ct);
#elif defined(__mips__) #elif defined(__mips__)
extern "C" void scanline_t32cb16blend_mips(uint16_t*, uint32_t*, size_t); extern "C" void scanline_t32cb16blend_mips(uint16_t*, uint32_t*, size_t);
#endif #endif
@ -405,7 +405,7 @@ static void pick_scanline(context_t* c)
#if defined(__mips__) #if defined(__mips__)
GGLAssembler assembler( new ArmToMipsAssembler(a) ); GGLAssembler assembler( new ArmToMipsAssembler(a) );
#elif defined(__aarch64__) #elif defined(__aarch64__)
GGLAssembler assembler( new ArmToAarch64Assembler(a) ); GGLAssembler assembler( new ArmToArm64Assembler(a) );
#endif #endif
// generate the scanline code for the given needs // generate the scanline code for the given needs
int err = assembler.scanline(c->state.needs, c); int err = assembler.scanline(c->state.needs, c);
@ -2098,7 +2098,7 @@ void scanline_col32cb16blend(context_t* c)
scanline_col32cb16blend_arm(dst, GGL_RGBA_TO_HOST(c->packed8888), ct); scanline_col32cb16blend_arm(dst, GGL_RGBA_TO_HOST(c->packed8888), ct);
#endif // defined(__ARM_HAVE_NEON) && BYTE_ORDER == LITTLE_ENDIAN #endif // defined(__ARM_HAVE_NEON) && BYTE_ORDER == LITTLE_ENDIAN
#elif ((ANDROID_CODEGEN >= ANDROID_CODEGEN_ASM) && defined(__aarch64__)) #elif ((ANDROID_CODEGEN >= ANDROID_CODEGEN_ASM) && defined(__aarch64__))
scanline_col32cb16blend_aarch64(dst, GGL_RGBA_TO_HOST(c->packed8888), ct); scanline_col32cb16blend_arm64(dst, GGL_RGBA_TO_HOST(c->packed8888), ct);
#else #else
uint32_t s = GGL_RGBA_TO_HOST(c->packed8888); uint32_t s = GGL_RGBA_TO_HOST(c->packed8888);
int sA = (s>>24); int sA = (s>>24);
@ -2186,7 +2186,7 @@ void scanline_t32cb16blend(context_t* c)
#ifdef __arm__ #ifdef __arm__
scanline_t32cb16blend_arm(dst, src, ct); scanline_t32cb16blend_arm(dst, src, ct);
#elif defined(__aarch64__) #elif defined(__aarch64__)
scanline_t32cb16blend_aarch64(dst, src, ct); scanline_t32cb16blend_arm64(dst, src, ct);
#elif defined(__mips__) #elif defined(__mips__)
scanline_t32cb16blend_mips(dst, src, ct); scanline_t32cb16blend_mips(dst, src, ct);
#endif #endif

@ -1,3 +1,3 @@
ifeq ($(TARGET_ARCH),aarch64) ifeq ($(TARGET_ARCH),arm64)
include $(all-subdir-makefiles) include $(all-subdir-makefiles)
endif endif

@ -2,7 +2,7 @@ LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS) include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \ LOCAL_SRC_FILES:= \
aarch64_assembler_test.cpp\ arm64_assembler_test.cpp\
asm_test_jacket.S asm_test_jacket.S
LOCAL_SHARED_LIBRARIES := \ LOCAL_SHARED_LIBRARIES := \
@ -12,7 +12,7 @@ LOCAL_SHARED_LIBRARIES := \
LOCAL_C_INCLUDES := \ LOCAL_C_INCLUDES := \
system/core/libpixelflinger system/core/libpixelflinger
LOCAL_MODULE:= test-pixelflinger-aarch64-assembler-test LOCAL_MODULE:= test-pixelflinger-arm64-assembler-test
LOCAL_MODULE_TAGS := tests LOCAL_MODULE_TAGS := tests

@ -40,7 +40,7 @@
#include <inttypes.h> #include <inttypes.h>
#include "codeflinger/ARMAssemblerInterface.h" #include "codeflinger/ARMAssemblerInterface.h"
#include "codeflinger/Aarch64Assembler.h" #include "codeflinger/Arm64Assembler.h"
using namespace android; using namespace android;
#define TESTS_DATAOP_ENABLE 1 #define TESTS_DATAOP_ENABLE 1
@ -712,7 +712,7 @@ int main(void)
{ {
uint32_t i; uint32_t i;
/* Allocate memory to store instructions generated by ArmToAarch64Assembler */ /* Allocate memory to store instructions generated by ArmToArm64Assembler */
{ {
int fd = ashmem_create_region("code cache", instrMemSize); int fd = ashmem_create_region("code cache", instrMemSize);
if(fd < 0) if(fd < 0)
@ -723,7 +723,7 @@ int main(void)
MAP_PRIVATE, fd, 0); MAP_PRIVATE, fd, 0);
} }
ArmToAarch64Assembler a64asm(instrMem); ArmToArm64Assembler a64asm(instrMem);
if(TESTS_DATAOP_ENABLE) if(TESTS_DATAOP_ENABLE)
{ {

@ -3,13 +3,13 @@ include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \ LOCAL_SRC_FILES:= \
col32cb16blend_test.c \ col32cb16blend_test.c \
../../../arch-aarch64/col32cb16blend.S ../../../arch-arm64/col32cb16blend.S
LOCAL_SHARED_LIBRARIES := LOCAL_SHARED_LIBRARIES :=
LOCAL_C_INCLUDES := LOCAL_C_INCLUDES :=
LOCAL_MODULE:= test-pixelflinger-aarch64-col32cb16blend LOCAL_MODULE:= test-pixelflinger-arm64-col32cb16blend
LOCAL_MODULE_TAGS := tests LOCAL_MODULE_TAGS := tests

@ -60,7 +60,7 @@ struct test_t tests[] =
{"Count 10, Src=Rand, Dst=Rand", 0x12345678, 0x9ABC, 10} {"Count 10, Src=Rand, Dst=Rand", 0x12345678, 0x9ABC, 10}
}; };
void scanline_col32cb16blend_aarch64(uint16_t *dst, int32_t src, size_t count); void scanline_col32cb16blend_arm64(uint16_t *dst, int32_t src, size_t count);
void scanline_col32cb16blend_c(uint16_t * dst, int32_t src, size_t count) void scanline_col32cb16blend_c(uint16_t * dst, int32_t src, size_t count)
{ {
int srcAlpha = (src>>24); int srcAlpha = (src>>24);
@ -103,7 +103,7 @@ void scanline_col32cb16blend_test()
scanline_col32cb16blend_c(dst_c, test.src_color, test.count); scanline_col32cb16blend_c(dst_c, test.src_color, test.count);
scanline_col32cb16blend_aarch64(dst_asm, test.src_color, test.count); scanline_col32cb16blend_arm64(dst_asm, test.src_color, test.count);
if(memcmp(dst_c, dst_asm, sizeof(dst_c)) == 0) if(memcmp(dst_c, dst_asm, sizeof(dst_c)) == 0)

@ -2,15 +2,15 @@ LOCAL_PATH:= $(call my-dir)
include $(CLEAR_VARS) include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \ LOCAL_SRC_FILES:= \
aarch64_diassembler_test.cpp \ arm64_diassembler_test.cpp \
../../../codeflinger/Aarch64Disassembler.cpp ../../../codeflinger/Arm64Disassembler.cpp
LOCAL_SHARED_LIBRARIES := LOCAL_SHARED_LIBRARIES :=
LOCAL_C_INCLUDES := \ LOCAL_C_INCLUDES := \
system/core/libpixelflinger/codeflinger system/core/libpixelflinger/codeflinger
LOCAL_MODULE:= test-pixelflinger-aarch64-disassembler-test LOCAL_MODULE:= test-pixelflinger-arm64-disassembler-test
LOCAL_MODULE_TAGS := tests LOCAL_MODULE_TAGS := tests

@ -29,7 +29,7 @@
#include <inttypes.h> #include <inttypes.h>
#include <string.h> #include <string.h>
int aarch64_disassemble(uint32_t code, char* instr); int arm64_disassemble(uint32_t code, char* instr);
struct test_table_entry_t struct test_table_entry_t
{ {
@ -298,7 +298,7 @@ int main()
{ {
test_table_entry_t *test; test_table_entry_t *test;
test = &test_table[i]; test = &test_table[i];
aarch64_disassemble(test->code, instr); arm64_disassemble(test->code, instr);
if(strcmp(instr, test->instr) != 0) if(strcmp(instr, test->instr) != 0)
{ {
printf("Test Failed \n" printf("Test Failed \n"

@ -3,13 +3,13 @@ include $(CLEAR_VARS)
LOCAL_SRC_FILES:= \ LOCAL_SRC_FILES:= \
t32cb16blend_test.c \ t32cb16blend_test.c \
../../../arch-aarch64/t32cb16blend.S ../../../arch-arm64/t32cb16blend.S
LOCAL_SHARED_LIBRARIES := LOCAL_SHARED_LIBRARIES :=
LOCAL_C_INCLUDES := LOCAL_C_INCLUDES :=
LOCAL_MODULE:= test-pixelflinger-aarch64-t32cb16blend LOCAL_MODULE:= test-pixelflinger-arm64-t32cb16blend
LOCAL_MODULE_TAGS := tests LOCAL_MODULE_TAGS := tests

@ -61,7 +61,7 @@ struct test_t tests[] =
}; };
void scanline_t32cb16blend_aarch64(uint16_t*, uint32_t*, size_t); void scanline_t32cb16blend_arm64(uint16_t*, uint32_t*, size_t);
void scanline_t32cb16blend_c(uint16_t * dst, uint32_t* src, size_t count) void scanline_t32cb16blend_c(uint16_t * dst, uint32_t* src, size_t count)
{ {
while (count--) while (count--)
@ -112,7 +112,7 @@ void scanline_t32cb16blend_test()
} }
scanline_t32cb16blend_c(dst_c,src,test.count); scanline_t32cb16blend_c(dst_c,src,test.count);
scanline_t32cb16blend_aarch64(dst_asm,src,test.count); scanline_t32cb16blend_arm64(dst_asm,src,test.count);
if(memcmp(dst_c, dst_asm, sizeof(dst_c)) == 0) if(memcmp(dst_c, dst_asm, sizeof(dst_c)) == 0)

@ -10,7 +10,7 @@
#include "codeflinger/GGLAssembler.h" #include "codeflinger/GGLAssembler.h"
#include "codeflinger/ARMAssembler.h" #include "codeflinger/ARMAssembler.h"
#include "codeflinger/MIPSAssembler.h" #include "codeflinger/MIPSAssembler.h"
#include "codeflinger/Aarch64Assembler.h" #include "codeflinger/Arm64Assembler.h"
#if defined(__arm__) || defined(__mips__) || defined(__aarch64__) #if defined(__arm__) || defined(__mips__) || defined(__aarch64__)
# define ANDROID_ARM_CODEGEN 1 # define ANDROID_ARM_CODEGEN 1
@ -57,7 +57,7 @@ static void ggl_test_codegen(uint32_t n, uint32_t p, uint32_t t0, uint32_t t1)
#endif #endif
#if defined(__aarch64__) #if defined(__aarch64__)
GGLAssembler assembler( new ArmToAarch64Assembler(a) ); GGLAssembler assembler( new ArmToArm64Assembler(a) );
#endif #endif
int err = assembler.scanline(needs, (context_t*)c); int err = assembler.scanline(needs, (context_t*)c);
@ -66,7 +66,7 @@ static void ggl_test_codegen(uint32_t n, uint32_t p, uint32_t t0, uint32_t t1)
} }
gglUninit(c); gglUninit(c);
#else #else
printf("This test runs only on ARM, Aarch64 or MIPS\n"); printf("This test runs only on ARM, Arm64 or MIPS\n");
#endif #endif
} }