Diffstat (limited to 'vm/compiler/codegen/arm/CodegenDriver.c')
 -rw-r--r--  vm/compiler/codegen/arm/CodegenDriver.c  611
 1 file changed, 351 insertions(+), 260 deletions(-)
diff --git a/vm/compiler/codegen/arm/CodegenDriver.c b/vm/compiler/codegen/arm/CodegenDriver.c
index 061ffb825..fb7e30e81 100644
--- a/vm/compiler/codegen/arm/CodegenDriver.c
+++ b/vm/compiler/codegen/arm/CodegenDriver.c
@@ -205,7 +205,7 @@ static bool genConversionPortable(CompilationUnit *cUnit, MIR *mir)
static void selfVerificationBranchInsert(LIR *currentLIR, ArmOpcode opcode,
int dest, int src1)
{
- ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
+ ArmLIR *insn = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
insn->opcode = opcode;
insn->operands[0] = dest;
insn->operands[1] = src1;
@@ -904,22 +904,28 @@ static ArmLIR *genUnconditionalBranch(CompilationUnit *cUnit, ArmLIR *target)
/* Perform the actual operation for OP_RETURN_* */
static void genReturnCommon(CompilationUnit *cUnit, MIR *mir)
{
- genDispatchToHandler(cUnit, TEMPLATE_RETURN);
+ if (!cUnit->methodJitMode) {
+ genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
+ TEMPLATE_RETURN_PROF :
+ TEMPLATE_RETURN);
#if defined(WITH_JIT_TUNING)
- gDvmJit.returnOp++;
+ gDvmJit.returnOp++;
#endif
- int dPC = (int) (cUnit->method->insns + mir->offset);
- /* Insert branch, but defer setting of target */
- ArmLIR *branch = genUnconditionalBranch(cUnit, NULL);
- /* Set up the place holder to reconstruct this Dalvik PC */
- ArmLIR *pcrLabel = dvmCompilerNew(sizeof(ArmLIR), true);
- pcrLabel->opcode = kArmPseudoPCReconstructionCell;
- pcrLabel->operands[0] = dPC;
- pcrLabel->operands[1] = mir->offset;
- /* Insert the place holder to the growable list */
- dvmInsertGrowableList(&cUnit->pcReconstructionList, pcrLabel);
- /* Branch to the PC reconstruction code */
- branch->generic.target = (LIR *) pcrLabel;
+ int dPC = (int) (cUnit->method->insns + mir->offset);
+ /* Insert branch, but defer setting of target */
+ ArmLIR *branch = genUnconditionalBranch(cUnit, NULL);
+ /* Set up the place holder to reconstruct this Dalvik PC */
+ ArmLIR *pcrLabel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
+ pcrLabel->opcode = kArmPseudoPCReconstructionCell;
+ pcrLabel->operands[0] = dPC;
+ pcrLabel->operands[1] = mir->offset;
+ /* Insert the place holder to the growable list */
+ dvmInsertGrowableList(&cUnit->pcReconstructionList,
+ (intptr_t) pcrLabel);
+ /* Branch to the PC reconstruction code */
+ branch->generic.target = (LIR *) pcrLabel;
+ }
+ /* TODO: Move result to InterpState for non-void returns */
}
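
Every dispatch site touched by this change now picks between a plain handler template and its _PROF variant based on gDvmJit.methodTraceSupport, so the tracing decision is paid once at JIT time rather than on every executed call. A minimal standalone sketch of that selection pattern; the names below (TemplateOpcode, dispatchToHandler) are illustrative stand-ins, not the real Dalvik declarations:

#include <stdbool.h>
#include <stdio.h>

typedef enum {
    TEMPLATE_RETURN,             /* plain handler                 */
    TEMPLATE_RETURN_PROF         /* handler with tracing built in */
} TemplateOpcode;

static bool methodTraceSupport;  /* stand-in for gDvmJit.methodTraceSupport */

static void dispatchToHandler(TemplateOpcode op)
{
    /* The real driver emits a branch into the template code cache. */
    printf("emit dispatch to template %d\n", (int) op);
}

static void genReturn(void)
{
    /* Choose the instrumented variant once, at JIT time. */
    dispatchToHandler(methodTraceSupport ? TEMPLATE_RETURN_PROF
                                         : TEMPLATE_RETURN);
}

int main(void)
{
    methodTraceSupport = true;
    genReturn();
    return 0;
}
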
static void genProcessArgsNoRange(CompilationUnit *cUnit, MIR *mir,
@@ -982,8 +988,11 @@ static void genProcessArgsRange(CompilationUnit *cUnit, MIR *mir,
/*
* Protect the loadMultiple instruction from being reordered with other
* Dalvik stack accesses.
+ *
+ * This code is also shared by the invoke jumbo instructions, and this
+ * does not need to be done if the invoke jumbo has no arguments.
*/
- loadMultiple(cUnit, r4PC, regMask);
+ if (numArgs != 0) loadMultiple(cUnit, r4PC, regMask);
opRegRegImm(cUnit, kOpSub, r7, rFP,
sizeof(StackSaveArea) + (numArgs << 2));
@@ -1024,7 +1033,7 @@ static void genProcessArgsRange(CompilationUnit *cUnit, MIR *mir,
}
/* Save the last batch of loaded values */
- storeMultiple(cUnit, r7, regMask);
+ if (numArgs != 0) storeMultiple(cUnit, r7, regMask);
/* Generate the loop epilogue - don't use r0 */
if ((numArgs > 4) && (numArgs % 4)) {
@@ -1079,14 +1088,18 @@ static void genInvokeSingletonCommon(CompilationUnit *cUnit, MIR *mir,
* r7 = calleeMethod->registersSize
*/
if (dvmIsNativeMethod(calleeMethod)) {
- genDispatchToHandler(cUnit, TEMPLATE_INVOKE_METHOD_NATIVE);
+ genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
+ TEMPLATE_INVOKE_METHOD_NATIVE_PROF :
+ TEMPLATE_INVOKE_METHOD_NATIVE);
#if defined(WITH_JIT_TUNING)
gDvmJit.invokeNative++;
#endif
} else {
/* For Java callees, set up r2 to be calleeMethod->outsSize */
loadConstant(cUnit, r2, calleeMethod->outsSize);
- genDispatchToHandler(cUnit, TEMPLATE_INVOKE_METHOD_CHAIN);
+ genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
+ TEMPLATE_INVOKE_METHOD_CHAIN_PROF :
+ TEMPLATE_INVOKE_METHOD_CHAIN);
#if defined(WITH_JIT_TUNING)
gDvmJit.invokeMonomorphic++;
#endif
@@ -1145,7 +1158,9 @@ static void genInvokeVirtualCommon(CompilationUnit *cUnit, MIR *mir,
ArmLIR *predictedChainingCell = opRegRegImm(cUnit, kOpAdd, r2, rpc, 0);
predictedChainingCell->generic.target = (LIR *) predChainingCell;
- genDispatchToHandler(cUnit, TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN);
+ genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
+ TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF :
+ TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN);
/* return through lr - jump to the chaining cell */
genUnconditionalBranch(cUnit, predChainingCell);
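
The PREDICTED_CHAIN templates instrumented here implement a one-entry inline cache: the call site caches a predicted (class, target) pair and only falls back to full vtable dispatch on a class mismatch. A C-level sketch of that run-time behavior, using illustrative stand-in structures rather than the real chaining-cell layout:

#include <stdio.h>

typedef struct ClassObject ClassObject;
typedef struct { ClassObject *clazz; } Object;
typedef void (*Code)(Object *self);

typedef struct {
    ClassObject *clazz;          /* predicted receiver class   */
    Code target;                 /* chained callee entry point */
} PredictedChainingCell;

static void chained(Object *self)  { (void) self; printf("direct chained call\n"); }
static void slowPath(Object *self) { (void) self; printf("full vtable dispatch\n"); }

static void invokeVirtual(Object *self, PredictedChainingCell *cell)
{
    if (cell->clazz == self->clazz)
        cell->target(self);      /* prediction hit */
    else
        slowPath(self);          /* miss: punt and let the runtime rechain */
}

int main(void)
{
    ClassObject *c = (ClassObject *) 0x1;   /* opaque tag for the sketch */
    Object o = { c };
    PredictedChainingCell cell = { c, chained };
    invokeVirtual(&o, &cell);
    return 0;
}
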
@@ -1156,12 +1171,13 @@ static void genInvokeVirtualCommon(CompilationUnit *cUnit, MIR *mir,
*/
if (pcrLabel == NULL) {
int dPC = (int) (cUnit->method->insns + mir->offset);
- pcrLabel = dvmCompilerNew(sizeof(ArmLIR), true);
+ pcrLabel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
pcrLabel->opcode = kArmPseudoPCReconstructionCell;
pcrLabel->operands[0] = dPC;
pcrLabel->operands[1] = mir->offset;
/* Insert the place holder to the growable list */
- dvmInsertGrowableList(&cUnit->pcReconstructionList, pcrLabel);
+ dvmInsertGrowableList(&cUnit->pcReconstructionList,
+ (intptr_t) pcrLabel);
}
/* return through lr+2 - punt to the interpreter */
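
Each pcrLabel built in these hunks is a PC-reconstruction cell: it pairs a side exit in the translation with the Dalvik PC at which the interpreter must resume, and the cells are collected on cUnit->pcReconstructionList for later emission. A toy model of that mapping with hypothetical stand-in types (the real cells are pseudo-LIRs that get materialized as code, not a lookup table):

#include <stdint.h>
#include <stdio.h>

typedef struct {
    intptr_t codeAddress;   /* side-exit address in the translation     */
    int dalvikOffset;       /* bytecode offset to resume interpretation */
} PCReconstructionCell;

static int reconstructPC(const PCReconstructionCell *cells, int n,
                         intptr_t bailAddr)
{
    for (int i = 0; i < n; i++)
        if (cells[i].codeAddress == bailAddr)
            return cells[i].dalvikOffset;
    return -1;              /* no matching cell: should not happen */
}

int main(void)
{
    PCReconstructionCell cells[] = { { 0x100, 8 }, { 0x140, 24 } };
    printf("resume at bytecode offset %d\n", reconstructPC(cells, 2, 0x140));
    return 0;
}
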
@@ -1207,7 +1223,9 @@ static void genInvokeVirtualCommon(CompilationUnit *cUnit, MIR *mir,
* r1 = &ChainingCell,
* r4PC = callsiteDPC,
*/
- genDispatchToHandler(cUnit, TEMPLATE_INVOKE_METHOD_NO_OPT);
+ genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
+ TEMPLATE_INVOKE_METHOD_NO_OPT_PROF :
+ TEMPLATE_INVOKE_METHOD_NO_OPT);
#if defined(WITH_JIT_TUNING)
gDvmJit.invokePolymorphic++;
#endif
@@ -1257,8 +1275,7 @@ static void genInterpSingleStep(CompilationUnit *cUnit, MIR *mir)
opReg(cUnit, kOpBlx, r2);
}
-#if defined(WITH_DEADLOCK_PREDICTION) || defined(WITH_MONITOR_TRACKING) || \
- defined(_ARMV5TE) || defined(_ARMV5TE_VFP)
+#if defined(_ARMV5TE) || defined(_ARMV5TE_VFP)
/*
* To prevent a thread in a monitor wait from blocking the Jit from
* resetting the code cache, heavyweight monitor lock will not
@@ -1283,11 +1300,7 @@ static void genMonitorPortable(CompilationUnit *cUnit, MIR *mir)
/* Get dPC of next insn */
loadConstant(cUnit, r4PC, (int)(cUnit->method->insns + mir->offset +
dexGetWidthFromOpcode(OP_MONITOR_ENTER)));
-#if defined(WITH_DEADLOCK_PREDICTION)
- genDispatchToHandler(cUnit, TEMPLATE_MONITOR_ENTER_DEBUG);
-#else
genDispatchToHandler(cUnit, TEMPLATE_MONITOR_ENTER);
-#endif
} else {
LOAD_FUNC_ADDR(cUnit, r2, (int)dvmUnlockObject);
/* Do the call */
@@ -1412,14 +1425,14 @@ static bool handleFmt21h(CompilationUnit *cUnit, MIR *mir)
return false;
}
-static bool handleFmt20bc(CompilationUnit *cUnit, MIR *mir)
+static bool handleFmt20bc_Fmt40sc(CompilationUnit *cUnit, MIR *mir)
{
- /* For OP_THROW_VERIFICATION_ERROR */
+ /* For OP_THROW_VERIFICATION_ERROR & OP_THROW_VERIFICATION_ERROR_JUMBO */
genInterpSingleStep(cUnit, mir);
return false;
}
-static bool handleFmt21c_Fmt31c(CompilationUnit *cUnit, MIR *mir)
+static bool handleFmt21c_Fmt31c_Fmt41c(CompilationUnit *cUnit, MIR *mir)
{
RegLocation rlResult;
RegLocation rlDest;
@@ -1442,7 +1455,8 @@ static bool handleFmt21c_Fmt31c(CompilationUnit *cUnit, MIR *mir)
storeValue(cUnit, rlDest, rlResult);
break;
}
- case OP_CONST_CLASS: {
+ case OP_CONST_CLASS:
+ case OP_CONST_CLASS_JUMBO: {
void *classPtr = (void*)
(cUnit->method->clazz->pDvmDex->pResClasses[mir->dalvikInsn.vB]);
@@ -1457,14 +1471,20 @@ static bool handleFmt21c_Fmt31c(CompilationUnit *cUnit, MIR *mir)
storeValue(cUnit, rlDest, rlResult);
break;
}
+ case OP_SGET:
case OP_SGET_VOLATILE:
- case OP_SGET_OBJECT_VOLATILE:
+ case OP_SGET_JUMBO:
case OP_SGET_OBJECT:
+ case OP_SGET_OBJECT_VOLATILE:
+ case OP_SGET_OBJECT_JUMBO:
case OP_SGET_BOOLEAN:
+ case OP_SGET_BOOLEAN_JUMBO:
case OP_SGET_CHAR:
+ case OP_SGET_CHAR_JUMBO:
case OP_SGET_BYTE:
+ case OP_SGET_BYTE_JUMBO:
case OP_SGET_SHORT:
- case OP_SGET: {
+ case OP_SGET_SHORT_JUMBO: {
int valOffset = offsetof(StaticField, value);
int tReg = dvmCompilerAllocTemp(cUnit);
bool isVolatile;
@@ -1480,7 +1500,7 @@ static bool handleFmt21c_Fmt31c(CompilationUnit *cUnit, MIR *mir)
isVolatile = (mir->dalvikInsn.opcode == OP_SGET_VOLATILE) ||
(mir->dalvikInsn.opcode == OP_SGET_OBJECT_VOLATILE) ||
- dvmIsVolatileField(fieldPtr);
+ dvmIsVolatileField((Field *) fieldPtr);
rlDest = dvmCompilerGetDest(cUnit, mir, 0);
rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kAnyReg, true);
@@ -1496,7 +1516,8 @@ static bool handleFmt21c_Fmt31c(CompilationUnit *cUnit, MIR *mir)
storeValue(cUnit, rlDest, rlResult);
break;
}
- case OP_SGET_WIDE: {
+ case OP_SGET_WIDE:
+ case OP_SGET_WIDE_JUMBO: {
int valOffset = offsetof(StaticField, value);
const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
mir->meta.calleeMethod : cUnit->method;
@@ -1520,14 +1541,20 @@ static bool handleFmt21c_Fmt31c(CompilationUnit *cUnit, MIR *mir)
storeValueWide(cUnit, rlDest, rlResult);
break;
}
+ case OP_SPUT:
+ case OP_SPUT_VOLATILE:
+ case OP_SPUT_JUMBO:
case OP_SPUT_OBJECT:
case OP_SPUT_OBJECT_VOLATILE:
- case OP_SPUT_VOLATILE:
+ case OP_SPUT_OBJECT_JUMBO:
case OP_SPUT_BOOLEAN:
+ case OP_SPUT_BOOLEAN_JUMBO:
case OP_SPUT_CHAR:
+ case OP_SPUT_CHAR_JUMBO:
case OP_SPUT_BYTE:
+ case OP_SPUT_BYTE_JUMBO:
case OP_SPUT_SHORT:
- case OP_SPUT: {
+ case OP_SPUT_SHORT_JUMBO: {
int valOffset = offsetof(StaticField, value);
int tReg = dvmCompilerAllocTemp(cUnit);
int objHead;
@@ -1540,9 +1567,10 @@ static bool handleFmt21c_Fmt31c(CompilationUnit *cUnit, MIR *mir)
isVolatile = (mir->dalvikInsn.opcode == OP_SPUT_VOLATILE) ||
(mir->dalvikInsn.opcode == OP_SPUT_OBJECT_VOLATILE) ||
- dvmIsVolatileField(fieldPtr);
+ dvmIsVolatileField((Field *) fieldPtr);
isSputObject = (mir->dalvikInsn.opcode == OP_SPUT_OBJECT) ||
+ (mir->dalvikInsn.opcode == OP_SPUT_OBJECT_JUMBO) ||
(mir->dalvikInsn.opcode == OP_SPUT_OBJECT_VOLATILE);
if (fieldPtr == NULL) {
@@ -1572,7 +1600,8 @@ static bool handleFmt21c_Fmt31c(CompilationUnit *cUnit, MIR *mir)
break;
}
- case OP_SPUT_WIDE: {
+ case OP_SPUT_WIDE:
+ case OP_SPUT_WIDE_JUMBO: {
int tReg = dvmCompilerAllocTemp(cUnit);
int valOffset = offsetof(StaticField, value);
const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
@@ -1594,12 +1623,13 @@ static bool handleFmt21c_Fmt31c(CompilationUnit *cUnit, MIR *mir)
HEAP_ACCESS_SHADOW(false);
break;
}
- case OP_NEW_INSTANCE: {
+ case OP_NEW_INSTANCE:
+ case OP_NEW_INSTANCE_JUMBO: {
/*
* Obey the calling convention and don't mess with the register
* usage.
*/
- ClassObject *classPtr = (void*)
+ ClassObject *classPtr = (ClassObject *)
(cUnit->method->clazz->pDvmDex->pResClasses[mir->dalvikInsn.vB]);
if (classPtr == NULL) {
@@ -1637,7 +1667,8 @@ static bool handleFmt21c_Fmt31c(CompilationUnit *cUnit, MIR *mir)
storeValue(cUnit, rlDest, rlResult);
break;
}
- case OP_CHECK_CAST: {
+ case OP_CHECK_CAST:
+ case OP_CHECK_CAST_JUMBO: {
/*
* Obey the calling convention and don't mess with the register
* usage.
@@ -1774,11 +1805,7 @@ static bool handleFmt11x(CompilationUnit *cUnit, MIR *mir)
}
case OP_MONITOR_EXIT:
case OP_MONITOR_ENTER:
-#if defined(WITH_DEADLOCK_PREDICTION) || defined(WITH_MONITOR_TRACKING)
- genMonitorPortable(cUnit, mir);
-#else
genMonitor(cUnit, mir);
-#endif
break;
case OP_THROW: {
genInterpSingleStep(cUnit, mir);
@@ -2192,7 +2219,7 @@ static bool handleFmt22b_Fmt22s(CompilationUnit *cUnit, MIR *mir)
return false;
}
-static bool handleFmt22c(CompilationUnit *cUnit, MIR *mir)
+static bool handleFmt22c_Fmt52c(CompilationUnit *cUnit, MIR *mir)
{
Opcode dalvikOpcode = mir->dalvikInsn.opcode;
int fieldOffset = -1;
@@ -2206,22 +2233,36 @@ static bool handleFmt22c(CompilationUnit *cUnit, MIR *mir)
*/
case OP_IGET:
case OP_IGET_VOLATILE:
+ case OP_IGET_JUMBO:
case OP_IGET_WIDE:
+ case OP_IGET_WIDE_JUMBO:
case OP_IGET_OBJECT:
case OP_IGET_OBJECT_VOLATILE:
+ case OP_IGET_OBJECT_JUMBO:
case OP_IGET_BOOLEAN:
+ case OP_IGET_BOOLEAN_JUMBO:
case OP_IGET_BYTE:
+ case OP_IGET_BYTE_JUMBO:
case OP_IGET_CHAR:
+ case OP_IGET_CHAR_JUMBO:
case OP_IGET_SHORT:
+ case OP_IGET_SHORT_JUMBO:
case OP_IPUT:
case OP_IPUT_VOLATILE:
+ case OP_IPUT_JUMBO:
case OP_IPUT_WIDE:
+ case OP_IPUT_WIDE_JUMBO:
case OP_IPUT_OBJECT:
case OP_IPUT_OBJECT_VOLATILE:
+ case OP_IPUT_OBJECT_JUMBO:
case OP_IPUT_BOOLEAN:
+ case OP_IPUT_BOOLEAN_JUMBO:
case OP_IPUT_BYTE:
+ case OP_IPUT_BYTE_JUMBO:
case OP_IPUT_CHAR:
- case OP_IPUT_SHORT: {
+ case OP_IPUT_CHAR_JUMBO:
+ case OP_IPUT_SHORT:
+ case OP_IPUT_SHORT_JUMBO: {
const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
mir->meta.calleeMethod : cUnit->method;
Field *fieldPtr =
@@ -2240,7 +2281,8 @@ static bool handleFmt22c(CompilationUnit *cUnit, MIR *mir)
}
switch (dalvikOpcode) {
- case OP_NEW_ARRAY: {
+ case OP_NEW_ARRAY:
+ case OP_NEW_ARRAY_JUMBO: {
// Generates a call - use explicit registers
RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
RegLocation rlDest = dvmCompilerGetDest(cUnit, mir, 0);
@@ -2283,7 +2325,8 @@ static bool handleFmt22c(CompilationUnit *cUnit, MIR *mir)
storeValue(cUnit, rlDest, rlResult);
break;
}
- case OP_INSTANCE_OF: {
+ case OP_INSTANCE_OF:
+ case OP_INSTANCE_OF_JUMBO: {
// May generate a call - use explicit registers
RegLocation rlSrc = dvmCompilerGetSrc(cUnit, mir, 0);
RegLocation rlDest = dvmCompilerGetDest(cUnit, mir, 0);
@@ -2329,6 +2372,7 @@ static bool handleFmt22c(CompilationUnit *cUnit, MIR *mir)
break;
}
case OP_IGET_WIDE:
+ case OP_IGET_WIDE_JUMBO:
genIGetWide(cUnit, mir, fieldOffset);
break;
case OP_IGET_VOLATILE:
@@ -2336,21 +2380,33 @@ static bool handleFmt22c(CompilationUnit *cUnit, MIR *mir)
isVolatile = true;
// NOTE: intentional fallthrough
case OP_IGET:
+ case OP_IGET_JUMBO:
case OP_IGET_OBJECT:
+ case OP_IGET_OBJECT_JUMBO:
case OP_IGET_BOOLEAN:
+ case OP_IGET_BOOLEAN_JUMBO:
case OP_IGET_BYTE:
+ case OP_IGET_BYTE_JUMBO:
case OP_IGET_CHAR:
+ case OP_IGET_CHAR_JUMBO:
case OP_IGET_SHORT:
+ case OP_IGET_SHORT_JUMBO:
genIGet(cUnit, mir, kWord, fieldOffset, isVolatile);
break;
case OP_IPUT_WIDE:
+ case OP_IPUT_WIDE_JUMBO:
genIPutWide(cUnit, mir, fieldOffset);
break;
case OP_IPUT:
- case OP_IPUT_SHORT:
- case OP_IPUT_CHAR:
- case OP_IPUT_BYTE:
+ case OP_IPUT_JUMBO:
case OP_IPUT_BOOLEAN:
+ case OP_IPUT_BOOLEAN_JUMBO:
+ case OP_IPUT_BYTE:
+ case OP_IPUT_BYTE_JUMBO:
+ case OP_IPUT_CHAR:
+ case OP_IPUT_CHAR_JUMBO:
+ case OP_IPUT_SHORT:
+ case OP_IPUT_SHORT_JUMBO:
genIPut(cUnit, mir, kWord, fieldOffset, false, isVolatile);
break;
case OP_IPUT_VOLATILE:
@@ -2358,6 +2414,7 @@ static bool handleFmt22c(CompilationUnit *cUnit, MIR *mir)
isVolatile = true;
// NOTE: intentional fallthrough
case OP_IPUT_OBJECT:
+ case OP_IPUT_OBJECT_JUMBO:
genIPut(cUnit, mir, kWord, fieldOffset, true, isVolatile);
break;
case OP_IGET_WIDE_VOLATILE:
@@ -2778,8 +2835,8 @@ static void genLandingPadForMispredictedCallee(CompilationUnit *cUnit, MIR *mir,
mir->meta.callsiteInfo->misPredBranchOver->target = (LIR *) target;
}
-static bool handleFmt35c_3rc(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
- ArmLIR *labelList)
+static bool handleFmt35c_3rc_5rc(CompilationUnit *cUnit, MIR *mir,
+ BasicBlock *bb, ArmLIR *labelList)
{
ArmLIR *retChainingCell = NULL;
ArmLIR *pcrLabel = NULL;
@@ -2799,7 +2856,8 @@ static bool handleFmt35c_3rc(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
* ]
*/
case OP_INVOKE_VIRTUAL:
- case OP_INVOKE_VIRTUAL_RANGE: {
+ case OP_INVOKE_VIRTUAL_RANGE:
+ case OP_INVOKE_VIRTUAL_JUMBO: {
ArmLIR *predChainingCell = &labelList[bb->taken->id];
int methodIndex =
cUnit->method->clazz->pDvmDex->pResMethods[dInsn->vB]->
@@ -2830,7 +2888,8 @@ static bool handleFmt35c_3rc(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
* ->pResMethods[BBBB]->methodIndex]
*/
case OP_INVOKE_SUPER:
- case OP_INVOKE_SUPER_RANGE: {
+ case OP_INVOKE_SUPER_RANGE:
+ case OP_INVOKE_SUPER_JUMBO: {
/* Grab the method ptr directly from what the interpreter sees */
const Method *calleeMethod = mir->meta.callsiteInfo->method;
assert(calleeMethod == cUnit->method->clazz->super->vtable[
@@ -2851,7 +2910,8 @@ static bool handleFmt35c_3rc(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
}
/* calleeMethod = method->clazz->pDvmDex->pResMethods[BBBB] */
case OP_INVOKE_DIRECT:
- case OP_INVOKE_DIRECT_RANGE: {
+ case OP_INVOKE_DIRECT_RANGE:
+ case OP_INVOKE_DIRECT_JUMBO: {
/* Grab the method ptr directly from what the interpreter sees */
const Method *calleeMethod = mir->meta.callsiteInfo->method;
assert(calleeMethod ==
@@ -2871,7 +2931,8 @@ static bool handleFmt35c_3rc(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
}
/* calleeMethod = method->clazz->pDvmDex->pResMethods[BBBB] */
case OP_INVOKE_STATIC:
- case OP_INVOKE_STATIC_RANGE: {
+ case OP_INVOKE_STATIC_RANGE:
+ case OP_INVOKE_STATIC_JUMBO: {
/* Grab the method ptr directly from what the interpreter sees */
const Method *calleeMethod = mir->meta.callsiteInfo->method;
assert(calleeMethod ==
@@ -2963,7 +3024,8 @@ static bool handleFmt35c_3rc(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
* 0x47357ebc : .word (0x425719dc)
*/
case OP_INVOKE_INTERFACE:
- case OP_INVOKE_INTERFACE_RANGE: {
+ case OP_INVOKE_INTERFACE_RANGE:
+ case OP_INVOKE_INTERFACE_JUMBO: {
ArmLIR *predChainingCell = &labelList[bb->taken->id];
/*
@@ -2996,7 +3058,9 @@ static bool handleFmt35c_3rc(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
opRegRegImm(cUnit, kOpAdd, r2, rpc, 0);
predictedChainingCell->generic.target = (LIR *) predChainingCell;
- genDispatchToHandler(cUnit, TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN);
+ genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
+ TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN_PROF :
+ TEMPLATE_INVOKE_METHOD_PREDICTED_CHAIN);
/* return through lr - jump to the chaining cell */
genUnconditionalBranch(cUnit, predChainingCell);
@@ -3007,12 +3071,13 @@ static bool handleFmt35c_3rc(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
*/
if (pcrLabel == NULL) {
int dPC = (int) (cUnit->method->insns + mir->offset);
- pcrLabel = dvmCompilerNew(sizeof(ArmLIR), true);
+ pcrLabel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
pcrLabel->opcode = kArmPseudoPCReconstructionCell;
pcrLabel->operands[0] = dPC;
pcrLabel->operands[1] = mir->offset;
/* Insert the place holder to the growable list */
- dvmInsertGrowableList(&cUnit->pcReconstructionList, pcrLabel);
+ dvmInsertGrowableList(&cUnit->pcReconstructionList,
+ (intptr_t) pcrLabel);
}
/* return through lr+2 - punt to the interpreter */
@@ -3098,7 +3163,9 @@ static bool handleFmt35c_3rc(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
* r1 = &ChainingCell,
* r4PC = callsiteDPC,
*/
- genDispatchToHandler(cUnit, TEMPLATE_INVOKE_METHOD_NO_OPT);
+ genDispatchToHandler(cUnit, gDvmJit.methodTraceSupport ?
+ TEMPLATE_INVOKE_METHOD_NO_OPT_PROF :
+ TEMPLATE_INVOKE_METHOD_NO_OPT);
#if defined(WITH_JIT_TUNING)
gDvmJit.invokePolymorphic++;
#endif
@@ -3108,10 +3175,13 @@ static bool handleFmt35c_3rc(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
}
/* NOP */
case OP_INVOKE_DIRECT_EMPTY: {
- return false;
+ if (gDvmJit.methodTraceSupport)
+ genInterpSingleStep(cUnit, mir);
+ break;
}
case OP_FILLED_NEW_ARRAY:
- case OP_FILLED_NEW_ARRAY_RANGE: {
+ case OP_FILLED_NEW_ARRAY_RANGE:
+ case OP_FILLED_NEW_ARRAY_JUMBO: {
/* Just let the interpreter deal with these */
genInterpSingleStep(cUnit, mir);
break;
@@ -3122,6 +3192,25 @@ static bool handleFmt35c_3rc(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb,
return false;
}
+/* "this" pointer is already in r0 */
+static void genValidationForMethodCallee(CompilationUnit *cUnit, MIR *mir,
+ ArmLIR **classCheck)
+{
+ CallsiteInfo *callsiteInfo = mir->meta.callsiteInfo;
+ dvmCompilerLockAllTemps(cUnit);
+
+ loadConstant(cUnit, r1, (int) callsiteInfo->clazz);
+
+ loadWordDisp(cUnit, r0, offsetof(Object, clazz), r2);
+ /* Branch to the slow path if classes are not equal */
+ opRegReg(cUnit, kOpCmp, r1, r2);
+ /*
+ * Set the misPredBranchOver target so that it will be generated when the
+ * code for the non-optimized invoke is generated.
+ */
+ *classCheck = opCondBranch(cUnit, kArmCondNe);
+}
+
static bool handleFmt35ms_3rms(CompilationUnit *cUnit, MIR *mir,
BasicBlock *bb, ArmLIR *labelList)
{
@@ -3154,6 +3243,29 @@ static bool handleFmt35ms_3rms(CompilationUnit *cUnit, MIR *mir,
else
genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel);
+
+ if (mir->OptimizationFlags & MIR_INVOKE_METHOD_JIT) {
+ const Method *calleeMethod = mir->meta.callsiteInfo->method;
+ void *calleeAddr = dvmJitGetMethodAddr(calleeMethod->insns);
+ if (calleeAddr) {
+ ArmLIR *classCheck;
+ cUnit->printMe = true;
+ genValidationForMethodCallee(cUnit, mir, &classCheck);
+ newLIR2(cUnit, kThumbBl1, (int) calleeAddr,
+ (int) calleeAddr);
+ newLIR2(cUnit, kThumbBl2, (int) calleeAddr,
+ (int) calleeAddr);
+ genUnconditionalBranch(cUnit, retChainingCell);
+
+ /* Target of slow path */
+ ArmLIR *slowPathLabel = newLIR0(cUnit,
+ kArmPseudoTargetLabel);
+
+ slowPathLabel->defMask = ENCODE_ALL;
+ classCheck->generic.target = (LIR *) slowPathLabel;
+ }
+ }
+
genInvokeVirtualCommon(cUnit, mir, methodIndex,
retChainingCell,
predChainingCell,
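
When the callee has itself been method-JIT'ed (the MIR_INVOKE_METHOD_JIT path above), the driver emits the direct call as kThumbBl1/kThumbBl2, the two 16-bit halves of a Thumb-1 BL. As background only (not code from this patch), a sketch of how such a pair encodes its branch offset, assuming the classic Thumb-1 BL encoding:

#include <stdint.h>
#include <stdio.h>

/*
 * Thumb-1 BL is a pair of 16-bit instructions:
 *   first  half: 0xF000 | offset[22:12]
 *   second half: 0xF800 | offset[11:1]
 * where offset is relative to the address of the first half plus 4.
 */
static void encodeThumbBl(uint32_t pc, uint32_t target, uint16_t out[2])
{
    int32_t offset = (int32_t) (target - (pc + 4));
    out[0] = (uint16_t) (0xF000 | ((offset >> 12) & 0x7FF));
    out[1] = (uint16_t) (0xF800 | ((offset >> 1) & 0x7FF));
}

int main(void)
{
    uint16_t bl[2];
    encodeThumbBl(0x2000, 0x2400, bl);
    printf("BL pair: %04x %04x\n", bl[0], bl[1]);
    return 0;
}
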
@@ -3195,7 +3307,7 @@ static bool handleFmt35ms_3rms(CompilationUnit *cUnit, MIR *mir,
static bool genInlinedCompareTo(CompilationUnit *cUnit, MIR *mir)
{
#if defined(USE_GLOBAL_STRING_DEFS)
- return false;
+ return handleExecuteInlineC(cUnit, mir);
#else
ArmLIR *rollback;
RegLocation rlThis = dvmCompilerGetSrc(cUnit, mir, 0);
@@ -3214,14 +3326,14 @@ static bool genInlinedCompareTo(CompilationUnit *cUnit, MIR *mir)
genDispatchToHandler(cUnit, TEMPLATE_STRING_COMPARETO);
storeValue(cUnit, inlinedTarget(cUnit, mir, false),
dvmCompilerGetReturn(cUnit));
- return true;
+ return false;
#endif
}
static bool genInlinedFastIndexOf(CompilationUnit *cUnit, MIR *mir)
{
#if defined(USE_GLOBAL_STRING_DEFS)
- return false;
+ return handleExecuteInlineC(cUnit, mir);
#else
RegLocation rlThis = dvmCompilerGetSrc(cUnit, mir, 0);
RegLocation rlChar = dvmCompilerGetSrc(cUnit, mir, 1);
@@ -3235,7 +3347,7 @@ static bool genInlinedFastIndexOf(CompilationUnit *cUnit, MIR *mir)
genDispatchToHandler(cUnit, TEMPLATE_STRING_INDEXOF);
storeValue(cUnit, inlinedTarget(cUnit, mir, false),
dvmCompilerGetReturn(cUnit));
- return true;
+ return false;
#endif
}
@@ -3358,103 +3470,102 @@ static bool genInlinedLongDoubleConversion(CompilationUnit *cUnit, MIR *mir)
}
/*
+ * JITs a call to a C function.
+ * TODO: use this for faster native method invocation for simple native
+ * methods (http://b/3069458).
+ */
+static bool handleExecuteInlineC(CompilationUnit *cUnit, MIR *mir)
+{
+ DecodedInstruction *dInsn = &mir->dalvikInsn;
+ int operation = dInsn->vB;
+ unsigned int i;
+ const InlineOperation* inLineTable = dvmGetInlineOpsTable();
+ uintptr_t fn = (int) inLineTable[operation].func;
+ if (fn == 0) {
+ dvmCompilerAbort(cUnit);
+ }
+ dvmCompilerFlushAllRegs(cUnit); /* Everything to home location */
+ dvmCompilerClobberCallRegs(cUnit);
+ dvmCompilerClobber(cUnit, r4PC);
+ dvmCompilerClobber(cUnit, r7);
+ int offset = offsetof(InterpState, retval);
+ opRegRegImm(cUnit, kOpAdd, r4PC, rGLUE, offset);
+ opImm(cUnit, kOpPush, (1<<r4PC) | (1<<r7));
+ LOAD_FUNC_ADDR(cUnit, r4PC, fn);
+ genExportPC(cUnit, mir);
+ for (i=0; i < dInsn->vA; i++) {
+ loadValueDirect(cUnit, dvmCompilerGetSrc(cUnit, mir, i), i);
+ }
+ opReg(cUnit, kOpBlx, r4PC);
+ opRegImm(cUnit, kOpAdd, r13, 8);
+ /* NULL? */
+ ArmLIR *branchOver = genCmpImmBranch(cUnit, kArmCondNe, r0, 0);
+ loadConstant(cUnit, r0, (int) (cUnit->method->insns + mir->offset));
+ genDispatchToHandler(cUnit, TEMPLATE_THROW_EXCEPTION_COMMON);
+ ArmLIR *target = newLIR0(cUnit, kArmPseudoTargetLabel);
+ target->defMask = ENCODE_ALL;
+ branchOver->generic.target = (LIR *) target;
+ return false;
+}
+
+/*
* NOTE: Handles both range and non-range versions (arguments
* have already been normalized by this point).
*/
static bool handleExecuteInline(CompilationUnit *cUnit, MIR *mir)
{
DecodedInstruction *dInsn = &mir->dalvikInsn;
- switch( mir->dalvikInsn.opcode) {
- case OP_EXECUTE_INLINE_RANGE:
- case OP_EXECUTE_INLINE: {
- unsigned int i;
- const InlineOperation* inLineTable = dvmGetInlineOpsTable();
- int offset = offsetof(InterpState, retval);
- int operation = dInsn->vB;
- switch (operation) {
- case INLINE_EMPTYINLINEMETHOD:
- return false; /* Nop */
- case INLINE_STRING_LENGTH:
- return genInlinedStringLength(cUnit, mir);
- case INLINE_STRING_IS_EMPTY:
- return genInlinedStringIsEmpty(cUnit, mir);
- case INLINE_MATH_ABS_INT:
- return genInlinedAbsInt(cUnit, mir);
- case INLINE_MATH_ABS_LONG:
- return genInlinedAbsLong(cUnit, mir);
- case INLINE_MATH_MIN_INT:
- return genInlinedMinMaxInt(cUnit, mir, true);
- case INLINE_MATH_MAX_INT:
- return genInlinedMinMaxInt(cUnit, mir, false);
- case INLINE_STRING_CHARAT:
- return genInlinedStringCharAt(cUnit, mir);
- case INLINE_MATH_SQRT:
- if (genInlineSqrt(cUnit, mir))
- return false;
- else
- break; /* Handle with C routine */
- case INLINE_MATH_ABS_FLOAT:
- if (genInlinedAbsFloat(cUnit, mir))
- return false;
- else
- break;
- case INLINE_MATH_ABS_DOUBLE:
- if (genInlinedAbsDouble(cUnit, mir))
- return false;
- else
- break;
- case INLINE_STRING_COMPARETO:
- if (genInlinedCompareTo(cUnit, mir))
- return false;
- else
- break;
- case INLINE_STRING_FASTINDEXOF_II:
- if (genInlinedFastIndexOf(cUnit, mir))
- return false;
- else
- break;
- case INLINE_FLOAT_TO_RAW_INT_BITS:
- case INLINE_INT_BITS_TO_FLOAT:
- return genInlinedIntFloatConversion(cUnit, mir);
- case INLINE_DOUBLE_TO_RAW_LONG_BITS:
- case INLINE_LONG_BITS_TO_DOUBLE:
- return genInlinedLongDoubleConversion(cUnit, mir);
- case INLINE_STRING_EQUALS:
- case INLINE_MATH_COS:
- case INLINE_MATH_SIN:
- case INLINE_FLOAT_TO_INT_BITS:
- case INLINE_DOUBLE_TO_LONG_BITS:
- break; /* Handle with C routine */
- default:
- dvmCompilerAbort(cUnit);
- }
- dvmCompilerFlushAllRegs(cUnit); /* Everything to home location */
- dvmCompilerClobberCallRegs(cUnit);
- dvmCompilerClobber(cUnit, r4PC);
- dvmCompilerClobber(cUnit, r7);
- opRegRegImm(cUnit, kOpAdd, r4PC, rGLUE, offset);
- opImm(cUnit, kOpPush, (1<<r4PC) | (1<<r7));
- LOAD_FUNC_ADDR(cUnit, r4PC, (int)inLineTable[operation].func);
- genExportPC(cUnit, mir);
- for (i=0; i < dInsn->vA; i++) {
- loadValueDirect(cUnit, dvmCompilerGetSrc(cUnit, mir, i), i);
- }
- opReg(cUnit, kOpBlx, r4PC);
- opRegImm(cUnit, kOpAdd, r13, 8);
- /* NULL? */
- ArmLIR *branchOver = genCmpImmBranch(cUnit, kArmCondNe, r0, 0);
- loadConstant(cUnit, r0,
- (int) (cUnit->method->insns + mir->offset));
- genDispatchToHandler(cUnit, TEMPLATE_THROW_EXCEPTION_COMMON);
- ArmLIR *target = newLIR0(cUnit, kArmPseudoTargetLabel);
- target->defMask = ENCODE_ALL;
- branchOver->generic.target = (LIR *) target;
- break;
- }
- default:
- return true;
+ assert(dInsn->opcode == OP_EXECUTE_INLINE_RANGE ||
+ dInsn->opcode == OP_EXECUTE_INLINE);
+ switch (dInsn->vB) {
+ case INLINE_EMPTYINLINEMETHOD:
+ return false; /* Nop */
+
+ /* These ones we potentially JIT inline. */
+ case INLINE_STRING_LENGTH:
+ return genInlinedStringLength(cUnit, mir);
+ case INLINE_STRING_IS_EMPTY:
+ return genInlinedStringIsEmpty(cUnit, mir);
+ case INLINE_MATH_ABS_INT:
+ return genInlinedAbsInt(cUnit, mir);
+ case INLINE_MATH_ABS_LONG:
+ return genInlinedAbsLong(cUnit, mir);
+ case INLINE_MATH_MIN_INT:
+ return genInlinedMinMaxInt(cUnit, mir, true);
+ case INLINE_MATH_MAX_INT:
+ return genInlinedMinMaxInt(cUnit, mir, false);
+ case INLINE_STRING_CHARAT:
+ return genInlinedStringCharAt(cUnit, mir);
+ case INLINE_MATH_SQRT:
+ return genInlineSqrt(cUnit, mir);
+ case INLINE_MATH_ABS_FLOAT:
+ return genInlinedAbsFloat(cUnit, mir);
+ case INLINE_MATH_ABS_DOUBLE:
+ return genInlinedAbsDouble(cUnit, mir);
+ case INLINE_STRING_COMPARETO:
+ return genInlinedCompareTo(cUnit, mir);
+ case INLINE_STRING_FASTINDEXOF_II:
+ return genInlinedFastIndexOf(cUnit, mir);
+ case INLINE_FLOAT_TO_RAW_INT_BITS:
+ case INLINE_INT_BITS_TO_FLOAT:
+ return genInlinedIntFloatConversion(cUnit, mir);
+ case INLINE_DOUBLE_TO_RAW_LONG_BITS:
+ case INLINE_LONG_BITS_TO_DOUBLE:
+ return genInlinedLongDoubleConversion(cUnit, mir);
+
+ /*
+ * These ones we just JIT a call to a C function for.
+ * TODO: special-case these in the other "invoke" call paths.
+ */
+ case INLINE_STRING_EQUALS:
+ case INLINE_MATH_COS:
+ case INLINE_MATH_SIN:
+ case INLINE_FLOAT_TO_INT_BITS:
+ case INLINE_DOUBLE_TO_LONG_BITS:
+ return handleExecuteInlineC(cUnit, mir);
}
- return false;
+ dvmCompilerAbort(cUnit);
+ return false; // Not reachable; keeps compiler happy.
}
static bool handleFmt51l(CompilationUnit *cUnit, MIR *mir)
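
At run time, the code emitted by the new handleExecuteInlineC() behaves like the C below: pick the helper out of the inline-ops table, pass up to four virtual registers (dInsn->vA of them) as arguments, and throw if the helper signals an exception by leaving 0 in r0. The table entry here is a hypothetical stand-in for what dvmGetInlineOpsTable() provides:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef union { int i; long long j; } JValue;      /* retval buffer */
typedef bool (*InlineFunc)(unsigned a0, unsigned a1, unsigned a2,
                           unsigned a3, JValue *pResult);

/* Hypothetical table entry standing in for, e.g., Math.abs(int). */
static bool inlineAbsInt(unsigned a0, unsigned a1, unsigned a2,
                         unsigned a3, JValue *pResult)
{
    (void) a1; (void) a2; (void) a3;
    pResult->i = abs((int) a0);
    return true;                 /* nonzero r0: no exception thrown */
}

static const InlineFunc inlineOpsTable[] = { inlineAbsInt };

static void executeInlineC(unsigned op, const unsigned args[4], JValue *ret)
{
    if (!inlineOpsTable[op](args[0], args[1], args[2], args[3], ret)) {
        /* the emitted code branches to TEMPLATE_THROW_EXCEPTION_COMMON */
        fprintf(stderr, "inline op %u raised an exception\n", op);
    }
}

int main(void)
{
    unsigned args[4] = { (unsigned) -42, 0, 0, 0 };
    JValue ret;
    executeInlineC(0, args, &ret);
    printf("abs -> %d\n", ret.i);
    return 0;
}
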
@@ -3532,7 +3643,6 @@ static void handleHotChainingCell(CompilationUnit *cUnit,
addWordData(cUnit, (int) (cUnit->method->insns + offset), true);
}
-#if defined(WITH_SELF_VERIFICATION) || defined(WITH_JIT_TUNING)
/* Chaining cell for branches that branch back into the same basic block */
static void handleBackwardBranchChainingCell(CompilationUnit *cUnit,
unsigned int offset)
@@ -3554,7 +3664,6 @@ static void handleBackwardBranchChainingCell(CompilationUnit *cUnit,
addWordData(cUnit, (int) (cUnit->method->insns + offset), true);
}
-#endif
/* Chaining cell for monomorphic method invocations. */
static void handleInvokeSingletonChainingCell(CompilationUnit *cUnit,
const Method *callee)
@@ -3830,8 +3939,8 @@ static void genValidationForPredictedInline(CompilationUnit *cUnit, MIR *mir)
static void handleExtendedMIR(CompilationUnit *cUnit, MIR *mir)
{
int opOffset = mir->dalvikInsn.opcode - kMirOpFirst;
- char *msg = dvmCompilerNew(strlen(extendedMIROpNames[opOffset]) + 1,
- false);
+ char *msg = (char *)dvmCompilerNew(strlen(extendedMIROpNames[opOffset]) + 1,
+ false);
strcpy(msg, extendedMIROpNames[opOffset]);
newLIR1(cUnit, kArmPseudoExtended, (int) msg);
@@ -3878,25 +3987,25 @@ static void setupLoopEntryBlock(CompilationUnit *cUnit, BasicBlock *entry,
ArmLIR *bodyLabel)
{
/* Set up the place holder to reconstruct this Dalvik PC */
- ArmLIR *pcrLabel = dvmCompilerNew(sizeof(ArmLIR), true);
+ ArmLIR *pcrLabel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
pcrLabel->opcode = kArmPseudoPCReconstructionCell;
pcrLabel->operands[0] =
(int) (cUnit->method->insns + entry->startOffset);
pcrLabel->operands[1] = entry->startOffset;
/* Insert the place holder to the growable list */
- dvmInsertGrowableList(&cUnit->pcReconstructionList, pcrLabel);
+ dvmInsertGrowableList(&cUnit->pcReconstructionList, (intptr_t) pcrLabel);
/*
* Next, create two branches - one branch over to the loop body and the
* other branch to the PCR cell to punt.
*/
- ArmLIR *branchToBody = dvmCompilerNew(sizeof(ArmLIR), true);
+ ArmLIR *branchToBody = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
branchToBody->opcode = kThumbBUncond;
branchToBody->generic.target = (LIR *) bodyLabel;
setupResourceMasks(branchToBody);
cUnit->loopAnalysis->branchToBody = (LIR *) branchToBody;
- ArmLIR *branchToPCR = dvmCompilerNew(sizeof(ArmLIR), true);
+ ArmLIR *branchToPCR = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
branchToPCR->opcode = kThumbBUncond;
branchToPCR->generic.target = (LIR *) pcrLabel;
setupResourceMasks(branchToPCR);
@@ -3926,7 +4035,7 @@ void dvmCompilerMIR2LIR(CompilationUnit *cUnit)
{
/* Used to hold the labels of each block */
ArmLIR *labelList =
- dvmCompilerNew(sizeof(ArmLIR) * cUnit->numBlocks, true);
+ (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR) * cUnit->numBlocks, true);
GrowableList chainingListByType[kChainingCellGap];
int i;
@@ -3937,51 +4046,22 @@ void dvmCompilerMIR2LIR(CompilationUnit *cUnit)
dvmInitGrowableList(&chainingListByType[i], 2);
}
- BasicBlock **blockList = cUnit->blockList;
+ GrowableListIterator iterator;
+ dvmGrowableListIteratorInit(&cUnit->blockList, &iterator);
- if (cUnit->executionCount) {
- /*
- * Reserve 6 bytes at the beginning of the trace
- * +----------------------------+
- * | execution count (4 bytes) |
- * +----------------------------+
- * | chain cell offset (2 bytes)|
- * +----------------------------+
- * ...and then code to increment the execution
- * count:
- * mov r0, pc @ move adr of "mov r0,pc" + 4 to r0
- * sub r0, #10 @ back up to addr of executionCount
- * ldr r1, [r0]
- * add r1, #1
- * str r1, [r0]
- */
- newLIR1(cUnit, kArm16BitData, 0);
- newLIR1(cUnit, kArm16BitData, 0);
- cUnit->chainCellOffsetLIR =
- (LIR *) newLIR1(cUnit, kArm16BitData, CHAIN_CELL_OFFSET_TAG);
- cUnit->headerSize = 6;
- /* Thumb instruction used directly here to ensure correct size */
- newLIR2(cUnit, kThumbMovRR_H2L, r0, rpc);
- newLIR2(cUnit, kThumbSubRI8, r0, 10);
- newLIR3(cUnit, kThumbLdrRRI5, r1, r0, 0);
- newLIR2(cUnit, kThumbAddRI8, r1, 1);
- newLIR3(cUnit, kThumbStrRRI5, r1, r0, 0);
- } else {
- /* Just reserve 2 bytes for the chain cell offset */
- cUnit->chainCellOffsetLIR =
- (LIR *) newLIR1(cUnit, kArm16BitData, CHAIN_CELL_OFFSET_TAG);
- cUnit->headerSize = 2;
- }
+ /* Traces start with a profiling entry point. Generate it here */
+ cUnit->profileCodeSize = genTraceProfileEntry(cUnit);
/* Handle the content in each basic block */
- for (i = 0; i < cUnit->numBlocks; i++) {
- blockList[i]->visited = true;
+ for (i = 0; ; i++) {
MIR *mir;
+ BasicBlock *bb = (BasicBlock *) dvmGrowableListIteratorNext(&iterator);
+ if (bb == NULL) break;
- labelList[i].operands[0] = blockList[i]->startOffset;
+ labelList[i].operands[0] = bb->startOffset;
- if (blockList[i]->blockType >= kChainingCellGap) {
- if (blockList[i]->isFallThroughFromInvoke == true) {
+ if (bb->blockType >= kChainingCellGap) {
+ if (bb->isFallThroughFromInvoke == true) {
/* Align this block first since it is a return chaining cell */
newLIR0(cUnit, kArmPseudoPseudoAlign4);
}
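
The block walk now goes through cUnit->blockList with a GrowableList iterator instead of indexing a raw BasicBlock** array, which is why the loop above has no explicit bound and terminates when the iterator yields NULL. A minimal sketch of that iterator protocol with simplified stand-in types (the real dvmGrowableList* API lives in the compiler utilities):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    size_t numUsed;
    intptr_t elemList[16];       /* fixed capacity, for the sketch only */
} GrowableList;

typedef struct {
    GrowableList *list;
    size_t idx;
} GrowableListIterator;

static void iteratorInit(GrowableList *list, GrowableListIterator *it)
{
    it->list = list;
    it->idx = 0;
}

/* Returns 0 when exhausted, mirroring the NULL check in the loop above. */
static intptr_t iteratorNext(GrowableListIterator *it)
{
    return it->idx < it->list->numUsed ? it->list->elemList[it->idx++] : 0;
}

int main(void)
{
    GrowableList blocks = { 3, { 10, 20, 30 } };
    GrowableListIterator it;
    iteratorInit(&blocks, &it);
    for (intptr_t e; (e = iteratorNext(&it)) != 0; )
        printf("block %ld\n", (long) e);
    return 0;
}
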
@@ -3992,56 +4072,53 @@ void dvmCompilerMIR2LIR(CompilationUnit *cUnit)
dvmCompilerAppendLIR(cUnit, (LIR *) &labelList[i]);
}
- if (blockList[i]->blockType == kTraceEntryBlock) {
+ if (bb->blockType == kTraceEntryBlock) {
labelList[i].opcode = kArmPseudoEntryBlock;
- if (blockList[i]->firstMIRInsn == NULL) {
+ if (bb->firstMIRInsn == NULL) {
continue;
} else {
- setupLoopEntryBlock(cUnit, blockList[i],
- &labelList[blockList[i]->fallThrough->id]);
+ setupLoopEntryBlock(cUnit, bb,
+ &labelList[bb->fallThrough->id]);
}
- } else if (blockList[i]->blockType == kTraceExitBlock) {
+ } else if (bb->blockType == kTraceExitBlock) {
labelList[i].opcode = kArmPseudoExitBlock;
goto gen_fallthrough;
- } else if (blockList[i]->blockType == kDalvikByteCode) {
+ } else if (bb->blockType == kDalvikByteCode) {
labelList[i].opcode = kArmPseudoNormalBlockLabel;
/* Reset the register state */
dvmCompilerResetRegPool(cUnit);
dvmCompilerClobberAllRegs(cUnit);
dvmCompilerResetNullCheck(cUnit);
} else {
- switch (blockList[i]->blockType) {
+ switch (bb->blockType) {
case kChainingCellNormal:
labelList[i].opcode = kArmPseudoChainingCellNormal;
/* handle the codegen later */
dvmInsertGrowableList(
- &chainingListByType[kChainingCellNormal], (void *) i);
+ &chainingListByType[kChainingCellNormal], i);
break;
case kChainingCellInvokeSingleton:
labelList[i].opcode =
kArmPseudoChainingCellInvokeSingleton;
labelList[i].operands[0] =
- (int) blockList[i]->containingMethod;
+ (int) bb->containingMethod;
/* handle the codegen later */
dvmInsertGrowableList(
- &chainingListByType[kChainingCellInvokeSingleton],
- (void *) i);
+ &chainingListByType[kChainingCellInvokeSingleton], i);
break;
case kChainingCellInvokePredicted:
labelList[i].opcode =
kArmPseudoChainingCellInvokePredicted;
/* handle the codegen later */
dvmInsertGrowableList(
- &chainingListByType[kChainingCellInvokePredicted],
- (void *) i);
+ &chainingListByType[kChainingCellInvokePredicted], i);
break;
case kChainingCellHot:
labelList[i].opcode =
kArmPseudoChainingCellHot;
/* handle the codegen later */
dvmInsertGrowableList(
- &chainingListByType[kChainingCellHot],
- (void *) i);
+ &chainingListByType[kChainingCellHot], i);
break;
case kPCReconstruction:
/* Make sure exception handling block is next */
@@ -4059,16 +4136,14 @@ void dvmCompilerMIR2LIR(CompilationUnit *cUnit)
opReg(cUnit, kOpBlx, r1);
}
break;
-#if defined(WITH_SELF_VERIFICATION) || defined(WITH_JIT_TUNING)
case kChainingCellBackwardBranch:
labelList[i].opcode =
kArmPseudoChainingCellBackwardBranch;
/* handle the codegen later */
dvmInsertGrowableList(
&chainingListByType[kChainingCellBackwardBranch],
- (void *) i);
+ i);
break;
-#endif
default:
break;
}
@@ -4077,7 +4152,7 @@ void dvmCompilerMIR2LIR(CompilationUnit *cUnit)
ArmLIR *headLIR = NULL;
- for (mir = blockList[i]->firstMIRInsn; mir; mir = mir->next) {
+ for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
dvmCompilerResetRegPool(cUnit);
if (gDvmJit.disableOpt & (1 << kTrackLiveTemps)) {
@@ -4154,7 +4229,7 @@ void dvmCompilerMIR2LIR(CompilationUnit *cUnit)
case kFmt20t:
case kFmt30t:
notHandled = handleFmt10t_Fmt20t_Fmt30t(cUnit,
- mir, blockList[i], labelList);
+ mir, bb, labelList);
break;
case kFmt10x:
notHandled = handleFmt10x(cUnit, mir);
@@ -4170,11 +4245,13 @@ void dvmCompilerMIR2LIR(CompilationUnit *cUnit)
notHandled = handleFmt12x(cUnit, mir);
break;
case kFmt20bc:
- notHandled = handleFmt20bc(cUnit, mir);
+ case kFmt40sc:
+ notHandled = handleFmt20bc_Fmt40sc(cUnit, mir);
break;
case kFmt21c:
case kFmt31c:
- notHandled = handleFmt21c_Fmt31c(cUnit, mir);
+ case kFmt41c:
+ notHandled = handleFmt21c_Fmt31c_Fmt41c(cUnit, mir);
break;
case kFmt21h:
notHandled = handleFmt21h(cUnit, mir);
@@ -4183,22 +4260,21 @@ void dvmCompilerMIR2LIR(CompilationUnit *cUnit)
notHandled = handleFmt21s(cUnit, mir);
break;
case kFmt21t:
- notHandled = handleFmt21t(cUnit, mir, blockList[i],
- labelList);
+ notHandled = handleFmt21t(cUnit, mir, bb, labelList);
break;
case kFmt22b:
case kFmt22s:
notHandled = handleFmt22b_Fmt22s(cUnit, mir);
break;
case kFmt22c:
- notHandled = handleFmt22c(cUnit, mir);
+ case kFmt52c:
+ notHandled = handleFmt22c_Fmt52c(cUnit, mir);
break;
case kFmt22cs:
notHandled = handleFmt22cs(cUnit, mir);
break;
case kFmt22t:
- notHandled = handleFmt22t(cUnit, mir, blockList[i],
- labelList);
+ notHandled = handleFmt22t(cUnit, mir, bb, labelList);
break;
case kFmt22x:
case kFmt32x:
@@ -4212,12 +4288,13 @@ void dvmCompilerMIR2LIR(CompilationUnit *cUnit)
break;
case kFmt3rc:
case kFmt35c:
- notHandled = handleFmt35c_3rc(cUnit, mir, blockList[i],
+ case kFmt5rc:
+ notHandled = handleFmt35c_3rc_5rc(cUnit, mir, bb,
labelList);
break;
case kFmt3rms:
case kFmt35ms:
- notHandled = handleFmt35ms_3rms(cUnit, mir,blockList[i],
+ notHandled = handleFmt35ms_3rms(cUnit, mir, bb,
labelList);
break;
case kFmt35mi:
@@ -4242,7 +4319,7 @@ void dvmCompilerMIR2LIR(CompilationUnit *cUnit)
}
}
- if (blockList[i]->blockType == kTraceEntryBlock) {
+ if (bb->blockType == kTraceEntryBlock) {
dvmCompilerAppendLIR(cUnit,
(LIR *) cUnit->loopAnalysis->branchToBody);
dvmCompilerAppendLIR(cUnit,
@@ -4263,9 +4340,9 @@ gen_fallthrough:
* Check if the block is terminated due to trace length constraint -
* insert an unconditional branch to the chaining cell.
*/
- if (blockList[i]->needFallThroughBranch) {
+ if (bb->needFallThroughBranch) {
genUnconditionalBranch(cUnit,
- &labelList[blockList[i]->fallThrough->id]);
+ &labelList[bb->fallThrough->id]);
}
}
@@ -4286,6 +4363,9 @@ gen_fallthrough:
for (j = 0; j < chainingListByType[i].numUsed; j++) {
int blockId = blockIdList[j];
+ BasicBlock *chainingBlock =
+ (BasicBlock *) dvmGrowableListGetElement(&cUnit->blockList,
+ blockId);
/* Align this chaining cell first */
newLIR0(cUnit, kArmPseudoPseudoAlign4);
@@ -4294,30 +4374,26 @@ gen_fallthrough:
dvmCompilerAppendLIR(cUnit, (LIR *) &labelList[blockId]);
- switch (blockList[blockId]->blockType) {
+ switch (chainingBlock->blockType) {
case kChainingCellNormal:
- handleNormalChainingCell(cUnit,
- blockList[blockId]->startOffset);
+ handleNormalChainingCell(cUnit, chainingBlock->startOffset);
break;
case kChainingCellInvokeSingleton:
handleInvokeSingletonChainingCell(cUnit,
- blockList[blockId]->containingMethod);
+ chainingBlock->containingMethod);
break;
case kChainingCellInvokePredicted:
handleInvokePredictedChainingCell(cUnit);
break;
case kChainingCellHot:
- handleHotChainingCell(cUnit,
- blockList[blockId]->startOffset);
+ handleHotChainingCell(cUnit, chainingBlock->startOffset);
break;
-#if defined(WITH_SELF_VERIFICATION) || defined(WITH_JIT_TUNING)
case kChainingCellBackwardBranch:
handleBackwardBranchChainingCell(cUnit,
- blockList[blockId]->startOffset);
+ chainingBlock->startOffset);
break;
-#endif
default:
- LOGE("Bad blocktype %d", blockList[blockId]->blockType);
+ LOGE("Bad blocktype %d", chainingBlock->blockType);
dvmCompilerAbort(cUnit);
}
}
@@ -4349,10 +4425,15 @@ gen_fallthrough:
#endif
}
-/* Accept the work and start compiling */
+/*
+ * Accept the work and start compiling. Returns true if compilation
+ * is attempted.
+ */
bool dvmCompilerDoWork(CompilerWorkOrder *work)
{
- bool res;
+ JitTraceDescription *desc;
+ bool isCompile;
+ bool success = true;
if (gDvmJit.codeCacheFull) {
return false;
@@ -4360,25 +4441,35 @@ bool dvmCompilerDoWork(CompilerWorkOrder *work)
switch (work->kind) {
case kWorkOrderTrace:
+ isCompile = true;
/* Start compilation with maximally allowed trace length */
- res = dvmCompileTrace(work->info, JIT_MAX_TRACE_LEN, &work->result,
- work->bailPtr, 0 /* no hints */);
+ desc = (JitTraceDescription *)work->info;
+ success = dvmCompileTrace(desc, JIT_MAX_TRACE_LEN, &work->result,
+ work->bailPtr, 0 /* no hints */);
break;
case kWorkOrderTraceDebug: {
bool oldPrintMe = gDvmJit.printMe;
gDvmJit.printMe = true;
+ isCompile = true;
/* Start compilation with maximally allowed trace length */
- res = dvmCompileTrace(work->info, JIT_MAX_TRACE_LEN, &work->result,
- work->bailPtr, 0 /* no hints */);
+ desc = (JitTraceDescription *)work->info;
+ success = dvmCompileTrace(desc, JIT_MAX_TRACE_LEN, &work->result,
+ work->bailPtr, 0 /* no hints */);
gDvmJit.printMe = oldPrintMe;
break;
}
+ case kWorkOrderProfileMode:
+ dvmJitChangeProfileMode((TraceProfilingModes)work->info);
+ isCompile = false;
+ break;
default:
- res = false;
+ isCompile = false;
LOGE("Jit: unknown work order type");
assert(0); // Bail if debug build, discard otherwise
}
- return res;
+ if (!success)
+ work->result.codeAddress = NULL;
+ return isCompile;
}
/* Architectural-specific debugging helpers go here */
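
The reworked dvmCompilerDoWork() separates two results that were previously conflated in one boolean: whether a compile was attempted (the return value) and whether it succeeded (work->result.codeAddress, cleared to NULL on failure). A condensed sketch of that contract with stand-in types:

#include <stdbool.h>
#include <stddef.h>

typedef enum { kWorkOrderTrace, kWorkOrderProfileMode } WorkKind;
typedef struct { void *codeAddress; } JitTranslationInfo;
typedef struct {
    WorkKind kind;
    void *info;
    JitTranslationInfo result;
} CompilerWorkOrder;

static bool compileTrace(void *desc, JitTranslationInfo *result)
{
    result->codeAddress = desc;  /* pretend compilation succeeded */
    return true;
}

/*
 * Returns true iff compilation was attempted; failure is reported
 * separately through result.codeAddress == NULL.
 */
static bool doWork(CompilerWorkOrder *work)
{
    bool isCompile = false;
    bool success = true;

    switch (work->kind) {
    case kWorkOrderTrace:
        isCompile = true;
        success = compileTrace(work->info, &work->result);
        break;
    case kWorkOrderProfileMode:
        /* profiling-mode switch only: nothing is compiled */
        break;
    }
    if (!success)
        work->result.codeAddress = NULL;
    return isCompile;
}

int main(void)
{
    int dummyTrace;
    CompilerWorkOrder w = { kWorkOrderTrace, &dummyTrace, { NULL } };
    return doWork(&w) ? 0 : 1;
}
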