From: <mic...@us...> - 2007-04-02 12:11:29
Revision: 19
          http://svn.sourceforge.net/pearcolator/?rev=19&view=rev
Author:   michael_baer
Date:     2007-04-02 05:11:23 -0700 (Mon, 02 Apr 2007)

Log Message:
-----------
Added support for building with the latest JRVM.

Modified Paths:
--------------
    ext/DBT_Dummy.java
    ext/org/jikesrvm/classloader/VM_Method.java
    ext/org/jikesrvm/classloader/VM_NormalMethod.java
    ext/org/jikesrvm/compilers/opt/OPT_Simplifier.java
    ext/org/jikesrvm/compilers/opt/ir/OPT_BC2IR.java
    ext/org/jikesrvm/compilers/opt/ir/OPT_ConditionOperand.java
    ext/org/jikesrvm/compilers/opt/ir/OPT_ConvertBCtoHIR.java
    ext/org/jikesrvm/compilers/opt/ir/OPT_GenerationContext.java
    ext/org/jikesrvm/compilers/opt/ir/OPT_HIRGenerator.java
    ext/org/jikesrvm/compilers/opt/ir/OPT_HIRInfo.java
    ext/org/jikesrvm/compilers/opt/ir/ia32/OPT_IA32ConditionOperand.java
    rvmroot.patch
    src/org/binarytranslator/DBT_Options.java
    src/org/binarytranslator/Main.java
    src/org/binarytranslator/arch/arm/os/process/ARM_ProcessSpace.java
    src/org/binarytranslator/arch/arm/os/process/image/ARM_ImageProcessSpace.java
    src/org/binarytranslator/arch/ppc/decoder/PPC2IR.java
    src/org/binarytranslator/arch/ppc/decoder/PPC_InstructionDecoder.java
    src/org/binarytranslator/arch/ppc/os/abi/linux/PPC_LinuxSystemCalls.java
    src/org/binarytranslator/arch/ppc/os/process/PPC_ProcessSpace.java
    src/org/binarytranslator/arch/x86/decoder/X862IR.java
    src/org/binarytranslator/arch/x86/decoder/X86_DecodedOperand.java
    src/org/binarytranslator/arch/x86/decoder/X86_InstructionDecoder.java
    src/org/binarytranslator/arch/x86/os/process/X86_ProcessSpace.java
    src/org/binarytranslator/arch/x86/os/process/linux/X86_LinuxProcessSpace.java
    src/org/binarytranslator/generic/branch/ProcedureInformation.java
    src/org/binarytranslator/generic/decoder/DecoderUtils.java
    src/org/binarytranslator/generic/fault/BadInstructionException.java
    src/org/binarytranslator/generic/memory/ByteAddressedMemory.java
    src/org/binarytranslator/generic/memory/CallBasedMemory.java
    src/org/binarytranslator/generic/memory/IntAddressedPreSwappedMemory.java
    src/org/binarytranslator/generic/memory/Memory.java
    src/org/binarytranslator/generic/os/loader/Loader.java
    src/org/binarytranslator/generic/os/process/ProcessSpace.java
    src/org/binarytranslator/vmInterface/DBT_ConvertBinaryToHIR.java
    src/org/binarytranslator/vmInterface/DBT_OptimizingCompilerException.java
    src/org/binarytranslator/vmInterface/DBT_Trace.java
    src/org/binarytranslator/vmInterface/TranslationHelper.java

Added Paths:
-----------
    ext/org/jikesrvm/compilers/
    ext/org/jikesrvm/compilers/common/
    ext/org/jikesrvm/compilers/common/VM_RuntimeCompiler.java
    ext/org/jikesrvm/compilers/opt/

Removed Paths:
-------------
    ext/org/jikesrvm/opt/

Modified: ext/DBT_Dummy.java
===================================================================
--- ext/DBT_Dummy.java 2007-04-01 12:27:50 UTC (rev 18)
+++ ext/DBT_Dummy.java 2007-04-02 12:11:23 UTC (rev 19)
@@ -1,3 +1,4 @@
+
 /*
  * This file is part of binarytranslator.org. The binarytranslator.org
  * project is distributed under the Common Public License (CPL).
@@ -12,16 +13,16 @@ * to find every class comprising the chnages to the opt compiler for DBT */ class OptDummy { - static org.jikesrvm.opt.ir.ia32.OPT_IA32ConditionOperand a; - static org.jikesrvm.opt.ir.OPT_HIRGenerator b; - static org.jikesrvm.opt.ir.OPT_GenerationContext c; - static org.jikesrvm.opt.ir.OPT_ConditionOperand d; - static org.jikesrvm.opt.ir.OPT_HIRInfo e; - static org.jikesrvm.opt.OPT_Simplifier f; + static org.jikesrvm.compilers.opt.ir.ia32.OPT_IA32ConditionOperand a; + static org.jikesrvm.compilers.opt.ir.OPT_HIRGenerator b; + static org.jikesrvm.compilers.opt.ir.OPT_GenerationContext c; + static org.jikesrvm.compilers.opt.ir.OPT_ConditionOperand d; + static org.jikesrvm.compilers.opt.ir.OPT_HIRInfo e; + static org.jikesrvm.compilers.opt.OPT_Simplifier f; static org.jikesrvm.ppc.PPC_Disassembler g; static org.jikesrvm.classloader.VM_Method j; static org.jikesrvm.classloader.VM_Member k; static org.jikesrvm.classloader.VM_NormalMethod l; - static org.jikesrvm.VM_RuntimeCompiler m; - static org.jikesrvm.opt.ir.OPT_ConvertBCtoHIR n; + static org.jikesrvm.compilers.common.VM_RuntimeCompiler m; + static org.jikesrvm.compilers.opt.ir.OPT_ConvertBCtoHIR n; } Modified: ext/org/jikesrvm/classloader/VM_Method.java =================================================================== --- ext/org/jikesrvm/classloader/VM_Method.java 2007-04-01 12:27:50 UTC (rev 18) +++ ext/org/jikesrvm/classloader/VM_Method.java 2007-04-02 12:11:23 UTC (rev 19) @@ -11,6 +11,8 @@ import org.jikesrvm.*; import org.jikesrvm.ArchitectureSpecific.VM_CodeArray; import org.jikesrvm.ArchitectureSpecific.VM_LazyCompilationTrampolineGenerator; +import org.jikesrvm.compilers.common.VM_CompiledMethod; +import org.jikesrvm.compilers.common.VM_CompiledMethods; import org.jikesrvm.runtime.VM_Statics; import org.jikesrvm.runtime.VM_Entrypoints; Modified: ext/org/jikesrvm/classloader/VM_NormalMethod.java =================================================================== --- ext/org/jikesrvm/classloader/VM_NormalMethod.java 2007-04-01 12:27:50 UTC (rev 18) +++ ext/org/jikesrvm/classloader/VM_NormalMethod.java 2007-04-02 12:11:23 UTC (rev 19) @@ -10,9 +10,12 @@ import org.jikesrvm.*; import org.vmmagic.pragma.*; -import org.jikesrvm.opt.ir.OPT_HIRGenerator; -import org.jikesrvm.opt.ir.OPT_BC2IR; -import org.jikesrvm.opt.ir.OPT_GenerationContext; +import org.jikesrvm.compilers.common.VM_BootImageCompiler; +import org.jikesrvm.compilers.common.VM_CompiledMethod; +import org.jikesrvm.compilers.common.VM_RuntimeCompiler; +import org.jikesrvm.compilers.opt.ir.OPT_BC2IR; +import org.jikesrvm.compilers.opt.ir.OPT_GenerationContext; +import org.jikesrvm.compilers.opt.ir.OPT_HIRGenerator; import org.jikesrvm.runtime.VM_DynamicLink; /** Added: ext/org/jikesrvm/compilers/common/VM_RuntimeCompiler.java =================================================================== --- ext/org/jikesrvm/compilers/common/VM_RuntimeCompiler.java (rev 0) +++ ext/org/jikesrvm/compilers/common/VM_RuntimeCompiler.java 2007-04-02 12:11:23 UTC (rev 19) @@ -0,0 +1,855 @@ +/* + * This file is part of Jikes RVM (http://jikesrvm.sourceforge.net). + * The Jikes RVM project is distributed under the Common Public License (CPL). + * A copy of the license is included in the distribution, and is also + * available at http://www.opensource.org/licenses/cpl1.0.php + * + * (C) Copyright IBM Corp. 
2001, 2005 + */ +package org.jikesrvm.compilers.common; + +import org.jikesrvm.*; +import org.jikesrvm.classloader.*; +import org.jikesrvm.compilers.baseline.*; +import org.jikesrvm.compilers.opt.*; +import org.jikesrvm.adaptive.controller.VM_Controller; +import org.jikesrvm.adaptive.controller.VM_ControllerMemory; +import org.jikesrvm.adaptive.controller.VM_ControllerPlan; +import org.jikesrvm.adaptive.recompilation.VM_InvocationCounts; +import org.jikesrvm.adaptive.recompilation.VM_PreCompile; +import org.jikesrvm.adaptive.recompilation.instrumentation.VM_AOSInstrumentationPlan; +import org.jikesrvm.adaptive.util.*; +import org.jikesrvm.ArchitectureSpecific.VM_JNICompiler; +import org.jikesrvm.runtime.VM_Time; +import org.jikesrvm.scheduler.VM_Thread; + +/** + * Harness to select which compiler to dynamically compile a method in first + * invocation. + * + * A place to put code common to all runtime compilers. This includes + * instrumentation code to get equivalent data for each of the runtime + * compilers. + * <p> + * We collect the following data for each compiler + * <ol> + * <li> total number of methods complied by the compiler + * <li> total compilation time in milliseconds. + * <li> total number of bytes of bytecodes compiled by the compiler (under the + * assumption that there is no padding in the bytecode array and thus + * VM_Method.getBytecodes().length is the number bytes of bytecode for a method) + * <li> total number of machine code insructions generated by the compiler + * (under the assumption that there is no (excessive) padding in the machine + * code array and thus VM_CompiledMethod.numberOfInsturctions() is a close + * enough approximation of the number of machinecodes generated) + * </ol> + * Note that even if 3. & 4. are inflated due to padding, the numbers will still + * be an accurate measure of the space costs of the compile-only approach. + * + * @author Matthew Arnold + * @author Dave Grove + * @author Michael Hind + */ +public class VM_RuntimeCompiler implements VM_Constants, + VM_Callbacks.ExitMonitor { + + // Use these to encode the compiler for record() + public static final byte JNI_COMPILER = 0; + + public static final byte BASELINE_COMPILER = 1; + + public static final byte OPT_COMPILER = 2; + + // Data accumulators + private static final String[] name = { "JNI\t", "Base\t", "Opt\t" }; // Output + // names + + private static int[] totalMethods = { 0, 0, 0 }; + + private static double[] totalCompTime = { 0, 0, 0 }; + + private static int[] totalBCLength = { 0, 0, 0 }; + + private static int[] totalMCLength = { 0, 0, 0 }; + + // running sum of the natural logs of the rates, + // used for geometric mean, the product of rates is too big for doubles + // so we use the principle of logs to help us + // We compute e ** ((log a + log b + ... + log n) / n ) + private static double[] totalLogOfRates = { 0, 0, 0 }; + + // We can't record values until Math.log is loaded, so we miss the first few + private static int[] totalLogValueMethods = { 0, 0, 0 }; + + private static String[] earlyOptArgs = new String[0]; + + // is the opt compiler usable? + protected static boolean compilerEnabled; + + // is opt compiler currently in use? + // This flag is used to detect/avoid recursive opt compilation. + // (ie when opt compilation causes a method to be compiled). + // We also make all public entrypoints static synchronized methods + // because the opt compiler is not reentrant. 
+ // When we actually fix defect 2912, we'll have to implement a different + // scheme that can distinguish between recursive opt compilation by the same + // thread (always bad) and parallel opt compilation (currently bad, future + // ok). + // NOTE: This code can be quite subtle, so please be absolutely sure + // you know what you're doing before modifying it!!! + protected static boolean compilationInProgress; + + // One time check to optionally preload and compile a specified class + protected static boolean preloadChecked = false; + + // Cache objects needed to cons up compilation plans + // TODO: cutting link to opt compiler by declaring type as object. + public static Object /* OPT_Options */options; + + public static Object /* OPT_OptimizationPlanElement[] */optimizationPlan; + + /** + * To be called when the VM is about to exit. + * + * @param value + * the exit value + */ + public void notifyExit(int value) { + report(false); + } + + /** + * This method records the time and sizes (bytecode and machine code) for a + * compilation. + * + * @param compiler + * the compiler used + * @param method + * the resulting VM_Method + * @param compiledMethod + * the resulting compiled method + */ + public static void record(byte compiler, VM_NormalMethod method, + VM_CompiledMethod compiledMethod) { + + recordCompilation(compiler, method.getBytecodeLength(), compiledMethod + .numberOfInstructions(), compiledMethod.getCompilationTime()); + + if (VM.BuildForAdaptiveSystem) { + if (VM_AOSLogging.booted()) { + VM_AOSLogging.recordUpdatedCompilationRates(compiler, method, method + .getBytecodeLength(), totalBCLength[compiler], compiledMethod + .numberOfInstructions(), totalMCLength[compiler], compiledMethod + .getCompilationTime(), totalCompTime[compiler], + totalLogOfRates[compiler], totalLogValueMethods[compiler], + totalMethods[compiler]); + } + } + } + + /** + * This method records the time and sizes (bytecode and machine code) for a + * compilation + * + * @param compiler + * the compiler used + * @param method + * the resulting VM_Method + * @param compiledMethod + * the resulting compiled method + */ + public static void record(byte compiler, VM_NativeMethod method, + VM_CompiledMethod compiledMethod) { + + recordCompilation(compiler, 0, // don't have any bytecode info, its native + compiledMethod.numberOfInstructions(), compiledMethod + .getCompilationTime()); + } + + /** + * This method does the actual recording + * + * @param compiler + * the compiler used + * @param BCLength + * the number of bytecodes in method source + * @param MCLength + * the length of the generated machine code + * @param compTime + * the compilation time in ms + */ + private static void recordCompilation(byte compiler, int BCLength, + int MCLength, double compTime) { + + totalMethods[compiler]++; + totalMCLength[compiler] += MCLength; + totalCompTime[compiler] += compTime; + + // Comp rate not useful for JNI compiler because there is no bytecode! + if (compiler != JNI_COMPILER) { + totalBCLength[compiler] += BCLength; + double rate = BCLength / compTime; + + // need to be fully booted before calling log + if (VM.fullyBooted) { + // we want the geometric mean, but the product of rates is too big + // for doubles, so we use the principle of logs to help us + // We compute e ** ((log a + log b + ... 
+ log n) / n ) + totalLogOfRates[compiler] += Math.log(rate); + totalLogValueMethods[compiler]++; + } + } + } + + /** + * This method produces a summary report of compilation activities + * + * @param explain + * Explains the metrics used in the report + */ + public static void report(boolean explain) { + VM.sysWrite("\n\t\tCompilation Subsystem Report\n"); + VM.sysWrite("Comp\t#Meths\tTime\tbcb/ms\tmcb/bcb\tMCKB\tBCKB\n"); + for (int i = 0; i <= name.length - 1; i++) { + if (totalMethods[i] > 0) { + VM.sysWrite(name[i]); + // Number of methods + VM.sysWrite(totalMethods[i]); + VM.sysWrite("\t"); + // Compilation time + VM.sysWrite(totalCompTime[i]); + VM.sysWrite("\t"); + + if (i == JNI_COMPILER) { + VM.sysWrite("NA"); + } else { + // Bytecode bytes per millisecond, + // use unweighted geomean + VM + .sysWrite(Math.exp(totalLogOfRates[i] / totalLogValueMethods[i]), + 2); + } + VM.sysWrite("\t"); + // Ratio of machine code bytes to bytecode bytes + if (i != JNI_COMPILER) { + VM + .sysWrite( + (double) (totalMCLength[i] << ArchitectureSpecific.VM_RegisterConstants.LG_INSTRUCTION_WIDTH) + / (double) totalBCLength[i], 2); + } else { + VM.sysWrite("NA"); + } + VM.sysWrite("\t"); + // Generated machine code Kbytes + VM + .sysWrite( + (double) (totalMCLength[i] << ArchitectureSpecific.VM_RegisterConstants.LG_INSTRUCTION_WIDTH) / 1024, + 1); + VM.sysWrite("\t"); + // Compiled bytecode Kbytes + if (i != JNI_COMPILER) { + VM.sysWrite((double) totalBCLength[i] / 1024, 1); + } else { + VM.sysWrite("NA"); + } + VM.sysWrite("\n"); + } + } + if (explain) { + // Generate an explanation of the metrics reported + VM.sysWrite("\t\t\tExplanation of Metrics\n"); + VM + .sysWrite("#Meths:\t\tTotal number of methods compiled by the compiler\n"); + VM.sysWrite("Time:\t\tTotal compilation time in milliseconds\n"); + VM + .sysWrite("bcb/ms:\t\tNumber of bytecode bytes complied per millisecond\n"); + VM.sysWrite("mcb/bcb:\tRatio of machine code bytes to bytecode bytes\n"); + VM + .sysWrite("MCKB:\t\tTotal number of machine code bytes generated in kilobytes\n"); + VM + .sysWrite("BCKB:\t\tTotal number of bytecode bytes compiled in kilobytes\n"); + } + + VM_BaselineCompiler.generateBaselineCompilerSubsystemReport(explain); + + if (VM.BuildForAdaptiveSystem) { + // Get the opt's report + VM_TypeReference theTypeRef = VM_TypeReference + .findOrCreate( + VM_BootstrapClassLoader.getBootstrapClassLoader(), + VM_Atom + .findOrCreateAsciiAtom("Lorg/jikesrvm/opt/OPT_OptimizationPlanner;")); + VM_Type theType = theTypeRef.peekResolvedType(); + if (theType != null && theType.asClass().isInitialized()) { + OPT_OptimizationPlanner + .generateOptimizingCompilerSubsystemReport(explain); + } else { + VM + .sysWrite("\n\tNot generating Optimizing Compiler SubSystem Report because \n"); + VM.sysWrite("\tthe opt compiler was never invoked.\n\n"); + } + } + } + + /** + * Return the current estimate of basline-compiler rate, in bcb/msec + */ + public static double getBaselineRate() { + return Math.exp(totalLogOfRates[BASELINE_COMPILER] + / totalLogValueMethods[BASELINE_COMPILER]); + } + + /** + * This method will compile the passed method using the baseline compiler. 
+ * + * @param method + * the method to compile + */ + public static VM_CompiledMethod baselineCompile(VM_NormalMethod method) { + VM_Callbacks.notifyMethodCompile(method, VM_CompiledMethod.BASELINE); + long start = 0; + if (VM.MeasureCompilation || VM.BuildForAdaptiveSystem) { + start = VM_Thread.getCurrentThread().accumulateCycles(); + } + + VM_CompiledMethod cm = VM_BaselineCompiler.compile(method); + + if (VM.MeasureCompilation || VM.BuildForAdaptiveSystem) { + long end = VM_Thread.getCurrentThread().accumulateCycles(); + double compileTime = VM_Time.cyclesToMillis(end - start); + cm.setCompilationTime(compileTime); + record(BASELINE_COMPILER, method, cm); + } + + return cm; + } + + /** + * Process command line argument destined for the opt compiler + */ + public static void processOptCommandLineArg(String prefix, String arg) { + if (VM.BuildForAdaptiveSystem) { + if (compilerEnabled) { + if (((OPT_Options) options).processAsOption(prefix, arg)) { + // update the optimization plan to reflect the new command line + // argument + optimizationPlan = OPT_OptimizationPlanner + .createOptimizationPlan((OPT_Options) options); + } else { + VM.sysWrite("Unrecognized opt compiler argument \"" + arg + "\""); + VM.sysExit(VM.EXIT_STATUS_BOGUS_COMMAND_LINE_ARG); + } + } else { + String[] tmp = new String[earlyOptArgs.length + 2]; + for (int i = 0; i < earlyOptArgs.length; i++) { + tmp[i] = earlyOptArgs[i]; + } + earlyOptArgs = tmp; + earlyOptArgs[earlyOptArgs.length - 2] = prefix; + earlyOptArgs[earlyOptArgs.length - 1] = arg; + } + } else { + if (VM.VerifyAssertions) + VM._assert(NOT_REACHED); + } + } + + /** + * attempt to compile the passed method with the OPT_Compiler. Don't handle + * OPT_OptimizingCompilerExceptions (leave it up to caller to decide what to + * do) Precondition: compilationInProgress "lock" has been acquired + * + * @param method + * the method to compile + * @param plan + * the plan to use for compiling the method + */ + private static VM_CompiledMethod optCompile(VM_NormalMethod method, + OPT_CompilationPlan plan) throws OPT_OptimizingCompilerException { + if (VM.BuildForOptCompiler) { + if (VM.VerifyAssertions) { + VM._assert(compilationInProgress, + "Failed to acquire compilationInProgress \"lock\""); + } + + VM_Callbacks.notifyMethodCompile(method, VM_CompiledMethod.JNI); + long start = 0; + if (VM.MeasureCompilation || VM.BuildForAdaptiveSystem) { + start = VM_Thread.getCurrentThread().accumulateCycles(); + } + + VM_CompiledMethod cm = OPT_Compiler.compile(plan); + + if (VM.MeasureCompilation || VM.BuildForAdaptiveSystem) { + long end = VM_Thread.getCurrentThread().accumulateCycles(); + double compileTime = VM_Time.cyclesToMillis(end - start); + cm.setCompilationTime(compileTime); + record(OPT_COMPILER, method, cm); + } + + return cm; + } else { + if (VM.VerifyAssertions) + VM._assert(false); + return null; + } + } + + // These methods are safe to invoke from VM_RuntimeCompiler.compile + + /** + * This method tries to compile the passed method with the OPT_Compiler, using + * the default compilation plan. If this fails we will use the quicker + * compiler (baseline for now) The following is carefully crafted to avoid + * (infinte) recursive opt compilation for all combinations of bootimages & + * lazy/eager compilation. Be absolutely sure you know what you're doing + * before changing it !!! 
+ * + * @param method + * the method to compile + */ + public static synchronized VM_CompiledMethod optCompileWithFallBack( + VM_NormalMethod method) { + if (VM.BuildForOptCompiler) { + if (compilationInProgress) { + return fallback(method); + } else { + try { + compilationInProgress = true; + OPT_CompilationPlan plan = new OPT_CompilationPlan(method, + (OPT_OptimizationPlanElement[]) optimizationPlan, null, + (OPT_Options) options); + return optCompileWithFallBackInternal(method, plan); + } finally { + compilationInProgress = false; + } + } + } else { + if (VM.VerifyAssertions) + VM._assert(false); + return null; + } + } + + /** + * This method tries to compile the passed method with the OPT_Compiler with + * the passed compilation plan. If this fails we will use the quicker compiler + * (baseline for now) The following is carefully crafted to avoid (infinte) + * recursive opt compilation for all combinations of bootimages & lazy/eager + * compilation. Be absolutely sure you know what you're doing before changing + * it !!! + * + * @param method + * the method to compile + * @param plan + * the compilation plan to use for the compile + */ + public static synchronized VM_CompiledMethod optCompileWithFallBack( + VM_NormalMethod method, OPT_CompilationPlan plan) { + if (VM.BuildForOptCompiler) { + if (compilationInProgress) { + return fallback(method); + } else { + try { + compilationInProgress = true; + return optCompileWithFallBackInternal(method, plan); + } finally { + compilationInProgress = false; + } + } + } else { + if (VM.VerifyAssertions) + VM._assert(false); + return null; + } + } + + /** + * This real method that performs the opt compilation. + * + * @param method + * the method to compile + * @param plan + * the compilation plan to use + */ + private static VM_CompiledMethod optCompileWithFallBackInternal( + VM_NormalMethod method, OPT_CompilationPlan plan) { + if (VM.BuildForOptCompiler) { + if (method.hasNoOptCompileAnnotation()) + return fallback(method); + try { + return optCompile(method, plan); + } catch (OPT_OptimizingCompilerException e) { + String msg = "VM_RuntimeCompiler: can't optimize \"" + method + + "\" (error was: " + e + "): reverting to baseline compiler\n"; + if (e.isFatal && VM.ErrorsFatal) { + e.printStackTrace(); + VM.sysFail(msg); + } else { + boolean printMsg = true; + if (e instanceof OPT_MagicNotImplementedException) { + printMsg = !((OPT_MagicNotImplementedException) e).isExpected; + } + if (printMsg) + VM.sysWrite(msg); + } + return fallback(method); + } + } else { + if (VM.VerifyAssertions) + VM._assert(false); + return null; + } + } + + /* recompile the specialized method with OPT_Compiler. 
*/ + public static VM_CompiledMethod recompileWithOptOnStackSpecialization( + OPT_CompilationPlan plan) { + if (VM.BuildForOptCompiler) { + if (VM.VerifyAssertions) { + VM._assert(plan.method.isForOsrSpecialization()); + } + if (compilationInProgress) { + return null; + } + + try { + compilationInProgress = true; + + // the compiler will check if isForOsrSpecialization of the method + VM_CompiledMethod cm = optCompile(plan.method, plan); + + // we donot replace the compiledMethod of original method, + // because it is temporary method + return cm; + } catch (OPT_OptimizingCompilerException e) { + e.printStackTrace(); + String msg = "Optimizing compiler " + + "(via recompileWithOptOnStackSpecialization): " + + "can't optimize \"" + plan.method + "\" (error was: " + e + ")\n"; + + if (e.isFatal && VM.ErrorsFatal) { + VM.sysFail(msg); + } else { + VM.sysWrite(msg); + } + return null; + } finally { + compilationInProgress = false; + } + } else { + if (VM.VerifyAssertions) + VM._assert(false); + return null; + } + } + + /** + * This method tries to compile the passed method with the OPT_Compiler. It + * will install the new compiled method in the VM, if sucessful. NOTE: the + * recompile method should never be invoked via VM_RuntimeCompiler.compile; it + * does not have sufficient guards against recursive recompilation. + * + * @param plan + * the compilation plan to use + * @return the CMID of the new method if successful, -1 if the recompilation + * failed. + * + */ + public static synchronized int recompileWithOpt(OPT_CompilationPlan plan) { + if (VM.BuildForOptCompiler) { + if (compilationInProgress) { + return -1; + } else { + try { + compilationInProgress = true; + VM_CompiledMethod cm = optCompile(plan.method, plan); + try { + plan.method.replaceCompiledMethod(cm); + } catch (Throwable e) { + String msg = "Failure in VM_Method.replaceCompiledMethod (via recompileWithOpt): while replacing \"" + + plan.method + "\" (error was: " + e + ")\n"; + if (VM.ErrorsFatal) { + e.printStackTrace(); + VM.sysFail(msg); + } else { + VM.sysWrite(msg); + } + return -1; + } + return cm.getId(); + } catch (OPT_OptimizingCompilerException e) { + String msg = "Optimizing compiler (via recompileWithOpt): can't optimize \"" + + plan.method + "\" (error was: " + e + ")\n"; + if (e.isFatal && VM.ErrorsFatal) { + e.printStackTrace(); + VM.sysFail(msg); + } else { + // VM.sysWrite(msg); + } + return -1; + } finally { + compilationInProgress = false; + } + } + } else { + if (VM.VerifyAssertions) + VM._assert(false); + return -1; + } + } + + /** + * A wrapper method for those callers who don't want to make optimization + * plans + * + * @param method + * the method to recompile + */ + public static int recompileWithOpt(VM_NormalMethod method) { + if (VM.BuildForOptCompiler) { + OPT_CompilationPlan plan = new OPT_CompilationPlan(method, + (OPT_OptimizationPlanElement[]) optimizationPlan, null, + (OPT_Options) options); + return recompileWithOpt(plan); + } else { + if (VM.VerifyAssertions) + VM._assert(false); + return -1; + } + } + + /** + * This method uses the default compiler (baseline) to compile a method It is + * typically called when a more aggressive compilation fails. 
This method is + * safe to invoke from VM_RuntimeCompiler.compile + */ + protected static VM_CompiledMethod fallback(VM_NormalMethod method) { + // call the inherited method "baselineCompile" + return baselineCompile(method); + } + + public static void boot() { + if (VM.MeasureCompilation) { + VM_Callbacks.addExitMonitor(new VM_RuntimeCompiler()); + } + if (VM.BuildForAdaptiveSystem) { + options = new OPT_Options(); + optimizationPlan = OPT_OptimizationPlanner + .createOptimizationPlan((OPT_Options) options); + if (VM.MeasureCompilation) { + OPT_OptimizationPlanner.initializeMeasureCompilation(); + } + + OPT_Compiler.init((OPT_Options) options); + + VM_PreCompile.init(); + // when we reach here the OPT compiler is enabled. + compilerEnabled = true; + + for (int i = 0; i < earlyOptArgs.length; i += 2) { + processOptCommandLineArg(earlyOptArgs[i], earlyOptArgs[i + 1]); + } + } + } + + public static void processCommandLineArg(String prefix, String arg) { + if (VM.BuildForAdaptiveSystem) { + if (VM_Controller.options != null && VM_Controller.options.optIRC()) { + processOptCommandLineArg(prefix, arg); + } else { + VM_BaselineCompiler.processCommandLineArg(prefix, arg); + } + } else { + VM_BaselineCompiler.processCommandLineArg(prefix, arg); + } + } + + /** + * Compile a Java method when it is first invoked. + * + * @param method + * the method to compile + * @return its compiled method. + */ + public static VM_CompiledMethod compile(VM_NormalMethod method) { + if (VM.BuildForAdaptiveSystem) { + VM_CompiledMethod cm; + if (!VM_Controller.enabled) { + // System still early in boot process; compile with baseline compiler + cm = baselineCompile(method); + VM_ControllerMemory.incrementNumBase(); + } else { + if (!preloadChecked) { + preloadChecked = true; // prevent subsequent calls + // N.B. This will use irc options + if (VM_BaselineCompiler.options.PRELOAD_CLASS != null) { + compilationInProgress = true; // use baseline during preload + // Other than when boot options are requested (processed during + // preloadSpecialClass + // It is hard to communicate options for these special compilations. + // Use the + // default options and at least pick up the verbose if requested for + // base/irc + OPT_Options tmpoptions = (OPT_Options) ((OPT_Options) options) + .clone(); + tmpoptions.PRELOAD_CLASS = VM_BaselineCompiler.options.PRELOAD_CLASS; + tmpoptions.PRELOAD_AS_BOOT = VM_BaselineCompiler.options.PRELOAD_AS_BOOT; + if (VM_BaselineCompiler.options.PRINT_METHOD) { + tmpoptions.PRINT_METHOD = true; + } else { + tmpoptions = (OPT_Options) options; + } + OPT_Compiler.preloadSpecialClass(tmpoptions); + compilationInProgress = false; + } + } + if (VM_Controller.options.optIRC() || method.optCompileOnly()) { + if (// will only run once: don't bother optimizing + method.isClassInitializer() || + // exception in progress. 
can't use opt compiler: + // it uses exceptions and runtime doesn't support + // multiple pending (undelivered) exceptions [--DL] + VM_Thread.getCurrentThread().hardwareExceptionRegisters.inuse) { + // compile with baseline compiler + cm = baselineCompile(method); + VM_ControllerMemory.incrementNumBase(); + } else { // compile with opt compiler + VM_AOSInstrumentationPlan instrumentationPlan = new VM_AOSInstrumentationPlan( + VM_Controller.options, method); + OPT_CompilationPlan compPlan = new OPT_CompilationPlan(method, + (OPT_OptimizationPlanElement[]) optimizationPlan, + instrumentationPlan, (OPT_Options) options); + if (!method.optCompileOnly()) { + cm = optCompileWithFallBack(method, compPlan); + } else { + compilationInProgress = true; + try { + cm = optCompile(method, compPlan); + } catch (OPT_OptimizingCompilerException e) { + String msg = "Optimizing compiler " + + "(on method that can only be optimizing compiler compiled): " + + "can't optimize \"" + method + "\""; + throw new Error(msg, e); + } finally { + compilationInProgress = false; + } + } + } + } else { + if ((VM_Controller.options.BACKGROUND_RECOMPILATION + && (!VM_Controller.options.ENABLE_REPLAY_COMPILE) && (!VM_Controller.options.ENABLE_PRECOMPILE))) { + // must be an inital compilation: compile with baseline compiler + // or if recompilation with OSR. + cm = baselineCompile(method); + VM_ControllerMemory.incrementNumBase(); + } else { + if (VM_CompilerAdviceAttribute.hasAdvice()) { + VM_CompilerAdviceAttribute attr = VM_CompilerAdviceAttribute + .getCompilerAdviceInfo(method); + if (attr.getCompiler() != VM_CompiledMethod.OPT) { + cm = fallback(method); + VM_AOSLogging.recordCompileTime(cm, 0.0); + return cm; + } + int newCMID = -2; + OPT_CompilationPlan compPlan; + if (VM_Controller.options.counters()) { + // for invocation counter, we only use one optimization level + compPlan = VM_InvocationCounts.createCompilationPlan(method); + } else { + // for now there is not two options for sampling, so + // we don't have to use: if (VM_Controller.options.sampling()) + compPlan = VM_Controller.recompilationStrategy + .createCompilationPlan(method, attr.getOptLevel(), null); + } + VM_AOSLogging.recompilationStarted(compPlan); + newCMID = recompileWithOpt(compPlan); + cm = newCMID == -1 ? null : VM_CompiledMethods + .getCompiledMethod(newCMID); + if (newCMID == -1) { + VM_AOSLogging.recompilationAborted(compPlan); + } else if (newCMID > 0) { + VM_AOSLogging.recompilationCompleted(compPlan); + } + if (cm == null) { // if recompilation is aborted + cm = baselineCompile(method); + VM_ControllerMemory.incrementNumBase(); + } + } else { + // check to see if there is a compilation plan for this method. + VM_ControllerPlan plan = VM_ControllerMemory + .findLatestPlan(method); + if (plan == null + || plan.getStatus() != VM_ControllerPlan.IN_PROGRESS) { + // initial compilation or some other funny state: compile with + // baseline compiler + cm = baselineCompile(method); + VM_ControllerMemory.incrementNumBase(); + } else { + cm = plan.doRecompile(); + if (cm == null) { + // opt compilation aborted for some reason. 
+ cm = baselineCompile(method); + } + } + } + } + } + } + if ((VM_Controller.options.ENABLE_ADVICE_GENERATION) + && (cm.getCompilerType() == VM_CompiledMethod.BASELINE) + && VM_Controller.enabled) { + VM_AOSGenerator.baseCompilationCompleted(cm); + } + VM_AOSLogging.recordCompileTime(cm, 0.0); + return cm; + } else { + return baselineCompile(method); + } + } + + /** + * Compile the stub for a native method when it is first invoked. + * + * @param method + * the method to compile + * @return its compiled method. + */ + public static VM_CompiledMethod compile(VM_NativeMethod method) { + VM_Callbacks.notifyMethodCompile(method, VM_CompiledMethod.JNI); + long start = 0; + if (VM.MeasureCompilation || VM.BuildForAdaptiveSystem) { + start = VM_Thread.getCurrentThread().accumulateCycles(); + } + + VM_CompiledMethod cm = VM_JNICompiler.compile(method); + if (VM.verboseJNI) { + VM.sysWriteln("[Dynamic-linking native method " + + method.getDeclaringClass() + "." + method.getName() + " " + + method.getDescriptor()); + } + + if (VM.MeasureCompilation || VM.BuildForAdaptiveSystem) { + long end = VM_Thread.getCurrentThread().accumulateCycles(); + double compileTime = VM_Time.cyclesToMillis(end - start); + cm.setCompilationTime(compileTime); + record(JNI_COMPILER, method, cm); + } + + return cm; + } + + /** + * returns the string version of compiler number, using the naming scheme in + * this file + * + * @param compiler + * the compiler of interest + * @return the string version of compiler number + */ + public static String getCompilerName(byte compiler) { + return name[compiler]; + } + +} Copied: ext/org/jikesrvm/compilers/opt (from rev 16, ext/org/jikesrvm/opt) Modified: ext/org/jikesrvm/compilers/opt/OPT_Simplifier.java =================================================================== --- ext/org/jikesrvm/opt/OPT_Simplifier.java 2007-03-31 14:04:36 UTC (rev 16) +++ ext/org/jikesrvm/compilers/opt/OPT_Simplifier.java 2007-04-02 12:11:23 UTC (rev 19) @@ -6,130 +6,138 @@ * * (C) Copyright IBM Corp. 2001 */ -package org.jikesrvm.opt; +package org.jikesrvm.compilers.opt; import org.jikesrvm.classloader.*; +import org.jikesrvm.compilers.opt.ir.*; +import org.jikesrvm.objectmodel.VM_TIBLayoutConstants; import org.jikesrvm.VM; -import org.jikesrvm.VM_TIBLayoutConstants; -import org.jikesrvm.opt.ir.*; import org.vmmagic.unboxed.*; import java.lang.reflect.Array; import static org.jikesrvm.VM_SizeConstants.*; -import static org.jikesrvm.opt.ir.OPT_Operators.*; +import static org.jikesrvm.compilers.opt.ir.OPT_Operators.*; + /** - * A constant folder, strength reducer and axiomatic simplifier. - * - * <p> This module performs no analysis, it simply attempts to - * simplify the instruction as is. The intent is that - * analysis modules can call this transformation engine, allowing us to - * share the tedious simplification code among multiple analysis modules. - * - * <p> NOTE: For maintainability purposes, I've intentionally avoided being - * clever about combining 'similar' operators together into a combined case - * of the main switch switch statement. Also, operators are in sorted ordered - * within each major grouping. Please maintain this coding style. - * I'd rather have this module be 2000 lines of obviously correct code than - * 500 lines of clever code. - * + * A constant folder, strength reducer and axiomatic simplifier. + * + * <p> + * This module performs no analysis, it simply attempts to simplify the + * instruction as is. 
The intent is that analysis modules can call this + * transformation engine, allowing us to share the tedious simplification code + * among multiple analysis modules. + * + * <p> + * NOTE: For maintainability purposes, I've intentionally avoided being clever + * about combining 'similar' operators together into a combined case of the main + * switch switch statement. Also, operators are in sorted ordered within each + * major grouping. Please maintain this coding style. I'd rather have this + * module be 2000 lines of obviously correct code than 500 lines of clever code. + * * @author Dave Grove * @author Ian Rogers */ public abstract class OPT_Simplifier extends OPT_IRTools { // NOTE: The convention is that constant folding is controlled based // on the type of the result of the operator, not the type of its inputs. - /** + /** * Constant fold integer operations? */ public static final boolean CF_INT = true; - /** + + /** * Constant fold address operations? */ public static final boolean CF_LONG = true; - /** + /** * Constant fold address operations? */ public static final boolean CF_ADDR = true; - /** - * Constant fold float operations? Default is true, flip to avoid - * consuming precious JTOC slots to hold new constant values. + /** + * Constant fold float operations? Default is true, flip to avoid consuming + * precious JTOC slots to hold new constant values. */ public static final boolean CF_FLOAT = true; - /** - * Constant fold double operations? Default is true, flip to avoid - * consuming precious JTOC slots to hold new constant values. + + /** + * Constant fold double operations? Default is true, flip to avoid consuming + * precious JTOC slots to hold new constant values. */ public static final boolean CF_DOUBLE = true; - /** - * Constant fold field operations? Default is true, flip to avoid - * consuming precious JTOC slots to hold new constant values. + + /** + * Constant fold field operations? Default is true, flip to avoid consuming + * precious JTOC slots to hold new constant values. */ public static final boolean CF_FIELDS = false; - /** - * Constant fold TIB operations? Default is true, flip to avoid - * consuming precious JTOC slots to hold new constant values. + /** + * Constant fold TIB operations? Default is true, flip to avoid consuming + * precious JTOC slots to hold new constant values. */ public static final boolean CF_TIB = false; /** * Effect of the simplification on Def-Use chains */ - public enum DefUseEffect{ + public enum DefUseEffect { /** - * Enumeration value to indicate an operation is unchanged, - * although the order of operands may have been canonicalized and - * type information strengthened. + * Enumeration value to indicate an operation is unchanged, although the + * order of operands may have been canonicalized and type information + * strengthened. */ UNCHANGED, /** - * Enumeration value to indicate an operation has been replaced by - * a move instruction with a constant right hand side. + * Enumeration value to indicate an operation has been replaced by a move + * instruction with a constant right hand side. */ MOVE_FOLDED, /** - * Enumeration value to indicate an operation has been replaced by - * a move instruction with a non-constant right hand side. + * Enumeration value to indicate an operation has been replaced by a move + * instruction with a non-constant right hand side. */ MOVE_REDUCED, /** - * Enumeration value to indicate an operation has been replaced by - * an unconditional trap instruction. 
+ * Enumeration value to indicate an operation has been replaced by an + * unconditional trap instruction. */ TRAP_REDUCED, /** - * Enumeration value to indicate an operation has been replaced by - * a cheaper, but non-move instruction. + * Enumeration value to indicate an operation has been replaced by a + * cheaper, but non-move instruction. */ REDUCED - } + } /** - * Given an instruction, attempt to simplify it. - * The instruction will be mutated in place. - * - * <p> We don't deal with branching operations here -- - * doing peephole optimizations of branches - * is the job of a separate module. - * - * @param regpool register pool in case simplification requires a temporary register - * @param s the instruction to simplify + * Given an instruction, attempt to simplify it. The instruction will be + * mutated in place. + * + * <p> + * We don't deal with branching operations here -- doing peephole + * optimizations of branches is the job of a separate module. + * + * @param regpool + * register pool in case simplification requires a temporary register + * @param s + * the instruction to simplify * @return one of UNCHANGED, MOVE_FOLDED, MOVE_REDUCED, TRAP_REDUCED, REDUCED */ - public static DefUseEffect simplify(OPT_AbstractRegisterPool regpool, OPT_Instruction s) { + public static DefUseEffect simplify(OPT_AbstractRegisterPool regpool, + OPT_Instruction s) { DefUseEffect result; char opcode = s.getOpcode(); switch (opcode) { - //////////////////// - // GUARD operations - //////////////////// + // ////////////////// + // GUARD operations + // ////////////////// case GUARD_COMBINE_opcode: result = guardCombine(s); break; - //////////////////// - // TRAP operations - //////////////////// + // ////////////////// + // TRAP operations + // ////////////////// case TRAP_IF_opcode: result = trapIf(s); break; @@ -166,9 +174,9 @@ case MUST_IMPLEMENT_INTERFACE_opcode: result = mustImplementInterface(s); break; - //////////////////// - // Conditional moves - //////////////////// + // ////////////////// + // Conditional moves + // ////////////////// case INT_COND_MOVE_opcode: result = intCondMove(s); break; @@ -187,9 +195,9 @@ case GUARD_COND_MOVE_opcode: result = guardCondMove(s); break; - //////////////////// - // INT ALU operations - //////////////////// + // ////////////////// + // INT ALU operations + // ////////////////// case BOOLEAN_NOT_opcode: result = booleanNot(s); break; @@ -202,9 +210,9 @@ case BOOLEAN_CMP2_INT_OR_opcode: result = booleanCmp2IntOr(s); break; - //case BOOLEAN_CMP2_INT_AND: - //result = booleanCmp2IntAnd(s); - //break; + // case BOOLEAN_CMP2_INT_AND: + // result = booleanCmp2IntAnd(s); + // break; case INT_ADD_opcode: result = intAdd(s); break; @@ -244,9 +252,9 @@ case INT_XOR_opcode: result = intXor(s); break; - //////////////////// - // WORD ALU operations - //////////////////// + // ////////////////// + // WORD ALU operations + // ////////////////// case REF_ADD_opcode: result = refAdd(s); break; @@ -274,9 +282,9 @@ case REF_XOR_opcode: result = refXor(s); break; - //////////////////// - // LONG ALU operations - //////////////////// + // ////////////////// + // LONG ALU operations + // ////////////////// case LONG_ADD_opcode: result = longAdd(s); break; @@ -319,9 +327,9 @@ case LONG_XOR_opcode: result = longXor(s); break; - //////////////////// - // FLOAT ALU operations - //////////////////// + // ////////////////// + // FLOAT ALU operations + // ////////////////// case FLOAT_ADD_opcode: result = floatAdd(s); break; @@ -346,9 +354,9 @@ case FLOAT_SUB_opcode: result = 
floatSub(s); break; - //////////////////// - // DOUBLE ALU operations - //////////////////// + // ////////////////// + // DOUBLE ALU operations + // ////////////////// case DOUBLE_ADD_opcode: result = doubleAdd(s); break; @@ -373,9 +381,9 @@ case DOUBLE_SUB_opcode: result = doubleSub(s); break; - //////////////////// - // CONVERSION operations - //////////////////// + // ////////////////// + // CONVERSION operations + // ////////////////// case DOUBLE_2FLOAT_opcode: result = double2Float(s); break; @@ -448,9 +456,9 @@ case LONG_BITS_AS_DOUBLE_opcode: result = longBitsAsDouble(s); break; - //////////////////// - // Field operations - //////////////////// + // ////////////////// + // Field operations + // ////////////////// case ARRAYLENGTH_opcode: result = arrayLength(s); break; @@ -488,28 +496,28 @@ result = DefUseEffect.UNCHANGED; } if (VM.VerifyAssertions) { - switch (result) { - case MOVE_FOLDED: - // Check move has constant RHS - VM._assert(Move.conforms(s) && - (Move.getVal(s) instanceof OPT_ConstantOperand), - "RHS of move " + s + " should be constant during simplification of " - + OPT_OperatorNames.operatorName[opcode]); - break; - case MOVE_REDUCED: - // Check move has non-constant RHS - VM._assert(Move.conforms(s) && - !(Move.getVal(s) instanceof OPT_ConstantOperand), - "RHS of move " + s + " shouldn't be constant during simplification of " - + OPT_OperatorNames.operatorName[opcode]); - break; - default: - // Nothing to check - } + switch (result) { + case MOVE_FOLDED: + // Check move has constant RHS + VM._assert(Move.conforms(s) + && (Move.getVal(s) instanceof OPT_ConstantOperand), "RHS of move " + + s + " should be constant during simplification of " + + OPT_OperatorNames.operatorName[opcode]); + break; + case MOVE_REDUCED: + // Check move has non-constant RHS + VM._assert(Move.conforms(s) + && !(Move.getVal(s) instanceof OPT_ConstantOperand), "RHS of move " + + s + " shouldn't be constant during simplification of " + + OPT_OperatorNames.operatorName[opcode]); + break; + default: + // Nothing to check + } } return result; } - + private static DefUseEffect guardCombine(OPT_Instruction s) { OPT_Operand op1 = Binary.getVal1(s); OPT_Operand op2 = Binary.getVal2(s); @@ -522,26 +530,25 @@ // ONLY OP2 IS TrueGuard: MOVE REDUCE return DefUseEffect.MOVE_REDUCED; } - } - else if(op1 instanceof OPT_TrueGuardOperand) { + } else if (op1 instanceof OPT_TrueGuardOperand) { // ONLY OP1 IS TrueGuard: MOVE REDUCE Move.mutate(s, GUARD_MOVE, Binary.getClearResult(s), op2); return DefUseEffect.MOVE_REDUCED; - } - else { + } else { return DefUseEffect.UNCHANGED; } } + private static DefUseEffect trapIf(OPT_Instruction s) { - { + { OPT_Operand op1 = TrapIf.getVal1(s); OPT_Operand op2 = TrapIf.getVal2(s); if (op1.isConstant()) { if (op2.isConstant()) { int willTrap = TrapIf.getCond(s).evaluate(op1, op2); if (willTrap == OPT_ConditionOperand.TRUE) { - Trap.mutate(s, TRAP, TrapIf.getClearGuardResult(s), - TrapIf.getClearTCode(s)); + Trap.mutate(s, TRAP, TrapIf.getClearGuardResult(s), TrapIf + .getClearTCode(s)); return DefUseEffect.TRAP_REDUCED; } else if (willTrap == OPT_ConditionOperand.FALSE) { Move.mutate(s, GUARD_MOVE, TrapIf.getClearGuardResult(s), TG()); @@ -549,44 +556,43 @@ } } else { // canonicalize - TrapIf.mutate(s, TRAP_IF, TrapIf.getClearGuardResult(s), - TrapIf.getClearVal2(s), - TrapIf.getClearVal1(s), - TrapIf.getClearCond(s).flipOperands(), - TrapIf.getClearTCode(s)); + TrapIf.mutate(s, TRAP_IF, TrapIf.getClearGuardResult(s), TrapIf + .getClearVal2(s), TrapIf.getClearVal1(s), 
TrapIf.getClearCond(s) + .flipOperands(), TrapIf.getClearTCode(s)); } } - } + } return DefUseEffect.UNCHANGED; } + private static DefUseEffect nullCheck(OPT_Instruction s) { - OPT_Operand ref = NullCheck.getRef(s); - if (ref.isNullConstant() || - (ref.isAddressConstant() && ref.asAddressConstant().value.isZero())) { - Trap.mutate(s, TRAP, NullCheck.getClearGuardResult(s), - OPT_TrapCodeOperand.NullPtr()); - return DefUseEffect.TRAP_REDUCED; - } else if (ref.isConstant()) { - // object, string, class or non-null address constant - - // Make the slightly suspect assumption that all non-zero address - // constants are actually valid pointers. Not necessarily true, - // but unclear what else we can do. - Move.mutate(s, GUARD_MOVE, NullCheck.getClearGuardResult(s), TG()); - return DefUseEffect.MOVE_FOLDED; - } - else { - return DefUseEffect.UNCHANGED; - } + OPT_Operand ref = NullCheck.getRef(s); + if (ref.isNullConstant() + || (ref.isAddressConstant() && ref.asAddressConstant().value.isZero())) { + Trap.mutate(s, TRAP, NullCheck.getClearGuardResult(s), + OPT_TrapCodeOperand.NullPtr()); + return DefUseEffect.TRAP_REDUCED; + } else if (ref.isConstant()) { + // object, string, class or non-null address constant + + // Make the slightly suspect assumption that all non-zero address + // constants are actually valid pointers. Not necessarily true, + // but unclear what else we can do. + Move.mutate(s, GUARD_MOVE, NullCheck.getClearGuardResult(s), TG()); + return DefUseEffect.MOVE_FOLDED; + } else { + return DefUseEffect.UNCHANGED; + } } + private static DefUseEffect intZeroCheck(OPT_Instruction s) { - { + { OPT_Operand op = ZeroCheck.getValue(s); if (op.isIntConstant()) { int val = op.asIntConstant().value; if (val == 0) { Trap.mutate(s, TRAP, ZeroCheck.getClearGuardResult(s), - OPT_TrapCodeOperand.DivByZero()); + OPT_TrapCodeOperand.DivByZero()); return DefUseEffect.TRAP_REDUCED; } else { Move.mutate(s, GUARD_MOVE, ZeroCheck.getClearGuardResult(s), TG()); @@ -596,14 +602,15 @@ } return DefUseEffect.UNCHANGED; } + private static DefUseEffect longZeroCheck(OPT_Instruction s) { - { + { OPT_Operand op = ZeroCheck.getValue(s); if (op.isLongConstant()) { long val = op.asLongConstant().value; if (val == 0L) { Trap.mutate(s, TRAP, ZeroCheck.getClearGuardResult(s), - OPT_TrapCodeOperand.DivByZero()); + OPT_TrapCodeOperand.DivByZero()); return DefUseEffect.TRAP_REDUCED; } else { Move.mutate(s, GUARD_MOVE, ZeroCheck.getClearGuardResult(s), TG()); @@ -613,7 +620,9 @@ } return DefUseEffect.UNCHANGED; } - private static DefUseEffect checkcast(OPT_AbstractRegisterPool regpool, OPT_Instruction s) { + + private static DefUseEffect checkcast(OPT_AbstractRegisterPool regpool, + OPT_Instruction s) { OPT_Operand ref = TypeCheck.getRef(s); if (ref.isNullConstant()) { Empty.mutate(s, NOP); @@ -634,6 +643,7 @@ } } } + private static DefUseEffect checkcastNotNull(OPT_Instruction s) { OPT_Operand ref = TypeCheck.getRef(s); VM_TypeReference lhsType = TypeCheck.getType(s).getTypeRef(); @@ -645,18 +655,20 @@ } else if (ans == OPT_Constants.NO) { VM_Type rType = rhsType.peekResolvedType(); if (rType != null && rType.isClassType() && rType.asClass().isFinal()) { - // only final (or precise) rhs types can be optimized since rhsType may be conservative + // only final (or precise) rhs types can be optimized since rhsType may + // be conservative Trap.mutate(s, TRAP, null, OPT_TrapCodeOperand.CheckCast()); return DefUseEffect.TRAP_REDUCED; } else { return DefUseEffect.UNCHANGED; } - } - else { + } else { return 
DefUseEffect.UNCHANGED; } } - private static DefUseEffect instanceOf(OPT_AbstractRegisterPool regpool, OPT_Instruction s) { + + private static DefUseEffect instanceOf(OPT_AbstractRegisterPool regpool, + OPT_Instruction s) { OPT_Operand ref = InstanceOf.getRef(s); if (ref.isNullConstant()) { Move.mutate(s, INT_MOVE, InstanceOf.getClearResult(s), IC(0)); @@ -668,25 +680,26 @@ VM_TypeReference lhsType = InstanceOf.getType(s).getTypeRef(); VM_TypeReference rhsType = ref.getType(); byte ans = OPT_ClassLoaderProxy.includesType(lhsType, rhsType); - // NOTE: OPT_Constants.YES doesn't help because ref may be null and null instanceof T is false + // NOTE: OPT_Constants.YES doesn't help because ref may be null and null + // instanceof T is false if (ans == OPT_Constants.NO) { VM_Type rType = rhsType.peekResolvedType(); if (rType != null && rType.isClassType() && rType.asClass().isFinal()) { - // only final (or precise) rhs types can be optimized since rhsType may be conservative + // only final (or precise) rhs types can be optimized since rhsType + // may be conservative Move.mutate(s, INT_MOVE, InstanceOf.getClearResult(s), IC(0)); return DefUseEffect.MOVE_FOLDED; - } - else { + } else { return DefUseEffect.UNCHANGED; } - } - else { + } else { return DefUseEffect.UNCHANGED; } } } + private static DefUseEffect instanceOfNotNull(OPT_Instruction s) { - { + { OPT_Operand ref = InstanceOf.getRef(s); VM_TypeReference lhsType = InstanceOf.getType(s).getTypeRef(); VM_TypeReference rhsType = ref.getType(); @@ -697,7 +710,8 @@ } else if (ans == OPT_Constants.NO) { VM_Type rType = rhsType.peekResolvedType(); if (rType != null && rType.isClassType() && rType.asClass().isFinal()) { - // only final (or precise) rhs types can be optimized since rhsType may be conservative + // only final (or precise) rhs types can be optimized since rhsType + // may be conservative Move.mutate(s, INT_MOVE, InstanceOf.getClearResult(s), IC(0)); return DefUseEffect.MOVE_FOLDED; } @@ -705,103 +719,118 @@ } return DefUseEffect.UNCHANGED; } - private static DefUseEffect objarrayStoreCheck(OPT_Instruction s){ + + private static DefUseEffect objarrayStoreCheck(OPT_Instruction s) { OPT_Operand val = StoreCheck.getVal(s); if (val.isNullConstant()) { // Writing null into an array is trivially safe - Move.mutate(s, GUARD_MOVE, StoreCheck.getClearGuardResult(s), StoreCheck.getClearGuard(s)); + Move.mutate(s, GUARD_MOVE, StoreCheck.getClearGuardResult(s), StoreCheck + .getClearGuard(s)); return DefUseEffect.MOVE_REDUCED; - } - else { + } else { OPT_Operand ref = StoreCheck.getRef(s); VM_TypeReference arrayTypeRef = ref.getType(); - VM_Type typeOfIMElem = arrayTypeRef.getInnermostElementType().peekResolvedType(); + VM_Type typeOfIMElem = arrayTypeRef.getInnermostElementType() + .peekResolvedType(); if (typeOfIMElem != null) { VM_Type typeOfVal = val.getType().peekResolvedType(); - if ((typeOfIMElem == typeOfVal) && - (typeOfIMElem.isPrimitiveType() || - typeOfIMElem.asClass().isFinal())) { + if ((typeOfIMElem == typeOfVal) + && (typeOfIMElem.isPrimitiveType() || typeOfIMElem.asClass() + .isFinal())) { // Writing something of a final type to an array of that // final type is safe Move.mutate(s, GUARD_MOVE, StoreCheck.getClearGuardResult(s), - StoreCheck.getClearGuard(s)); + StoreCheck.getClearGu... [truncated message content] |
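
For readers skimming the new VM_RuntimeCompiler.java above: the totalLogOfRates / totalLogValueMethods bookkeeping accumulates compilation rates as natural logarithms and reports their geometric mean as exp((log a + log b + ... + log n) / n), because the raw product of rates would overflow a double. The following is a minimal standalone sketch of just that idea; the class, field, and method names here are illustrative and are not part of the committed code.

    // CompilationRateStats.java -- illustrative sketch, not part of the patch above
    public final class CompilationRateStats {

      private double totalLogOfRates = 0.0;  // running sum of ln(rate), one term per method
      private int totalLogValueMethods = 0;  // number of rates folded into the sum

      // Record one compilation: 'bytecodeBytes' of bytecode compiled in 'millis' milliseconds.
      // The rate (bytecode bytes per millisecond) is stored as a natural log so the
      // running total stays within double range no matter how many methods are recorded.
      public void record(int bytecodeBytes, double millis) {
        double rate = bytecodeBytes / millis;
        totalLogOfRates += Math.log(rate);
        totalLogValueMethods++;
      }

      // Geometric mean of the recorded rates: exp((ln a + ln b + ... + ln n) / n).
      public double geometricMeanRate() {
        return Math.exp(totalLogOfRates / totalLogValueMethods);
      }

      public static void main(String[] args) {
        CompilationRateStats stats = new CompilationRateStats();
        stats.record(400, 2.0);    // 200 bcb/ms
        stats.record(900, 3.0);    // 300 bcb/ms
        stats.record(1200, 10.0);  // 120 bcb/ms
        System.out.println("geometric mean rate (bcb/ms): " + stats.geometricMeanRate());
      }
    }

Summing logs instead of multiplying raw rates is why the bcb/ms column in the report() output can be an unweighted geometric mean over arbitrarily many compilations without overflow.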