From: <cap...@us...> - 2007-03-20 18:53:30
Revision: 6 http://svn.sourceforge.net/pearcolator/?rev=6&view=rev Author: captain5050 Date: 2007-03-20 08:49:03 -0700 (Tue, 20 Mar 2007) Log Message: ----------- Build fixes. Move files from rvm into dbt Modified Paths: -------------- build.xml src/org/binarytranslator/vmInterface/DBT_Trace.java Added Paths: ----------- ext/DBT_Dummy.java ext/org/jikesrvm/classloader/ ext/org/jikesrvm/classloader/VM_Member.java ext/org/jikesrvm/classloader/VM_Method.java ext/org/jikesrvm/classloader/VM_NormalMethod.java ext/org/jikesrvm/opt/OPT_Simplifier.java ext/org/jikesrvm/opt/ir/OPT_BC2IR.java ext/org/jikesrvm/opt/ir/OPT_ConditionOperand.java ext/org/jikesrvm/opt/ir/OPT_GenerationContext.java ext/org/jikesrvm/opt/ir/OPT_HIRInfo.java ext/org/jikesrvm/opt/ir/ia32/ ext/org/jikesrvm/opt/ir/ia32/OPT_IA32ConditionOperand.java Modified: build.xml =================================================================== --- build.xml 2007-03-19 16:01:57 UTC (rev 5) +++ build.xml 2007-03-20 15:49:03 UTC (rev 6) @@ -15,16 +15,34 @@ </javac> </target> <target name="compile-external"> + <delete verbose="true"> + <fileset dir="${build.classes}"> + <include name="org/jikesrvm/opt/ir/ia32/OPT_IA32ConditionOperand.class" /> + <include name="org/jikesrvm/opt/ir/OPT_HIRGenerator.class" /> + <include name="org/jikesrvm/opt/ir/OPT_GenerationContext.class" /> + <include name="org/jikesrvm/opt/ir/OPT_ConditionOperand.class" /> + <include name="org/jikesrvm/opt/ir/OPT_BC2IR.class" /> + <include name="org/jikesrvm/opt/ir/OPT_HIRInfo.class" /> + <include name="org/jikesrvm/opt/OPT_Simplifier.class" /> + <include name="org/jikesrvm/ppc/PPC_Disassembler.class" /> + <include name="org/jikesrvm/ppc/opcode_tab.class" /> + <include name="org/jikesrvm/ppc/opcodeXX.class" /> + <include name="org/jikesrvm/classloader/VM_Method.class" /> + <include name="org/jikesrvm/classloader/VM_Member.class" /> + <include name="org/jikesrvm/classloader/VM_NormalMethod.class" /> + </fileset> + </delete> <javac destdir="${build.classes}" debug="true" debugLevel="lines,source" source="1.5" target="1.5" srcdir="${dbt-ext.java}"> + <include name="DBT_Dummy.java" /> <classpath> <pathelement location="${build.vmmagic-stub.classes}"/> <pathelement location="${build.classes}"/> </classpath> </javac> </target> -</project> \ No newline at end of file +</project> Added: ext/DBT_Dummy.java =================================================================== --- ext/DBT_Dummy.java (rev 0) +++ ext/DBT_Dummy.java 2007-03-20 15:49:03 UTC (rev 6) @@ -0,0 +1,25 @@ +/* + * This file is part of binarytranslator.org. The binarytranslator.org + * project is distributed under the Common Public License (CPL). 
+ * A copy of the license is included in the distribution, and is also + * available at http://www.opensource.org/licenses/cpl1.0.php + * + * (C) Copyright Ian Rogers, The University of Manchester 2003-2006 + */ + +/** + * Dummy class containing enough references to force java compiler + * to find every class comprising the chnages to the opt compiler for DBT + */ +class OptDummy { + static org.jikesrvm.opt.ir.ia32.OPT_IA32ConditionOperand a; + static org.jikesrvm.opt.ir.OPT_HIRGenerator b; + static org.jikesrvm.opt.ir.OPT_GenerationContext c; + static org.jikesrvm.opt.ir.OPT_ConditionOperand d; + static org.jikesrvm.opt.ir.OPT_HIRInfo e; + static org.jikesrvm.opt.OPT_Simplifier f; + static org.jikesrvm.ppc.PPC_Disassembler g; + static org.jikesrvm.classloader.VM_Method j; + static org.jikesrvm.classloader.VM_Member k; + static org.jikesrvm.classloader.VM_NormalMethod l; +} Added: ext/org/jikesrvm/classloader/VM_Member.java =================================================================== --- ext/org/jikesrvm/classloader/VM_Member.java (rev 0) +++ ext/org/jikesrvm/classloader/VM_Member.java 2007-03-20 15:49:03 UTC (rev 6) @@ -0,0 +1,197 @@ +/* + * This file is part of Jikes RVM (http://jikesrvm.sourceforge.net). + * The Jikes RVM project is distributed under the Common Public License (CPL). + * A copy of the license is included in the distribution, and is also + * available at http://www.opensource.org/licenses/cpl1.0.php + * + * (C) Copyright IBM Corp 2001,2002 + */ +package org.jikesrvm.classloader; + +import org.jikesrvm.*; +import org.vmmagic.pragma.*; +import org.vmmagic.unboxed.Offset; + +/** + * A field or method of a java class. + * + * @author Bowen Alpern + * @author Dave Grove + * @author Derek Lieber + */ +public abstract class VM_Member extends VM_AnnotatedElement implements VM_Constants, VM_ClassLoaderConstants { + + /** + * The class that declared this member, avaliable by calling + * getDeclaringClass once the class is loaded. + */ + private final VM_TypeReference declaringClass; + + /** + * The canonical VM_MemberReference for this member + */ + protected final VM_MemberReference memRef; + + /** + * The modifiers associated with this member. + */ + public final short modifiers; + + /** + * The signature is a string representing the generic type for this + * field or method declaration, may be null + */ + private final VM_Atom signature; + + /** + * The member's jtoc/obj/tib offset in bytes. + * Set by {@link VM_Class#resolve()} + */ + protected int offset; + + /** + * NOTE: Only {@link VM_Class} is allowed to create an instance of a VM_Member. + * + * @param declaringClass the VM_TypeReference object of the class that declared this member + * @param memRef the canonical memberReference for this member. + * @param modifiers modifiers associated with this member. + * @param signature generic type of this member + * @param annotations array of runtime visible annotations + */ + protected VM_Member(VM_TypeReference declaringClass, VM_MemberReference memRef, + short modifiers, VM_Atom signature, + VM_Annotation[] annotations) { + super(annotations); + this.declaringClass = declaringClass; + this.memRef = memRef; + this.modifiers = modifiers; + this.signature = signature; + this.offset = Short.MIN_VALUE+1; // invalid value. Set to valid value during VM_Class.resolve() + } + + //--------------------------------------------------------------------// + // Section 1. // + // The following are available after class loading. 
// + //--------------------------------------------------------------------// + + /** + * Class that declared this field or method. Not available before + * the class is loaded. + */ + @Uninterruptible + public final VM_Class getDeclaringClass() { + return declaringClass.peekResolvedType().asClass(); + } + + /** + * Canonical member reference for this member. + */ + @Uninterruptible + public final VM_MemberReference getMemberRef() { + return memRef; + } + + /** + * Name of this member. + */ + @Uninterruptible + public final VM_Atom getName() { + return memRef.getName(); + } + + /** + * Descriptor for this member. + * something like "I" for a field or "(I)V" for a method. + */ + @Uninterruptible + public final VM_Atom getDescriptor() { + return memRef.getDescriptor(); + } + + /** + * Generic type for member + */ + public final VM_Atom getSignature() { + return signature; + } + + /** + * Get a unique id for this member. + * The id is the id of the canonical VM_MemberReference for this member + * and thus may be used to find the member by first finding the member reference. + */ + @Uninterruptible + public final int getId() { + return memRef.getId(); + } + + /* + * Define hashcode in terms of VM_Atom.hashCode to enable + * consistent hash codes during bootImage writing and run-time. + */ + public int hashCode() { + return memRef.hashCode(); + } + + public final String toString() { + return declaringClass + "." + getName() + " " + getDescriptor(); + } + + /** + * Usable from classes outside its package? + */ + public final boolean isPublic() { + return (modifiers & ACC_PUBLIC) != 0; + } + + /** + * Usable only from this class? + */ + public final boolean isPrivate() { + return (modifiers & ACC_PRIVATE) != 0; + } + + /** + * Usable from subclasses? + */ + public final boolean isProtected() { + return (modifiers & ACC_PROTECTED) != 0; + } + + /** + * Get the member's modifiers. + */ + public final int getModifiers() { + return modifiers; + } + + //------------------------------------------------------------------// + // Section 2. // + // The following are available after the declaring class has been // + // "resolved". // + //------------------------------------------------------------------// + + /** + * Offset of this field or method, in bytes. + * <ul> + * <li> For a static field: offset of field from start of jtoc + * <li> For a static method: offset of code object reference from start of jtoc + * <li> For a non-static field: offset of field from start of object + * <li> For a non-static method: offset of code object reference from start of tib + * </ul> + */ + @Uninterruptible + public final Offset getOffset() { + if (VM.VerifyAssertions) VM._assert(declaringClass.isResolved()); + return Offset.fromIntSignExtend(offset); + } + + /** + * Only meant to be used by VM_ObjectModel.layoutInstanceFields. + * TODO: refactor system so this functionality is in the classloader package + * and this method doesn't have to be final. + */ + public final void setOffset(Offset off) { + offset = off.toInt(); + } +} Added: ext/org/jikesrvm/classloader/VM_Method.java =================================================================== --- ext/org/jikesrvm/classloader/VM_Method.java (rev 0) +++ ext/org/jikesrvm/classloader/VM_Method.java 2007-03-20 15:49:03 UTC (rev 6) @@ -0,0 +1,642 @@ +/* + * This file is part of Jikes RVM (http://jikesrvm.sourceforge.net). + * The Jikes RVM project is distributed under the Common Public License (CPL). 
+ * A copy of the license is included in the distribution, and is also + * available at http://www.opensource.org/licenses/cpl1.0.php + * + * (C) Copyright IBM Corp 2001,2002 + */ +package org.jikesrvm.classloader; + +import org.jikesrvm.*; +import org.jikesrvm.ArchitectureSpecific.VM_CodeArray; +import org.jikesrvm.ArchitectureSpecific.VM_LazyCompilationTrampolineGenerator; + +import java.io.DataInputStream; +import java.io.IOException; + +import org.vmmagic.unboxed.Offset; +import org.vmmagic.pragma.*; + +/** + * A method of a java class corresponding to a method_info structure + * in the class file. A method is read from a class file using the + * {@link #readMethod} method. + * + * @author Bowen Alpern + * @author Dave Grove + * @author Derek Lieber + * @author Ian Rogers + */ +public abstract class VM_Method extends VM_Member implements VM_BytecodeConstants { + + /** + * current compiled method for this method + */ + protected VM_CompiledMethod currentCompiledMethod; + /** + * exceptions this method might throw (null --> none) + */ + protected final VM_TypeReference[] exceptionTypes; + /** + * Method paramter annotations from the class file that are + * described as runtime visible. These annotations are available to + * the reflection API. + */ + protected final VM_Annotation[] parameterAnnotations; + /** + * A value present in the method info tables of annotation types. It + * represents the default result from an annotation method. + */ + protected final Object annotationDefault; + /** + * The offset of this virtual method in the jtoc if it's been placed + * there by constant propagation, otherwise 0. + */ + private Offset jtocOffset; + + /** + * Construct a read method + * + * @param declaringClass the VM_Class object of the class that declared this field + * @param memRef the canonical memberReference for this method. + * @param modifiers modifiers associated with this method. + * @param exceptionTypes exceptions thrown by this method. + * @param signature generic type of this method. + * @param annotations array of runtime visible annotations + * @param parameterAnnotations array of runtime visible parameter annotations + * @param annotationDefault value for this annotation that appears + */ + protected VM_Method(VM_TypeReference declaringClass, VM_MemberReference memRef, + short modifiers, VM_TypeReference[] exceptionTypes, VM_Atom signature, + VM_Annotation[] annotations, + VM_Annotation[] parameterAnnotations, + Object annotationDefault) + { + super(declaringClass, memRef, (short)(modifiers & APPLICABLE_TO_METHODS), signature, annotations); + this.parameterAnnotations = parameterAnnotations; + this.annotationDefault = annotationDefault; + memRef.asMethodReference().setResolvedMember(this); + this.exceptionTypes = exceptionTypes; + this.jtocOffset = Offset.fromIntSignExtend(-1); + } + + /** + * Called from {@link VM_Class#readClass(VM_TypeReference, DataInputStream)} to create an + * instance of a VM_Method by reading the relevant data from the argument bytecode stream. + * + * @param declaringClass the VM_TypeReference of the class being loaded + * @param constantPool the constantPool of the VM_Class object that's being constructed + * @param memRef the canonical memberReference for this member. + * @param modifiers modifiers associated with this member. 
+ * @param input the DataInputStream to read the method's attributes from + */ + static VM_Method readMethod(VM_TypeReference declaringClass, int[] constantPool, VM_MemberReference memRef, + short modifiers, DataInputStream input) throws IOException { + short tmp_localWords = 0; + short tmp_operandWords = 0; + byte[] tmp_bytecodes = null; + VM_ExceptionHandlerMap tmp_exceptionHandlerMap = null; + VM_TypeReference[] tmp_exceptionTypes = null; + int[] tmp_lineNumberMap = null; + VM_Atom tmp_signature = null; + VM_Annotation[] annotations = null; + VM_Annotation[] parameterAnnotations = null; + Object tmp_annotationDefault = null; + + // Read the attributes + for (int i = 0, n = input.readUnsignedShort(); i<n; i++) { + VM_Atom attName = VM_Class.getUtf(constantPool, input.readUnsignedShort()); + int attLength = input.readInt(); + + // Only bother to interpret non-boring Method attributes + if (attName == VM_ClassLoader.codeAttributeName) { + tmp_operandWords = input.readShort(); + tmp_localWords = input.readShort(); + tmp_bytecodes = new byte[input.readInt()]; + input.readFully(tmp_bytecodes); + tmp_exceptionHandlerMap = VM_ExceptionHandlerMap.readExceptionHandlerMap(input, constantPool); + + // Read the attributes portion of the code attribute + for (int j = 0, n2 = input.readUnsignedShort(); j<n2; j++) { + attName = VM_Class.getUtf(constantPool, input.readUnsignedShort()); + attLength = input.readInt(); + + if (attName == VM_ClassLoader.lineNumberTableAttributeName) { + int cnt = input.readUnsignedShort(); + if (cnt != 0) { + tmp_lineNumberMap = new int[cnt]; + for (int k = 0; k<cnt; k++) { + int startPC = input.readUnsignedShort(); + int lineNumber = input.readUnsignedShort(); + tmp_lineNumberMap[k] = (lineNumber << BITS_IN_SHORT) | startPC; + } + } + } else { + // All other entries in the attribute portion of the code attribute are boring. 
+ input.skipBytes(attLength); + } + } + } else if (attName == VM_ClassLoader.exceptionsAttributeName) { + int cnt = input.readUnsignedShort(); + if (cnt != 0) { + tmp_exceptionTypes = new VM_TypeReference[cnt]; + for (int j = 0, m = tmp_exceptionTypes.length; j < m; ++j) { + tmp_exceptionTypes[j] = VM_Class.getTypeRef(constantPool, input.readUnsignedShort()); + } + } + } else if (attName == VM_ClassLoader.syntheticAttributeName) { + modifiers |= ACC_SYNTHETIC; + } else if (attName == VM_ClassLoader.signatureAttributeName) { + tmp_signature = VM_Class.getUtf(constantPool, input.readUnsignedShort()); + } else if (attName == VM_ClassLoader.runtimeVisibleAnnotationsAttributeName) { + annotations = VM_AnnotatedElement.readAnnotations(constantPool, input, 2, + declaringClass.getClassLoader()); + } else if (attName == VM_ClassLoader.runtimeVisibleParameterAnnotationsAttributeName) { + parameterAnnotations = VM_AnnotatedElement.readAnnotations(constantPool, input, 1, + declaringClass.getClassLoader()); + } else if (attName == VM_ClassLoader.annotationDefaultAttributeName) { + try { + tmp_annotationDefault = VM_Annotation.readValue(constantPool, input, declaringClass.getClassLoader()); + } + catch (ClassNotFoundException e){ + throw new Error(e); + } + } else { + // all other method attributes are boring + input.skipBytes(attLength); + } + } + VM_Method method; + if ((modifiers & ACC_NATIVE) != 0) { + method = new VM_NativeMethod(declaringClass, memRef, modifiers, tmp_exceptionTypes, tmp_signature, + annotations, parameterAnnotations, tmp_annotationDefault); + } else if ((modifiers & ACC_ABSTRACT) != 0) { + method = new VM_AbstractMethod(declaringClass, memRef, modifiers, tmp_exceptionTypes, tmp_signature, + annotations, parameterAnnotations, tmp_annotationDefault); + + } else { + method = new VM_NormalMethod(declaringClass, memRef, modifiers, tmp_exceptionTypes, + tmp_localWords, tmp_operandWords, tmp_bytecodes, + tmp_exceptionHandlerMap, tmp_lineNumberMap, + constantPool, tmp_signature, + annotations, parameterAnnotations, tmp_annotationDefault); + } + return method; + } + + /** + * Create a copy of the method that occurs in the annotation + * interface. The method body will contain a read of the field at + * the constant pool index specified. 
+ * + * @param annotationClass the class this method belongs to + * @param constantPool for the class + * @param memRef the member reference corresponding to this method + * @param interfaceMethod the interface method that will copied to + * produce the annotation method + * @param constantPoolIndex the index of the field that will be + * returned by this method + * @return the created method + */ + static VM_Method createAnnotationMethod(VM_TypeReference annotationClass, int[] constantPool, + VM_MemberReference memRef, VM_Method interfaceMethod, + int constantPoolIndex) { + byte[] bytecodes = new byte[] { + (byte)JBC_aload_0, + (byte)JBC_getfield, + (byte)(constantPoolIndex >>> 8), + (byte)constantPoolIndex, + // Xreturn + (byte)typeRefToReturnBytecode(interfaceMethod.getReturnType()) + }; + return new VM_NormalMethod(annotationClass, memRef, (short)(ACC_PUBLIC|ACC_FINAL|ACC_SYNTHETIC), null, + (short)1, (short)2, bytecodes, + null, null, + constantPool, + null, null, null, null); + } + /** + * Create a method to initialise the annotation class + * + * @param aClass the class this method belongs to + * @param constantPool for the class + * @param memRef the member reference corresponding to this method + * @param objectInitIndex an index into the constant pool for a + * method reference to java.lang.Object.<init> + * @param aFields + * @param aMethods + * @return the created method + */ + static VM_Method createAnnotationInit(VM_TypeReference aClass, int[] constantPool, + VM_MemberReference memRef, int objectInitIndex, + VM_Field[] aFields, VM_Method[] aMethods, + int[] defaultConstants) { + byte[] bytecode = new byte[6+(defaultConstants.length*7)]; + bytecode[0] = (byte)JBC_aload_0; // stack[0] = this + bytecode[1] = (byte)JBC_aload_1; // stack[1] = instanceof VM_Annotation + bytecode[2] = (byte)JBC_invokespecial; + bytecode[3] = (byte)(objectInitIndex >>> 8); + bytecode[4] = (byte)objectInitIndex; + for(int i=0, j=0; i < aMethods.length; i++) { + if(aMethods[i].annotationDefault != null) { + bytecode[(j*7)+5+0] = (byte)JBC_aload_0; // stack[0] = this + if(VM_Class.getLiteralSize(constantPool, defaultConstants[j]) == BYTES_IN_INT) { + bytecode[(j*7)+5+1] = (byte)JBC_ldc_w; // stack[1] = value + } + else { + bytecode[(j*7)+5+1] = (byte)JBC_ldc2_w;// stack[1&2] = value + } + bytecode[(j*7)+5+2] = (byte)(defaultConstants[j] >>> 8); + bytecode[(j*7)+5+3] = (byte)defaultConstants[j]; + bytecode[(j*7)+5+4] = (byte)JBC_putfield; + bytecode[(j*7)+5+5] = (byte)(i >>> 8); + bytecode[(j*7)+5+6] = (byte)i; + j++; + } + } + bytecode[bytecode.length-1] = (byte)JBC_return; + return new VM_NormalMethod(aClass, memRef, (short)(ACC_PUBLIC|ACC_FINAL|ACC_SYNTHETIC), null, + (short)2, (short)3, bytecode, + null, null, + constantPool, + null, null, null, null); + } + + /** + * What would be the appropriate return bytecode for the given type + * reference? + */ + private static int typeRefToReturnBytecode(VM_TypeReference tr) { + if(!tr.isPrimitiveType()) { + return JBC_areturn; + } else { + VM_Primitive pt = (VM_Primitive)tr.peekResolvedType(); + if((pt == VM_Type.BooleanType)||(pt == VM_Type.ByteType)||(pt == VM_Type.ShortType)|| + (pt == VM_Type.CharType)||(pt == VM_Type.IntType)) { + return JBC_ireturn; + } + else if(pt == VM_Type.LongType) { + return JBC_lreturn; + } + else if(pt == VM_Type.FloatType) { + return JBC_freturn; + } + else if(pt == VM_Type.DoubleType) { + return JBC_dreturn; + } + else { + VM._assert(false); + return -1; + } + } + } + /** + * Is this method a class initializer? 
+ */ + @Uninterruptible + public final boolean isClassInitializer() { + return getName() == VM_ClassLoader.StandardClassInitializerMethodName; + } + + /** + * Is this method an object initializer? + */ + @Uninterruptible + public final boolean isObjectInitializer() { + return getName() == VM_ClassLoader.StandardObjectInitializerMethodName; + } + + /** + * Is this method a compiler-generated object initializer helper? + */ + @Uninterruptible + public final boolean isObjectInitializerHelper() { + return getName() == VM_ClassLoader.StandardObjectInitializerHelperMethodName; + } + + /** + * Type of this method's return value. + */ + @Uninterruptible + public final VM_TypeReference getReturnType() { + return memRef.asMethodReference().getReturnType(); + } + + /** + * Type of this method's parameters. + * Note: does *not* include implicit "this" parameter, if any. + */ + @Uninterruptible + public final VM_TypeReference[] getParameterTypes() { + return memRef.asMethodReference().getParameterTypes(); + } + + /** + * Space required by this method for its parameters, in words. + * Note: does *not* include implicit "this" parameter, if any. + */ + @Uninterruptible + public final int getParameterWords() { + return memRef.asMethodReference().getParameterWords(); + } + + /** + * Has machine code been generated for this method's bytecodes? + */ + public final boolean isCompiled() { + return currentCompiledMethod != null; + } + + /** + * Get the current compiled method for this method. + * Will return null if there is no current compiled method! + * + * We make this method Unpreemptible to avoid a race-condition + * in VM_Reflection.invoke. + * @return compiled method + */ + @Unpreemptible + public final synchronized VM_CompiledMethod getCurrentCompiledMethod() { + return currentCompiledMethod; + } + + /** + * Declared as statically dispatched? + */ + @Uninterruptible + public final boolean isStatic() { + return (modifiers & ACC_STATIC) != 0; + } + + /** + * Declared as non-overridable by subclasses? + */ + @Uninterruptible + public final boolean isFinal() { + return (modifiers & ACC_FINAL) != 0; + } + + /** + * Guarded by monitorenter/monitorexit? + */ + @Uninterruptible + public final boolean isSynchronized() { + return (modifiers & ACC_SYNCHRONIZED) != 0; + } + + /** + * Not implemented in java? + */ + @Uninterruptible + public final boolean isNative() { + return (modifiers & ACC_NATIVE) != 0; + } + + /** + * Not implemented in Java and use C not JNI calling convention + */ + public final boolean isSysCall() { + return isNative() && isStatic() && isAnnotationDeclared(VM_TypeReference.SysCall); + } + + /** + * Implemented in subclass? + */ + @Uninterruptible + public final boolean isAbstract() { + return (modifiers & ACC_ABSTRACT) != 0; + } + + /** + * Not present in source code file? + */ + public boolean isSynthetic() { + return (modifiers & ACC_SYNTHETIC) != 0; + } + + /** + * Exceptions thrown by this method - + * something like { "java/lang/IOException", "java/lang/EOFException" } + * @return info (null --> method doesn't throw any exceptions) + */ + @Uninterruptible + public final VM_TypeReference[] getExceptionTypes() { + return exceptionTypes; + } + + /** + * Is this method interruptible? + * In other words, should the compiler insert yieldpoints + * in method prologue, epilogue, and backwards branches. 
+ * Also, only methods that are Interruptible have stackoverflow checks + * in the method prologue (since there is no mechanism for handling a stackoverflow + * that doesn't violate the uninterruptiblity of the method). + * To determine if a method is interruptible, the following conditions + * are checked (<em>in order</em>): + * <ul> + * <li> If it is a <clinit> or <init> method then it is interruptible. + * <li> If is the synthetic 'this' method used by jikes to + * factor out default initializers for <init> methods then it is interruptible. + * <li> If it is annotated with <CODE>Interruptible</CODE> it is interruptible. + * <li> If it is annotated with <CODE>Preemptible</CODE> it is interruptible. + * <li> If it is annotated with <CODE>Uninterruptible</CODE> it is not interruptible. + * <li> If it is annotated with <CODE>UninterruptibleNoWarn</CODE> it is not interruptible. + * <li> If it is annotated with <CODE>Unpreemptible</CODE> it is not interruptible. + * <li> If its declaring class is annotated with <CODE>Uninterruptible</CODE> + * or <CODE>Unpreemptible</CODE> it is not interruptible. + * </ul> + */ + public final boolean isInterruptible() { + if (isClassInitializer() || isObjectInitializer()) return true; + if (isObjectInitializerHelper()) return true; + if (hasInterruptibleAnnotation()) return true; + if (hasPreemptibleAnnotation()) return true; + if (hasUninterruptibleNoWarnAnnotation()) return false; + if (hasUninterruptibleAnnotation()) return false; + if (hasUnpreemptibleAnnotation()) return false; + if (getDeclaringClass().hasUnpreemptibleAnnotation()) return false; + return !getDeclaringClass().hasUninterruptibleAnnotation(); + } + + /** + * Is the method Unpreemptible? See the comment in {@link #isInterruptible} + */ + public final boolean isUnpreemptible() { + if (isClassInitializer() || isObjectInitializer()) return false; + if (isObjectInitializerHelper()) return false; + if (hasInterruptibleAnnotation()) return false; + if (hasPreemptibleAnnotation()) return false; + if (hasUninterruptibleAnnotation()) return false; + if (hasUninterruptibleNoWarnAnnotation()) return false; + if (hasUnpreemptibleAnnotation()) return true; + return getDeclaringClass().hasUnpreemptibleAnnotation(); + } + + /** + * Is the method Uninterruptible? See the comment in {@link #isInterruptible} + */ + public final boolean isUninterruptible() { + if (isClassInitializer() || isObjectInitializer()) return false; + if (isObjectInitializerHelper()) return false; + if (hasInterruptibleAnnotation()) return false; + if (hasPreemptibleAnnotation()) return false; + if (hasUnpreemptibleAnnotation()) return false; + if (hasUninterruptibleAnnotation()) return true; + if (hasUninterruptibleNoWarnAnnotation()) return true; + return getDeclaringClass().hasUninterruptibleAnnotation(); + } + + /** + * Has this method been marked as forbidden to inline? + * ie., it is marked with the <CODE>NoInline</CODE> annotation or + * the <CODE>NoOptCompile</CODE> annotation? + */ + public final boolean hasNoInlinePragma() { + return (hasNoInlineAnnotation() || hasNoOptCompileAnnotation()); + } + + /** + * @return true if the method may write to a given field + */ + public boolean mayWrite(VM_Field field) { + return true; // be conservative. native methods can write to anything + } + + /** + * @return true if the method is the implementation of a runtime service + * that is called "under the covers" from the generated code and thus is not subject to + * inlining via the normal mechanisms. 
+ */ + public boolean isRuntimeServiceMethod() { + return false; // only VM_NormalMethods can be runtime service impls in Jikes RVM and they override this method + } + + //------------------------------------------------------------------// + // Section 2. // + // The following are available after the declaring class has been // + // "resolved". // + //------------------------------------------------------------------// + + /** + * Get the code array that corresponds to the entry point (prologue) for the method. + */ + public final synchronized VM_CodeArray getCurrentEntryCodeArray() { + VM_Class declaringClass = getDeclaringClass(); + if (VM.VerifyAssertions) VM._assert(declaringClass.isResolved()); + if (isCompiled()) { + return currentCompiledMethod.getEntryCodeArray(); + } else if (!VM.writingBootImage || isNative()) { + if (!isStatic() && !isObjectInitializer() && !isPrivate()) { + // A non-private virtual method. + if (declaringClass.isJavaLangObjectType() || + declaringClass.getSuperClass().findVirtualMethod(getName(), getDescriptor()) == null) { + // The root method of a virtual method family can use the lazy method invoker directly. + return VM_Entrypoints.lazyMethodInvokerMethod.getCurrentEntryCodeArray(); + } else { + // All other virtual methods in the family must generate unique stubs to + // ensure correct operation of the method test (guarded inlining of virtual calls). + return VM_LazyCompilationTrampolineGenerator.getTrampoline(); + } + } else { + // We'll never do a method test against this method. + // Therefore we can use the lazy method invoker directly. + return VM_Entrypoints.lazyMethodInvokerMethod.getCurrentEntryCodeArray(); + } + } else { + compile(); + return currentCompiledMethod.getEntryCodeArray(); + } + } + + /** + * Generate machine code for this method if valid + * machine code doesn't already exist. + * Return the resulting VM_CompiledMethod object. + */ + public final synchronized void compile() { + if (VM.VerifyAssertions) VM._assert(getDeclaringClass().isResolved()); + if (isCompiled()) return; + + if (VM.TraceClassLoading && VM.runningVM) VM.sysWrite("VM_Method: (begin) compiling " + this + "\n"); + + VM_CompiledMethod cm = genCode(); + + // Ensure that cm wasn't invalidated while it was being compiled. + synchronized(cm) { + if (cm.isInvalid()) { + VM_CompiledMethods.setCompiledMethodObsolete(cm); + } else { + currentCompiledMethod = cm; + } + } + + if (VM.TraceClassLoading && VM.runningVM) VM.sysWrite("VM_Method: (end) compiling " + this + "\n"); + } + + protected abstract VM_CompiledMethod genCode(); + + //----------------------------------------------------------------// + // Section 3. // + // The following are available after the declaring class has been // + // "instantiated". // + //----------------------------------------------------------------// + + /** + * Change machine code that will be used by future executions of this method + * (ie. optimized <-> non-optimized) + * @param compiledMethod new machine code + * Side effect: updates jtoc or method dispatch tables + * ("type information blocks") + * for this class and its subclasses + */ + public synchronized void replaceCompiledMethod(VM_CompiledMethod compiledMethod) { + if (VM.VerifyAssertions) VM._assert(getDeclaringClass().isInstantiated()); + // If we're replacing with a non-null compiledMethod, ensure that is still valid! 
+ if (compiledMethod != null) { + synchronized(compiledMethod) { + if (compiledMethod.isInvalid()) return; + } + } + + // Grab version that is being replaced + VM_CompiledMethod oldCompiledMethod = currentCompiledMethod; + currentCompiledMethod = compiledMethod; + + // Install the new method in jtoc/tib. If virtual, will also replace in + // all subclasses that inherited the method. + getDeclaringClass().updateMethod(this); + + // Replace constant-ified virtual method in JTOC if necessary + if(jtocOffset.toInt() != -1) { + VM_Statics.setSlotContents(jtocOffset, getCurrentEntryCodeArray()); + } + + // Now that we've updated the jtoc/tib, old version is obsolete + if (oldCompiledMethod != null) { + VM_CompiledMethods.setCompiledMethodObsolete(oldCompiledMethod); + } + } + + /** + * If CM is the current compiled code for this, then invaldiate it. + */ + public final synchronized void invalidateCompiledMethod(VM_CompiledMethod cm) { + if (VM.VerifyAssertions) VM._assert(getDeclaringClass().isInstantiated()); + if (currentCompiledMethod == cm) { + replaceCompiledMethod(null); + } + } + + /** + * Find or create a jtoc offset for this method + */ + public final synchronized Offset findOrCreateJtocOffset() { + if (VM.VerifyAssertions) VM._assert(!isStatic() && !isObjectInitializer()); + if(jtocOffset.EQ(Offset.zero())) { + jtocOffset = VM_Statics.allocateReferenceSlot(); + VM_Statics.setSlotContents(jtocOffset, getCurrentEntryCodeArray()); + } + return jtocOffset; + } +} Added: ext/org/jikesrvm/classloader/VM_NormalMethod.java =================================================================== --- ext/org/jikesrvm/classloader/VM_NormalMethod.java (rev 0) +++ ext/org/jikesrvm/classloader/VM_NormalMethod.java 2007-03-20 15:49:03 UTC (rev 6) @@ -0,0 +1,675 @@ +/* + * This file is part of Jikes RVM (http://jikesrvm.sourceforge.net). + * The Jikes RVM project is distributed under the Common Public License (CPL). + * A copy of the license is included in the distribution, and is also + * available at http://www.opensource.org/licenses/cpl1.0.php + * + * (C) Copyright IBM Corp 2001,2002, 2004 + */ +package org.jikesrvm.classloader; + +import org.jikesrvm.*; +import org.vmmagic.pragma.*; +import org.jikesrvm.opt.ir.OPT_HIRGenerator; +import org.jikesrvm.opt.ir.OPT_BC2IR; +import org.jikesrvm.opt.ir.OPT_GenerationContext; + +/** + * A method of a java class that has bytecodes. + * + * @author Bowen Alpern + * @author Stephen Fink + * @author Dave Grove + * @author Derek Lieber + * @modified Ian Rogers + */ +public class VM_NormalMethod + extends VM_Method + implements VM_BytecodeConstants +{ + + /* As we read the bytecodes for the method, we compute + * a simple summary of some interesting properties of the method. + * Because we do this for every method, we require the summarization to + * be fast and the computed summary to be very space efficient. + * + * The following constants encode the estimated relative cost in + * machine instructions when a particular class of bytecode is compiled + * by the optimizing compiler. The estimates approximate the typical + * optimization the compiler is able to perform. + * This information is used to estimate how big a method will be when + * it is inlined. 
+ */ + public static final int SIMPLE_OPERATION_COST = 1; + public static final int LONG_OPERATION_COST = 2; + public static final int ARRAY_LOAD_COST = 2; + public static final int ARRAY_STORE_COST = 2; + public static final int JSR_COST = 5; + public static final int CALL_COST = 6; + // Bias to inlining methods with magic + // most magics are quite cheap (0-1 instructions) + public static final int MAGIC_COST = 0; + // News are actually more expensive than calls + // but bias to inline methods that allocate + // objects becuase we expect better downstream optimization of + // the caller due to class analysis + // and propagation of nonNullness + public static final int ALLOCATION_COST = 4; + // Approximations, assuming some CSE/PRE of object model computations + public static final int CLASS_CHECK_COST = 2*SIMPLE_OPERATION_COST; + public static final int STORE_CHECK_COST = 4*SIMPLE_OPERATION_COST; + // Just a call. + public static final int THROW_COST = CALL_COST; + // Really a bunch of operations plus a call, but undercharge because + // we don't have worry about this causing an exponential growth of call chain + // and we probably want to inline synchronization + // (to get a chance to optimize it). + public static final int SYNCH_COST = 4*SIMPLE_OPERATION_COST; + // The additional cost of a switch isn't that large, since if the + // switch has more than a few cases the method will be too big to inline + // anyways. + public static final int SWITCH_COST = CALL_COST; + + // Definition of flag bits + protected static final char HAS_MAGIC = 0x8000; + protected static final char HAS_SYNCH = 0x4000; + protected static final char HAS_ALLOCATION = 0x2000; + protected static final char HAS_THROW = 0x1000; + protected static final char HAS_INVOKE = 0x0800; + protected static final char HAS_FIELD_READ = 0x0400; + protected static final char HAS_FIELD_WRITE= 0x0200; + protected static final char HAS_ARRAY_READ = 0x0100; + protected static final char HAS_ARRAY_WRITE= 0x0080; + protected static final char HAS_JSR = 0x0040; + protected static final char HAS_COND_BRANCH= 0x0020; + protected static final char HAS_SWITCH = 0x0010; + protected static final char HAS_BACK_BRANCH= 0x0008; + protected static final char IS_RS_METHOD = 0x0004; + + /** + * storage for bytecode summary flags + */ + protected char summaryFlags; + /** + * storage for bytecode summary size + */ + protected char summarySize; + + /** + * words needed for local variables (including parameters) + */ + private final short localWords; + + /** + * words needed for operand stack (high water mark) + * TODO: OSR redesign; add subclass of NormalMethod for OSR method + * and then make this field final in NormalMethod. + */ + private short operandWords; + + /** + * bytecodes for this method (null --> none) + */ + public final byte[] bytecodes; + + /** + * try/catch/finally blocks for this method (null --> none) + */ + private final VM_ExceptionHandlerMap exceptionHandlerMap; + + /** + * pc to source-line info (null --> none) + * Each entry contains both the line number (upper 16 bits) + * and corresponding start PC (lower 16 bits). 
+ */ + private final int[] lineNumberMap; + + // Extra fields for on-stack replacement + // TODO: rework the system so we don't waste space for this on the VM_Method object + /* bytecode array constists of prologue and original bytecodes */ + private byte[] synthesizedBytecodes = null; + /* record osr prologue */ + private byte[] osrPrologue = null; + /* prologue may change the maximum stack height, remember the + * original stack height */ + private short savedOperandWords; + + /** + * Construct a normal Java bytecode method's information + * + * @param dc the VM_TypeReference object of the class that declared this field + * @param mr the canonical memberReference for this member. + * @param mo modifiers associated with this member. + * @param et exceptions thrown by this method. + * @param lw the number of local words used by the bytecode of this method + * @param ow the number of operand words used by the bytecode of this method + * @param bc the bytecodes of this method + * @param eMap the exception handler map for this method + * @param lm the line number map for this method + * @param constantPool the constantPool for this method + * @param sig generic type of this method. + * @param annotations array of runtime visible annotations + * @param parameterAnnotations array of runtime visible paramter annotations + * @param ad annotation default value for that appears in annotation classes + */ + public VM_NormalMethod(VM_TypeReference dc, VM_MemberReference mr, + short mo, VM_TypeReference[] et, short lw, short ow, byte[] bc, + VM_ExceptionHandlerMap eMap, int[] lm, + int[] constantPool, VM_Atom sig, + VM_Annotation[] annotations, + VM_Annotation[] parameterAnnotations, + Object ad) + { + super(dc, mr, mo, et, sig, annotations, parameterAnnotations, ad); + localWords = lw; + operandWords = ow; + bytecodes = bc; + exceptionHandlerMap = eMap; + lineNumberMap = lm; + computeSummary(constantPool); + } + + /** + * Generate the code for this method + */ + protected VM_CompiledMethod genCode() throws VerifyError { + // The byte code verifier is dead; needs replacement. +// if (VM.VerifyBytecode) { +// VM_Verifier verifier = new VM_Verifier(); +// verifier.verifyMethod(this); +// } + + if (VM.writingBootImage) { + return VM_BootImageCompiler.compile(this); + } else { + return VM_RuntimeCompiler.compile(this); + } + } + + /** + * Space required by this method for its local variables, in words. + * Note: local variables include parameters + */ + @Uninterruptible + public int getLocalWords() { + return localWords; + } + + /** + * Space required by this method for its operand stack, in words. + */ + @Uninterruptible + public int getOperandWords() { + return operandWords; + } + + /** + * Get a representation of the bytecodes in the code attribute of this method. 
+ * @return object representing the bytecodes + */ + public VM_BytecodeStream getBytecodes() { + return new VM_BytecodeStream(this, bytecodes); + } + + /** + * Fill in DynamicLink object for the invoke at the given bytecode index + * @param dynamicLink the dynamicLink object to initialize + * @param bcIndex the bcIndex of the invoke instruction + */ + @Uninterruptible + public void getDynamicLink(VM_DynamicLink dynamicLink, int bcIndex) { + if (VM.VerifyAssertions) VM._assert(bytecodes != null); + if (VM.VerifyAssertions) VM._assert(bcIndex + 2 < bytecodes.length); + int bytecode = bytecodes[bcIndex] & 0xFF; + if (VM.VerifyAssertions) VM._assert((VM_BytecodeConstants.JBC_invokevirtual <= bytecode) + && (bytecode <= VM_BytecodeConstants.JBC_invokeinterface)); + int constantPoolIndex = ((bytecodes[bcIndex + 1] & 0xFF) << BITS_IN_BYTE) | (bytecodes[bcIndex + 2] & 0xFF); + dynamicLink.set(getDeclaringClass().getMethodRef(constantPoolIndex), bytecode); + } + + /** + * Size of bytecodes for this method + */ + public int getBytecodeLength() { + return bytecodes.length; + } + + /** + * Exceptions caught by this method. + * @return info (null --> method doesn't catch any exceptions) + */ + @Uninterruptible + public VM_ExceptionHandlerMap getExceptionHandlerMap() { + return exceptionHandlerMap; + } + + /** + * Return the line number information for the argument bytecode index. + * @return The line number, a positive integer. Zero means unable to find. + */ + @Uninterruptible + public int getLineNumberForBCIndex(int bci) { + if (lineNumberMap == null) return 0; + int idx; + for (idx = 0; idx<lineNumberMap.length; idx++) { + int pc = lineNumberMap[idx] & 0xffff; // lower 16 bits are bcIndex + if (bci < pc) { + if (idx == 0) idx++; // add 1, so we can subtract 1 below. + break; + } + } + return lineNumberMap[--idx] >>> 16; // upper 16 bits are line number + } + + // Extra methods for on-stack replacement + // VM_BaselineCompiler and OPT_BC2IR should check if a method is + // for specialization by calling isForOsrSpecialization, the compiler + // uses synthesized bytecodes (prologue + original bytecodes) for + // OSRing method. Other interfaces of method are not changed, therefore, + // dynamic linking and gc referring to bytecodes are safe. + + /** + * Checks if the method is in state for OSR specialization now + * @return true, if it is (with prologue) + */ + public boolean isForOsrSpecialization() { + return this.synthesizedBytecodes != null; + } + + /** + * Sets method in state for OSR specialization, i.e, the subsequent calls + * of {@link #getBytecodes} return the stream of specialized bytecodes. + * + * NB: between flag and action, it should not allow GC or threadSwitch happen. + * @param prologue The bytecode of prologue + * @param newStackHeight The prologue may change the default height of + * stack + */ + public void setForOsrSpecialization(byte[] prologue, short newStackHeight) { + if (VM.VerifyAssertions) VM._assert(this.synthesizedBytecodes == null); + + byte[] newBytecodes = new byte[prologue.length + bytecodes.length]; + System.arraycopy(prologue, 0, newBytecodes, 0, prologue.length); + System.arraycopy(bytecodes, 0, newBytecodes, prologue.length, bytecodes.length); + + this.osrPrologue = prologue; + this.synthesizedBytecodes = newBytecodes; + this.savedOperandWords = operandWords; + if (newStackHeight > operandWords) + this.operandWords = newStackHeight; + } + + /** + * Restores the original state of the method. 
+ */ + public void finalizeOsrSpecialization() { + if (VM.VerifyAssertions) VM._assert(this.synthesizedBytecodes != null); + this.synthesizedBytecodes = null; + this.osrPrologue = null; + this.operandWords = savedOperandWords; + } + + /** + * Returns the OSR prologue length for adjusting various tables and maps. + * @return the length of prologue if the method is in state for OSR, + * 0 otherwise. + */ + public int getOsrPrologueLength() { + return isForOsrSpecialization()?this.osrPrologue.length:0; + } + + /** + * Returns a bytecode stream of osr prologue + * @return osr prologue bytecode stream + */ + public VM_BytecodeStream getOsrPrologue() { + if (VM.VerifyAssertions) VM._assert(synthesizedBytecodes != null); + return new VM_BytecodeStream(this, osrPrologue); + } + + /** + * Returns the synthesized bytecode stream with osr prologue + * @return bytecode stream + */ + public VM_BytecodeStream getOsrSynthesizedBytecodes() { + if (VM.VerifyAssertions) VM._assert(synthesizedBytecodes != null); + return new VM_BytecodeStream(this, synthesizedBytecodes); + } + + + /* + * Methods to access and compute method summary information + */ + + /** + * @return An estimate of the expected size of the machine code instructions + * that will be generated by the opt compiler if the method is inlined. + */ + public int inlinedSizeEstimate() { + return summarySize & 0xFFFF; + } + + /** + * @return true if the method contains a VM_Magic.xxx or Address.yyy + */ + public boolean hasMagic() { + return (summaryFlags & HAS_MAGIC) != 0; + } + + /** + * @return true if the method contains a monitorenter/exit or is synchronized + */ + public boolean hasSynch() { + return (summaryFlags & HAS_SYNCH) != 0; + } + + /** + * @return true if the method contains an allocation + */ + public boolean hasAllocation() { + return (summaryFlags & HAS_ALLOCATION) != 0; + } + + /** + * @return true if the method contains an athrow + */ + public boolean hasThrow() { + return (summaryFlags & HAS_THROW) != 0; + } + + /** + * @return true if the method contains an invoke + */ + public boolean hasInvoke() { + return (summaryFlags & HAS_INVOKE) != 0; + } + + /** + * @return true if the method contains a getfield or getstatic + */ + public boolean hasFieldRead() { + return (summaryFlags & HAS_FIELD_READ) != 0; + } + + /** + * @return true if the method contains a putfield or putstatic + */ + public boolean hasFieldWrite() { + return (summaryFlags & HAS_FIELD_WRITE) != 0; + } + + /** + * @return true if the method contains an array load + */ + public boolean hasArrayRead() { + return (summaryFlags & HAS_ARRAY_READ) != 0; + } + + /** + * @return true if the method contains an array store + */ + public boolean hasArrayWrite() { + return (summaryFlags & HAS_ARRAY_WRITE) != 0; + } + + /** + * @return true if the method contains a jsr + */ + public boolean hasJSR() { + return (summaryFlags & HAS_JSR) != 0; + } + + /** + * @return true if the method contains a conditional branch + */ + public boolean hasCondBranch() { + return (summaryFlags & HAS_COND_BRANCH) != 0; + } + + /** + * @return true if the method contains a switch + */ + public boolean hasSwitch() { + return (summaryFlags & HAS_SWITCH) != 0; + } + + /** + * @return true if the method contains a backwards branch + */ + public boolean hasBackwardsBranch() { + return (summaryFlags & HAS_BACK_BRANCH) != 0; + } + + /** + * @return true if the method is the implementation of a runtime service + * that is called "under the covers" from the generated code and thus is not subject to + * 
inlining via the normal mechanisms. + */ + public boolean isRuntimeServiceMethod() { + return (summaryFlags & IS_RS_METHOD) != 0; + } + + /** + * Set the value of the 'runtime service method' flag to the argument + * value. A method is considered to be a runtime service method if it + * is only/primarialy invoked "under the covers" from the generated code + * and thus is not subject to inlining via the normal mechanisms. + * For example, the implementations of bytecodes such as new or checkcast + * or the implementation of yieldpoints. + * @param value true if this is a runtime service method, false it is not. + */ + public void setRuntimeServiceMethod(boolean value) { + if (value) { + summaryFlags |= IS_RS_METHOD; + } else { + summaryFlags &= ~IS_RS_METHOD; + } + } + + /** + * @return true if the method may write to a given field + */ + public boolean mayWrite(VM_Field field) { + if (!hasFieldWrite()) return false; + VM_FieldReference it = field.getMemberRef().asFieldReference(); + VM_BytecodeStream bcodes = getBytecodes(); + while (bcodes.hasMoreBytecodes()) { + int opcode = bcodes.nextInstruction(); + if (opcode == JBC_putstatic || opcode == JBC_putfield) { + VM_FieldReference fr = bcodes.getFieldReference(); + if (!fr.definitelyDifferent(it)) return true; + } else { + bcodes.skipInstruction(); + } + } + return false; + } + + /** + * This method computes a summary of interesting method characteristics + * and stores an encoding of the summary as an int. + */ + protected void computeSummary(int[] constantPool) { + int calleeSize = 0; + if (isSynchronized()) { + summaryFlags |= HAS_SYNCH; + calleeSize += 2*SYNCH_COST; // NOTE: ignoring catch/unlock/rethrow block. Probably the right thing to do. + } + + VM_BytecodeStream bcodes = getBytecodes(); + while (bcodes.hasMoreBytecodes()) { + switch (bcodes.nextInstruction()) { + // Array loads: null check, bounds check, index computation, load + case JBC_iaload:case JBC_laload:case JBC_faload:case JBC_daload: + case JBC_aaload:case JBC_baload:case JBC_caload:case JBC_saload: + summaryFlags |= HAS_ARRAY_READ; + calleeSize += ARRAY_LOAD_COST; + break; + + // Array stores: null check, bounds check, index computation, load + case JBC_iastore:case JBC_lastore:case JBC_fastore: + case JBC_dastore:case JBC_bastore:case JBC_castore:case JBC_sastore: + summaryFlags |= HAS_ARRAY_WRITE; + calleeSize += ARRAY_STORE_COST; + break; + case JBC_aastore: + summaryFlags |= HAS_ARRAY_WRITE; + calleeSize += ARRAY_STORE_COST + STORE_CHECK_COST; + break; + + // primitive computations (likely to be very cheap) + case JBC_iadd:case JBC_fadd:case JBC_dadd:case JBC_isub: + case JBC_fsub:case JBC_dsub:case JBC_imul:case JBC_fmul: + case JBC_dmul:case JBC_idiv:case JBC_fdiv:case JBC_ddiv: + case JBC_irem:case JBC_frem:case JBC_drem:case JBC_ineg: + case JBC_fneg:case JBC_dneg:case JBC_ishl:case JBC_ishr: + case JBC_lshr:case JBC_iushr:case JBC_iand:case JBC_ior: + case JBC_ixor:case JBC_iinc: + calleeSize += SIMPLE_OPERATION_COST; + break; + + // long computations may be different cost than primitive computations + case JBC_ladd:case JBC_lsub:case JBC_lmul:case JBC_ldiv: + case JBC_lrem:case JBC_lneg:case JBC_lshl:case JBC_lushr: + case JBC_land:case JBC_lor:case JBC_lxor: + calleeSize += LONG_OPERATION_COST; + break; + + // Some conversion operations are very cheap + case JBC_int2byte:case JBC_int2char:case JBC_int2short: + calleeSize += SIMPLE_OPERATION_COST; + break; + // Others are a little more costly + case JBC_i2l:case JBC_l2i: + calleeSize += LONG_OPERATION_COST; + 
break; + // Most are roughly as expensive as a call + case JBC_i2f:case JBC_i2d:case JBC_l2f:case JBC_l2d: + case JBC_f2i:case JBC_f2l:case JBC_f2d:case JBC_d2i: + case JBC_d2l:case JBC_d2f: + calleeSize += CALL_COST; + break; + + // approximate compares as 1 simple operation + case JBC_lcmp:case JBC_fcmpl:case JBC_fcmpg:case JBC_dcmpl: + case JBC_dcmpg: + calleeSize += SIMPLE_OPERATION_COST; + break; + + // most control flow is cheap; jsr is more expensive + case JBC_ifeq:case JBC_ifne:case JBC_iflt:case JBC_ifge: + case JBC_ifgt:case JBC_ifle:case JBC_if_icmpeq:case JBC_if_icmpne: + case JBC_if_icmplt:case JBC_if_icmpge:case JBC_if_icmpgt: + case JBC_if_icmple:case JBC_if_acmpeq:case JBC_if_acmpne: + case JBC_ifnull:case JBC_ifnonnull: + summaryFlags |= HAS_COND_BRANCH; + if (bcodes.getBranchOffset() < 0) summaryFlags |= HAS_BACK_BRANCH; + calleeSize += SIMPLE_OPERATION_COST; + continue; // we've processed all of the bytes, so avoid the call to skipInstruction() + case JBC_goto: + if (bcodes.getBranchOffset() < 0) summaryFlags |= HAS_BACK_BRANCH; + calleeSize += SIMPLE_OPERATION_COST; + continue; // we've processed all of the bytes, so avoid the call to skipInstruction() + case JBC_goto_w: + if (bcodes.getWideBranchOffset() < 0) summaryFlags |= HAS_BACK_BRANCH; + calleeSize += SIMPLE_OPERATION_COST; + continue; // we've processed all of the bytes, so avoid the call to skipInstruction() + case JBC_jsr:case JBC_jsr_w: + summaryFlags |= HAS_JSR; + calleeSize += JSR_COST; + break; + + case JBC_tableswitch:case JBC_lookupswitch: + summaryFlags |= HAS_SWITCH; + calleeSize += SWITCH_COST; + break; + + case JBC_putstatic: case JBC_putfield: + summaryFlags |= HAS_FIELD_WRITE; + calleeSize += SIMPLE_OPERATION_COST; + break; + + case JBC_getstatic: case JBC_getfield: + summaryFlags |= HAS_FIELD_READ; + calleeSize += SIMPLE_OPERATION_COST; + break; + + // Various flavors of calls. Assign them call cost (differentiate?) + case JBC_invokevirtual:case JBC_invokespecial: + case JBC_invokestatic: + // Special case VM_Magic's as being cheaper. 
+ VM_MethodReference meth = bcodes.getMethodReference(constantPool); + if (meth.getType().isMagicType()) { + summaryFlags |= HAS_MAGIC; + calleeSize += MAGIC_COST; + } else { + summaryFlags |= HAS_INVOKE; + calleeSize += CALL_COST; + } + continue; // we've processed all of the bytes, so avoid the call to skipInstruction() + + case JBC_invokeinterface: + summaryFlags |= HAS_INVOKE; + calleeSize += CALL_COST; + break; + + case JBC_xxxunusedxxx: + if (VM.VerifyAssertions) VM._assert(VM.NOT_REACHED); + break; + + case JBC_new: case JBC_newarray: case JBC_anewarray: + summaryFlags |= HAS_ALLOCATION; + calleeSize += ALLOCATION_COST; + break; + + case JBC_arraylength: + calleeSize += SIMPLE_OPERATION_COST; + break; + + case JBC_athrow: + summaryFlags |= HAS_THROW; + calleeSize += THROW_COST; + break; + + case JBC_checkcast:case JBC_instanceof: + calleeSize += CLASS_CHECK_COST; + break; + + case JBC_monitorenter:case JBC_monitorexit: + summaryFlags |= HAS_SYNCH; + calleeSize += SYNCH_COST; + break; + + case JBC_multianewarray: + summaryFlags |= HAS_ALLOCATION; + calleeSize += CALL_COST; + break; + } + bcodes.skipInstruction(); + } + if (calleeSize > Character.MAX_VALUE) { + summarySize = Character.MAX_VALUE; + } else { + summarySize = (char)calleeSize; + } + } + + /** + * Create an optimizing compiler HIR code generator for this type of + * method + * @param context the generation context for the HIR generation + * @return a HIR generator + */ + public OPT_HIRGenerator createHIRGenerator(OPT_GenerationContext context){ + return new OPT_BC2IR(context); + } + + /** + * Must this method be OPT compiled? + * @param context the generation context for the HIR generation + * @return a HIR generator + */ + public boolean optCompileOnly() { + return false; + } +} Added: ext/org/jikesrvm/opt/OPT_Simplifier.java =================================================================== --- ext/org/jikesrvm/opt/OPT_Simplifier.java (rev 0) +++ ext/org/jikesrvm/opt/OPT_Simplifier.java 2007-03-20 15:49:03 UTC (rev 6) @@ -0,0 +1,3160 @@ +/* + *... [truncated message content] |
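One detail worth pulling out of the VM_NormalMethod code added above: the line number table is stored as a single int per entry, with the source line number in the upper 16 bits and the bytecode index at which that line starts in the lower 16 bits (the packing happens in readMethod, the unpacking in getLineNumberForBCIndex). The standalone sketch below illustrates that scheme; it is not part of the commit, and the class and method names are illustrative only.

    // Sketch of the line-number packing used by VM_NormalMethod's lineNumberMap:
    // upper 16 bits = source line, lower 16 bits = bytecode index where that line starts.
    public class LineNumberPackingDemo {

        // Pack a (startPC, lineNumber) pair the way readMethod does.
        static int pack(int startPC, int lineNumber) {
            return (lineNumber << 16) | (startPC & 0xffff);
        }

        // Recover the line number for a bytecode index, mirroring getLineNumberForBCIndex.
        static int lineFor(int[] map, int bci) {
            if (map == null) return 0;          // 0 means "unable to find"
            int idx;
            for (idx = 0; idx < map.length; idx++) {
                int pc = map[idx] & 0xffff;     // lower 16 bits: start PC of this entry
                if (bci < pc) {
                    if (idx == 0) idx++;        // so the decrement below stays in range
                    break;
                }
            }
            return map[--idx] >>> 16;           // upper 16 bits: line number
        }

        public static void main(String[] args) {
            int[] map = { pack(0, 10), pack(5, 11), pack(12, 14) };
            System.out.println(lineFor(map, 7));   // prints 11
        }
    }

Packing both values into one int keeps the per-method map compact, which presumably matters given that a map like this is retained for every loaded method.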
From: <cap...@us...> - 2007-03-21 15:19:48
Revision: 8 http://svn.sourceforge.net/pearcolator/?rev=8&view=rev Author: captain5050 Date: 2007-03-21 08:19:39 -0700 (Wed, 21 Mar 2007) Log Message: ----------- Fixes to get DBT_Trace(s) compiled properly by the runtime compiler Modified Paths: -------------- build.xml ext/DBT_Dummy.java src/org/binarytranslator/vmInterface/DBT_Trace.java Added Paths: ----------- ext/org/jikesrvm/VM_RuntimeCompiler.java ext/org/jikesrvm/opt/ir/OPT_ConvertBCtoHIR.java Modified: build.xml =================================================================== --- build.xml 2007-03-21 15:14:00 UTC (rev 7) +++ build.xml 2007-03-21 15:19:39 UTC (rev 8) @@ -18,6 +18,7 @@ <delete verbose="true"> <fileset dir="${build.classes}"> <include name="org/jikesrvm/opt/ir/ia32/OPT_IA32ConditionOperand.class" /> + <include name="org/jikesrvm/opt/ir/OPT_ConvertBCtoHIR.class" /> <include name="org/jikesrvm/opt/ir/OPT_HIRGenerator.class" /> <include name="org/jikesrvm/opt/ir/OPT_GenerationContext.class" /> <include name="org/jikesrvm/opt/ir/OPT_ConditionOperand.class" /> @@ -27,6 +28,7 @@ <include name="org/jikesrvm/ppc/PPC_Disassembler.class" /> <include name="org/jikesrvm/ppc/opcode_tab.class" /> <include name="org/jikesrvm/ppc/opcodeXX.class" /> + <include name="org/jikesrvm/VM_RuntimeCompiler.class" /> <include name="org/jikesrvm/classloader/VM_Method.class" /> <include name="org/jikesrvm/classloader/VM_Member.class" /> <include name="org/jikesrvm/classloader/VM_NormalMethod.class" /> Modified: ext/DBT_Dummy.java =================================================================== --- ext/DBT_Dummy.java 2007-03-21 15:14:00 UTC (rev 7) +++ ext/DBT_Dummy.java 2007-03-21 15:19:39 UTC (rev 8) @@ -22,4 +22,6 @@ static org.jikesrvm.classloader.VM_Method j; static org.jikesrvm.classloader.VM_Member k; static org.jikesrvm.classloader.VM_NormalMethod l; + static org.jikesrvm.VM_RuntimeCompiler m; + static org.jikesrvm.opt.ir.OPT_ConvertBCtoHIR n; } Added: ext/org/jikesrvm/VM_RuntimeCompiler.java =================================================================== --- ext/org/jikesrvm/VM_RuntimeCompiler.java (rev 0) +++ ext/org/jikesrvm/VM_RuntimeCompiler.java 2007-03-21 15:19:39 UTC (rev 8) @@ -0,0 +1,773 @@ +/* + * This file is part of Jikes RVM (http://jikesrvm.sourceforge.net). + * The Jikes RVM project is distributed under the Common Public License (CPL). + * A copy of the license is included in the distribution, and is also + * available at http://www.opensource.org/licenses/cpl1.0.php + * + * (C) Copyright IBM Corp. 2001, 2005 + */ +package org.jikesrvm; + +import org.jikesrvm.classloader.*; +import org.jikesrvm.opt.*; +import org.jikesrvm.adaptive.*; +import org.jikesrvm.ArchitectureSpecific.VM_JNICompiler; + +/** + * Harness to select which compiler to dynamically + * compile a method in first invocation. + * + * A place to put code common to all runtime compilers. + * This includes instrumentation code to get equivalent data for + * each of the runtime compilers. + * <p> + * We collect the following data for each compiler + * <ol> + * <li> + * total number of methods complied by the compiler + * <li> + * total compilation time in milliseconds. 
+ * <li> + * total number of bytes of bytecodes compiled by the compiler + * (under the assumption that there is no padding in the bytecode + * array and thus VM_Method.getBytecodes().length is the number bytes + * of bytecode for a method) + * <li> + * total number of machine code insructions generated by the compiler + * (under the assumption that there is no (excessive) padding in the + * machine code array and thus VM_CompiledMethod.numberOfInsturctions() + * is a close enough approximation of the number of machinecodes generated) + * </ol> + * Note that even if 3. & 4. are inflated due to padding, the numbers will + * still be an accurate measure of the space costs of the compile-only + * approach. + * + * @author Matthew Arnold + * @author Dave Grove + * @author Michael Hind + */ +public class VM_RuntimeCompiler implements VM_Constants, + VM_Callbacks.ExitMonitor { + + // Use these to encode the compiler for record() + public static final byte JNI_COMPILER = 0; + public static final byte BASELINE_COMPILER = 1; + public static final byte OPT_COMPILER = 2; + + // Data accumulators + private static final String[] name = {"JNI\t","Base\t","Opt\t"}; // Output names + private static int[] totalMethods = {0,0,0}; + private static double[] totalCompTime = {0,0,0}; + private static int[] totalBCLength = {0,0,0}; + private static int[] totalMCLength = {0,0,0}; + + // running sum of the natural logs of the rates, + // used for geometric mean, the product of rates is too big for doubles + // so we use the principle of logs to help us + // We compute e ** ((log a + log b + ... + log n) / n ) + private static double[] totalLogOfRates = {0,0,0}; + + // We can't record values until Math.log is loaded, so we miss the first few + private static int[] totalLogValueMethods = {0,0,0}; + + private static String[] earlyOptArgs = new String[0]; + + // is the opt compiler usable? + protected static boolean compilerEnabled; + + // is opt compiler currently in use? + // This flag is used to detect/avoid recursive opt compilation. + // (ie when opt compilation causes a method to be compiled). + // We also make all public entrypoints static synchronized methods + // because the opt compiler is not reentrant. + // When we actually fix defect 2912, we'll have to implement a different + // scheme that can distinguish between recursive opt compilation by the same + // thread (always bad) and parallel opt compilation (currently bad, future ok). + // NOTE: This code can be quite subtle, so please be absolutely sure + // you know what you're doing before modifying it!!! + protected static boolean compilationInProgress; + + // One time check to optionally preload and compile a specified class + protected static boolean preloadChecked = false; + + // Cache objects needed to cons up compilation plans + // TODO: cutting link to opt compiler by declaring type as object. + public static Object /* OPT_Options */ options; + public static Object /* OPT_OptimizationPlanElement[] */ optimizationPlan; + + /** + * To be called when the VM is about to exit. + * @param value the exit value + */ + public void notifyExit(int value) { + report(false); + } + + /** + * This method records the time and sizes (bytecode and machine code) for + * a compilation. 
+ * @param compiler the compiler used + * @param method the resulting VM_Method + * @param compiledMethod the resulting compiled method + */ + public static void record(byte compiler, + VM_NormalMethod method, + VM_CompiledMethod compiledMethod) { + + recordCompilation(compiler, method.getBytecodeLength(), + compiledMethod.numberOfInstructions(), + compiledMethod.getCompilationTime()); + + if (VM.BuildForAdaptiveSystem) { + if (VM_AOSLogging.booted()) { + VM_AOSLogging.recordUpdatedCompilationRates(compiler, + method, + method.getBytecodeLength(), + totalBCLength[compiler], + compiledMethod.numberOfInstructions(), + totalMCLength[compiler], + compiledMethod.getCompilationTime(), + totalCompTime[compiler], + totalLogOfRates[compiler], + totalLogValueMethods[compiler], + totalMethods[compiler]); + } + } + } + + /** + * This method records the time and sizes (bytecode and machine code) for + * a compilation + * @param compiler the compiler used + * @param method the resulting VM_Method + * @param compiledMethod the resulting compiled method + */ + public static void record(byte compiler, + VM_NativeMethod method, + VM_CompiledMethod compiledMethod) { + + + recordCompilation(compiler, + 0, // don't have any bytecode info, its native + compiledMethod.numberOfInstructions(), + compiledMethod.getCompilationTime()); + } + + /** + * This method does the actual recording + * @param compiler the compiler used + * @param BCLength the number of bytecodes in method source + * @param MCLength the length of the generated machine code + * @param compTime the compilation time in ms + */ + private static void recordCompilation(byte compiler, + int BCLength, + int MCLength, + double compTime) { + + totalMethods[compiler]++; + totalMCLength[compiler] += MCLength; + totalCompTime[compiler] += compTime; + + // Comp rate not useful for JNI compiler because there is no bytecode! + if (compiler != JNI_COMPILER) { + totalBCLength[compiler] += BCLength; + double rate = BCLength / compTime; + + // need to be fully booted before calling log + if (VM.fullyBooted) { + // we want the geometric mean, but the product of rates is too big + // for doubles, so we use the principle of logs to help us + // We compute e ** ((log a + log b + ... 
+ log n) / n ) + totalLogOfRates[compiler] += Math.log(rate); + totalLogValueMethods[compiler]++; + } + } + } + + /** + * This method produces a summary report of compilation activities + * @param explain Explains the metrics used in the report + */ + public static void report (boolean explain) { + VM.sysWrite("\n\t\tCompilation Subsystem Report\n"); + VM.sysWrite("Comp\t#Meths\tTime\tbcb/ms\tmcb/bcb\tMCKB\tBCKB\n"); + for (int i=0; i<=name.length-1; i++) { + if (totalMethods[i]>0) { + VM.sysWrite(name[i]); + // Number of methods + VM.sysWrite(totalMethods[i]); + VM.sysWrite("\t"); + // Compilation time + VM.sysWrite(totalCompTime[i]); + VM.sysWrite("\t"); + + if (i == JNI_COMPILER) { + VM.sysWrite("NA"); + } else { + // Bytecode bytes per millisecond, + // use unweighted geomean + VM.sysWrite(Math.exp(totalLogOfRates[i] / totalLogValueMethods[i]), 2); + } + VM.sysWrite("\t"); + // Ratio of machine code bytes to bytecode bytes + if (i != JNI_COMPILER) { + VM.sysWrite((double)(totalMCLength[i] << ArchitectureSpecific.VM_RegisterConstants.LG_INSTRUCTION_WIDTH)/(double)totalBCLength[i], 2); + } else { + VM.sysWrite("NA"); + } + VM.sysWrite("\t"); + // Generated machine code Kbytes + VM.sysWrite((double)(totalMCLength[i] << ArchitectureSpecific.VM_RegisterConstants.LG_INSTRUCTION_WIDTH)/1024, 1); + VM.sysWrite("\t"); + // Compiled bytecode Kbytes + if (i != JNI_COMPILER) { + VM.sysWrite((double)totalBCLength[i]/1024, 1); + } else { + VM.sysWrite("NA"); + } + VM.sysWrite("\n"); + } + } + if (explain) { + // Generate an explanation of the metrics reported + VM.sysWrite("\t\t\tExplanation of Metrics\n"); + VM.sysWrite("#Meths:\t\tTotal number of methods compiled by the compiler\n"); + VM.sysWrite("Time:\t\tTotal compilation time in milliseconds\n"); + VM.sysWrite("bcb/ms:\t\tNumber of bytecode bytes complied per millisecond\n"); + VM.sysWrite("mcb/bcb:\tRatio of machine code bytes to bytecode bytes\n"); + VM.sysWrite("MCKB:\t\tTotal number of machine code bytes generated in kilobytes\n"); + VM.sysWrite("BCKB:\t\tTotal number of bytecode bytes compiled in kilobytes\n"); + } + + VM_BaselineCompiler.generateBaselineCompilerSubsystemReport(explain); + + if (VM.BuildForAdaptiveSystem) { + // Get the opt's report + VM_TypeReference theTypeRef = VM_TypeReference.findOrCreate(VM_BootstrapClassLoader.getBootstrapClassLoader(), + VM_Atom.findOrCreateAsciiAtom("Lorg/jikesrvm/opt/OPT_OptimizationPlanner;")); + VM_Type theType = theTypeRef.peekResolvedType(); + if (theType != null && theType.asClass().isInitialized()) { + OPT_OptimizationPlanner.generateOptimizingCompilerSubsystemReport(explain); + } else { + VM.sysWrite("\n\tNot generating Optimizing Compiler SubSystem Report because \n"); + VM.sysWrite("\tthe opt compiler was never invoked.\n\n"); + } + } + } + + /** + * Return the current estimate of basline-compiler rate, in bcb/msec + */ + public static double getBaselineRate() { + return Math.exp(totalLogOfRates[BASELINE_COMPILER] / totalLogValueMethods[BASELINE_COMPILER]); + } + + /** + * This method will compile the passed method using the baseline compiler. 
+ * @param method the method to compile + */ + public static VM_CompiledMethod baselineCompile(VM_NormalMethod method) { + VM_Callbacks.notifyMethodCompile(method, VM_CompiledMethod.BASELINE); + long start = 0; + if (VM.MeasureCompilation || VM.BuildForAdaptiveSystem) { + start = VM_Thread.getCurrentThread().accumulateCycles(); + } + + VM_CompiledMethod cm = VM_BaselineCompiler.compile(method); + + if (VM.MeasureCompilation || VM.BuildForAdaptiveSystem) { + long end = VM_Thread.getCurrentThread().accumulateCycles(); + double compileTime = VM_Time.cyclesToMillis(end - start); + cm.setCompilationTime(compileTime); + record(BASELINE_COMPILER, method, cm); + } + + return cm; + } + + /** + * Process command line argument destined for the opt compiler + */ + public static void processOptCommandLineArg(String prefix, String arg) { + if (VM.BuildForAdaptiveSystem) { + if (compilerEnabled) { + if (((OPT_Options)options).processAsOption(prefix, arg)) { + // update the optimization plan to reflect the new command line argument + optimizationPlan = OPT_OptimizationPlanner.createOptimizationPlan((OPT_Options)options); + } else { + VM.sysWrite("Unrecognized opt compiler argument \""+arg+"\""); + VM.sysExit(VM.EXIT_STATUS_BOGUS_COMMAND_LINE_ARG); + } + } else { + String[] tmp = new String[earlyOptArgs.length+2]; + for (int i=0; i<earlyOptArgs.length; i++) { + tmp[i] = earlyOptArgs[i]; + } + earlyOptArgs = tmp; + earlyOptArgs[earlyOptArgs.length-2] = prefix; + earlyOptArgs[earlyOptArgs.length-1] = arg; + } + } else { + if (VM.VerifyAssertions) VM._assert(NOT_REACHED); + } + } + + /** + * attempt to compile the passed method with the OPT_Compiler. + * Don't handle OPT_OptimizingCompilerExceptions + * (leave it up to caller to decide what to do) + * Precondition: compilationInProgress "lock" has been acquired + * @param method the method to compile + * @param plan the plan to use for compiling the method + */ + private static VM_CompiledMethod optCompile(VM_NormalMethod method, + OPT_CompilationPlan plan) + throws OPT_OptimizingCompilerException { + if (VM.BuildForOptCompiler) { + if (VM.VerifyAssertions) { + VM._assert(compilationInProgress, "Failed to acquire compilationInProgress \"lock\""); + } + + VM_Callbacks.notifyMethodCompile(method, VM_CompiledMethod.JNI); + long start = 0; + if (VM.MeasureCompilation || VM.BuildForAdaptiveSystem) { + start = VM_Thread.getCurrentThread().accumulateCycles(); + } + + VM_CompiledMethod cm = OPT_Compiler.compile(plan); + + if (VM.MeasureCompilation || VM.BuildForAdaptiveSystem) { + long end = VM_Thread.getCurrentThread().accumulateCycles(); + double compileTime = VM_Time.cyclesToMillis(end - start); + cm.setCompilationTime(compileTime); + record(OPT_COMPILER, method, cm); + } + + return cm; + } else { + if (VM.VerifyAssertions) VM._assert(false); + return null; + } + } + + + // These methods are safe to invoke from VM_RuntimeCompiler.compile + + /** + * This method tries to compile the passed method with the OPT_Compiler, + * using the default compilation plan. If + * this fails we will use the quicker compiler (baseline for now) + * The following is carefully crafted to avoid (infinte) recursive opt + * compilation for all combinations of bootimages & lazy/eager compilation. + * Be absolutely sure you know what you're doing before changing it !!! 
+ * @param method the method to compile + */ + public static synchronized VM_CompiledMethod optCompileWithFallBack(VM_NormalMethod method) { + if (VM.BuildForOptCompiler) { + if (compilationInProgress) { + return fallback(method); + } else { + try { + compilationInProgress = true; + OPT_CompilationPlan plan = new OPT_CompilationPlan(method, (OPT_OptimizationPlanElement[])optimizationPlan, null, (OPT_Options)options); + return optCompileWithFallBackInternal(method, plan); + } finally { + compilationInProgress = false; + } + } + } else { + if (VM.VerifyAssertions) VM._assert(false); + return null; + } + } + + /** + * This method tries to compile the passed method with the OPT_Compiler + * with the passed compilation plan. If + * this fails we will use the quicker compiler (baseline for now) + * The following is carefully crafted to avoid (infinte) recursive opt + * compilation for all combinations of bootimages & lazy/eager compilation. + * Be absolutely sure you know what you're doing before changing it !!! + * @param method the method to compile + * @param plan the compilation plan to use for the compile + */ + public static synchronized VM_CompiledMethod optCompileWithFallBack(VM_NormalMethod method, + OPT_CompilationPlan plan) { + if (VM.BuildForOptCompiler) { + if (compilationInProgress) { + return fallback(method); + } else { + try { + compilationInProgress = true; + return optCompileWithFallBackInternal(method, plan); + } finally { + compilationInProgress = false; + } + } + } else { + if (VM.VerifyAssertions) VM._assert(false); + return null; + } + } + + /** + * This real method that performs the opt compilation. + * @param method the method to compile + * @param plan the compilation plan to use + */ + private static VM_CompiledMethod optCompileWithFallBackInternal(VM_NormalMethod method, + OPT_CompilationPlan plan) { + if (VM.BuildForOptCompiler) { + if (method.hasNoOptCompileAnnotation()) return fallback(method); + try { + return optCompile(method, plan); + } catch (OPT_OptimizingCompilerException e) { + String msg = "VM_RuntimeCompiler: can't optimize \"" + method + "\" (error was: " + e + "): reverting to baseline compiler\n"; + if (e.isFatal && VM.ErrorsFatal) { + e.printStackTrace(); + VM.sysFail(msg); + } else { + boolean printMsg = true; + if (e instanceof OPT_MagicNotImplementedException) { + printMsg = !((OPT_MagicNotImplementedException)e).isExpected; + } + if (printMsg) VM.sysWrite(msg); + } + return fallback(method); + } + } else { + if (VM.VerifyAssertions) VM._assert(false); + return null; + } + } + + + /* recompile the specialized method with OPT_Compiler. 
*/ + public static VM_CompiledMethod recompileWithOptOnStackSpecialization(OPT_CompilationPlan plan) { + if (VM.BuildForOptCompiler) { + if (VM.VerifyAssertions) { VM._assert(plan.method.isForOsrSpecialization());} + if (compilationInProgress) { + return null; + } + + try { + compilationInProgress = true; + + // the compiler will check if isForOsrSpecialization of the method + VM_CompiledMethod cm = optCompile(plan.method, plan); + + // we donot replace the compiledMethod of original method, + // because it is temporary method + return cm; + } catch (OPT_OptimizingCompilerException e) { + e.printStackTrace(); + String msg = "Optimizing compiler " + +"(via recompileWithOptOnStackSpecialization): " + +"can't optimize \"" + plan.method + "\" (error was: " + e + ")\n"; + + if (e.isFatal && VM.ErrorsFatal) { + VM.sysFail(msg); + } else { + VM.sysWrite(msg); + } + return null; + } finally { + compilationInProgress = false; + } + } else { + if (VM.VerifyAssertions) VM._assert(false); + return null; + } + } + + /** + * This method tries to compile the passed method with the OPT_Compiler. + * It will install the new compiled method in the VM, if sucessful. + * NOTE: the recompile method should never be invoked via + * VM_RuntimeCompiler.compile; + * it does not have sufficient guards against recursive recompilation. + * @param plan the compilation plan to use + * @return the CMID of the new method if successful, -1 if the + * recompilation failed. + * + **/ + public static synchronized int recompileWithOpt(OPT_CompilationPlan plan) { + if (VM.BuildForOptCompiler) { + if (compilationInProgress) { + return -1; + } else { + try { + compilationInProgress = true; + VM_CompiledMethod cm = optCompile(plan.method, plan); + try { + plan.method.replaceCompiledMethod(cm); + } catch (Throwable e) { + String msg = "Failure in VM_Method.replaceCompiledMethod (via recompileWithOpt): while replacing \"" + plan.method + "\" (error was: " + e + ")\n"; + if (VM.ErrorsFatal) { + e.printStackTrace(); + VM.sysFail(msg); + } else { + VM.sysWrite(msg); + } + return -1; + } + return cm.getId(); + } catch (OPT_OptimizingCompilerException e) { + String msg = "Optimizing compiler (via recompileWithOpt): can't optimize \"" + plan.method + "\" (error was: " + e + ")\n"; + if (e.isFatal && VM.ErrorsFatal) { + e.printStackTrace(); + VM.sysFail(msg); + } else { + // VM.sysWrite(msg); + } + return -1; + } finally { + compilationInProgress = false; + } + } + } else { + if (VM.VerifyAssertions) VM._assert(false); + return -1; + } + } + + /** + * A wrapper method for those callers who don't want to make + * optimization plans + * @param method the method to recompile + */ + public static int recompileWithOpt(VM_NormalMethod method) { + if (VM.BuildForOptCompiler) { + OPT_CompilationPlan plan = new OPT_CompilationPlan(method, + (OPT_OptimizationPlanElement[])optimizationPlan, + null, + (OPT_Options)options); + return recompileWithOpt(plan); + } else { + if (VM.VerifyAssertions) VM._assert(false); + return -1; + } + } + + /** + * This method uses the default compiler (baseline) to compile a method + * It is typically called when a more aggressive compilation fails. 
+ * This method is safe to invoke from VM_RuntimeCompiler.compile + */ + protected static VM_CompiledMethod fallback(VM_NormalMethod method) { + // call the inherited method "baselineCompile" + return baselineCompile(method); + } + + public static void boot() { + if (VM.MeasureCompilation) { + VM_Callbacks.addExitMonitor(new VM_RuntimeCompiler()); + } + if (VM.BuildForAdaptiveSystem) { + options = new OPT_Options(); + optimizationPlan = OPT_OptimizationPlanner.createOptimizationPlan((OPT_Options)options); + if (VM.MeasureCompilation) { + OPT_OptimizationPlanner.initializeMeasureCompilation(); + } + + OPT_Compiler.init((OPT_Options)options); + + VM_PreCompile.init(); + // when we reach here the OPT compiler is enabled. + compilerEnabled = true; + + for (int i=0; i<earlyOptArgs.length; i+=2) { + processOptCommandLineArg(earlyOptArgs[i], earlyOptArgs[i+1]); + } + } + } + + public static void processCommandLineArg(String prefix, String arg) { + if (VM.BuildForAdaptiveSystem) { + if (VM_Controller.options !=null && VM_Controller.options.optIRC()) { + processOptCommandLineArg(prefix, arg); + } else { + VM_BaselineCompiler.processCommandLineArg(prefix, arg); + } + } else { + VM_BaselineCompiler.processCommandLineArg(prefix, arg); + } + } + + /** + * Compile a Java method when it is first invoked. + * @param method the method to compile + * @return its compiled method. + */ + public static VM_CompiledMethod compile(VM_NormalMethod method) { + if (VM.BuildForAdaptiveSystem) { + VM_CompiledMethod cm; + if (!VM_Controller.enabled) { + // System still early in boot process; compile with baseline compiler + cm = baselineCompile(method); + VM_ControllerMemory.incrementNumBase(); + } else { + if (!preloadChecked) { + preloadChecked = true; // prevent subsequent calls + // N.B. This will use irc options + if (VM_BaselineCompiler.options.PRELOAD_CLASS != null) { + compilationInProgress = true; // use baseline during preload + // Other than when boot options are requested (processed during preloadSpecialClass + // It is hard to communicate options for these special compilations. Use the + // default options and at least pick up the verbose if requested for base/irc + OPT_Options tmpoptions = (OPT_Options)((OPT_Options)options).clone(); + tmpoptions.PRELOAD_CLASS = VM_BaselineCompiler.options.PRELOAD_CLASS; + tmpoptions.PRELOAD_AS_BOOT = VM_BaselineCompiler.options.PRELOAD_AS_BOOT; + if (VM_BaselineCompiler.options.PRINT_METHOD) { + tmpoptions.PRINT_METHOD = true; + } else { + tmpoptions = (OPT_Options)options; + } + OPT_Compiler.preloadSpecialClass(tmpoptions); + compilationInProgress = false; + } + } + if (VM_Controller.options.optIRC() || method.optCompileOnly()) { + if (// will only run once: don't bother optimizing + method.isClassInitializer() || + // exception in progress. 
can't use opt compiler: + // it uses exceptions and runtime doesn't support + // multiple pending (undelivered) exceptions [--DL] + VM_Thread.getCurrentThread().hardwareExceptionRegisters.inuse) { + // compile with baseline compiler + cm = baselineCompile(method); + VM_ControllerMemory.incrementNumBase(); + } else { // compile with opt compiler + VM_AOSInstrumentationPlan instrumentationPlan = + new VM_AOSInstrumentationPlan(VM_Controller.options, method); + OPT_CompilationPlan compPlan = + new OPT_CompilationPlan(method, (OPT_OptimizationPlanElement[])optimizationPlan, + instrumentationPlan, (OPT_Options)options); + if(!method.optCompileOnly()) { + cm = optCompileWithFallBack(method, compPlan); + } + else { + compilationInProgress = true; + try { + cm = optCompile(method, compPlan); + } catch (OPT_OptimizingCompilerException e) { + String msg = "Optimizing compiler " + +"(on method that can only be optimizing compiler compiled): " + +"can't optimize \"" + method + "\""; + throw new Error(msg, e); + } finally { + compilationInProgress = false; + } + } + } + } else { + if ((VM_Controller.options.BACKGROUND_RECOMPILATION + && (!VM_Controller.options.ENABLE_REPLAY_COMPILE) + && (!VM_Controller.options.ENABLE_PRECOMPILE)) + ) { + // must be an inital compilation: compile with baseline compiler + // or if recompilation with OSR. + cm = baselineCompile(method); + VM_ControllerMemory.incrementNumBase(); + } else { + if (VM_CompilerAdviceAttribute.hasAdvice()) { + VM_CompilerAdviceAttribute attr = + VM_CompilerAdviceAttribute.getCompilerAdviceInfo(method); + if (attr.getCompiler() != VM_CompiledMethod.OPT) { + cm=fallback(method); + VM_AOSLogging.recordCompileTime(cm, 0.0); + return cm; + } + int newCMID = -2; + OPT_CompilationPlan compPlan; + if (VM_Controller.options.counters()) { + // for invocation counter, we only use one optimization level + compPlan = VM_InvocationCounts.createCompilationPlan(method); + } else { + // for now there is not two options for sampling, so + // we don't have to use: if (VM_Controller.options.sampling()) + compPlan = VM_Controller.recompilationStrategy.createCompilationPlan(method, attr.getOptLevel(), null); + } + VM_AOSLogging.recompilationStarted(compPlan); + newCMID = recompileWithOpt(compPlan); + cm = newCMID == -1 ? null : VM_CompiledMethods.getCompiledMethod(newCMID); + if (newCMID == -1) { + VM_AOSLogging.recompilationAborted(compPlan); + } else if (newCMID > 0) { + VM_AOSLogging.recompilationCompleted(compPlan); + } + if (cm == null) { // if recompilation is aborted + cm = baselineCompile(method); + VM_ControllerMemory.incrementNumBase(); + } + } else { + // check to see if there is a compilation plan for this method. + VM_ControllerPlan plan = VM_ControllerMemory.findLatestPlan(method); + if (plan == null || plan.getStatus() != VM_ControllerPlan.IN_PROGRESS) { + // initial compilation or some other funny state: compile with baseline compiler + cm = baselineCompile(method); + VM_ControllerMemory.incrementNumBase(); + } else { + cm = plan.doRecompile(); + if (cm == null) { + // opt compilation aborted for some reason. + cm = baselineCompile(method); + } + } + } + } + } + } + if ((VM_Controller.options.ENABLE_ADVICE_GENERATION) + && (cm.getCompilerType() == VM_CompiledMethod.BASELINE) + && VM_Controller.enabled) { + VM_AOSGenerator.baseCompilationCompleted(cm); + } + VM_AOSLogging.recordCompileTime(cm, 0.0); + return cm; + } else { + return baselineCompile(method); + } + } + + /** + * Compile the stub for a native method when it is first invoked. 
+ * @param method the method to compile + * @return its compiled method. + */ + public static VM_CompiledMethod compile(VM_NativeMethod method) { + VM_Callbacks.notifyMethodCompile(method, VM_CompiledMethod.JNI); + long start = 0; + if (VM.MeasureCompilation || VM.BuildForAdaptiveSystem) { + start = VM_Thread.getCurrentThread().accumulateCycles(); + } + + VM_CompiledMethod cm = VM_JNICompiler.compile(method); + if (VM.verboseJNI) { + VM.sysWriteln("[Dynamic-linking native method " + + method.getDeclaringClass() + "." + method.getName() + + " "+method.getDescriptor()); + } + + if (VM.MeasureCompilation || VM.BuildForAdaptiveSystem) { + long end = VM_Thread.getCurrentThread().accumulateCycles(); + double compileTime = VM_Time.cyclesToMillis(end - start); + cm.setCompilationTime(compileTime); + record(JNI_COMPILER, method, cm); + } + + return cm; + } + + /** + * returns the string version of compiler number, using the naming scheme + * in this file + * @param compiler the compiler of interest + * @return the string version of compiler number + */ + public static String getCompilerName(byte compiler) { + return name[compiler]; + } + +} Added: ext/org/jikesrvm/opt/ir/OPT_ConvertBCtoHIR.java =================================================================== --- ext/org/jikesrvm/opt/ir/OPT_ConvertBCtoHIR.java (rev 0) +++ ext/org/jikesrvm/opt/ir/OPT_ConvertBCtoHIR.java 2007-03-21 15:19:39 UTC (rev 8) @@ -0,0 +1,54 @@ +/* + * This file is part of Jikes RVM (http://jikesrvm.sourceforge.net). + * The Jikes RVM project is distributed under the Common Public License (CPL). + * A copy of the license is included in the distribution, and is also + * available at http://www.opensource.org/licenses/cpl1.0.php + * + * (C) Copyright IBM Corp. 2001 + */ +package org.jikesrvm.opt.ir; + +import org.jikesrvm.opt.*; + +/** + * Translate from bytecodes to HIR + * + * @author Dave Grove + */ +public final class OPT_ConvertBCtoHIR extends OPT_CompilerPhase { + + public String getName () { + return "Generate HIR"; + } + + /** + * Generate HIR for ir.method into ir + * + * @param ir The IR to generate HIR into + */ + public void perform (OPT_IR ir) { + // Generate the cfg into gc + OPT_GenerationContext gc = + new OPT_GenerationContext(ir.method, ir.compiledMethod, + ir.options, ir.inlinePlan); + ir.method.createHIRGenerator(gc).generateHIR(); + // Transfer HIR and misc state from gc to the ir object + ir.gc = gc; + ir.cfg = gc.cfg; + ir.regpool = gc.temps; + if (gc.allocFrame) { + ir.stackManager.forceFrameAllocation(); + } + // ir now contains well formed HIR. + ir.IRStage = OPT_IR.HIR; + ir.HIRInfo = new OPT_HIRInfo(ir); + if (OPT_IR.SANITY_CHECK) { + ir.verify("Initial HIR", true); + } + } + + // This phase contains no instance fields. + public OPT_CompilerPhase newExecution (OPT_IR ir) { + return this; + } +} Modified: src/org/binarytranslator/vmInterface/DBT_Trace.java =================================================================== --- src/org/binarytranslator/vmInterface/DBT_Trace.java 2007-03-21 15:14:00 UTC (rev 7) +++ src/org/binarytranslator/vmInterface/DBT_Trace.java 2007-03-21 15:19:39 UTC (rev 8) @@ -161,4 +161,10 @@ VM_CompiledMethods.setCompiledMethodObsolete(oldCompiledMethod); } } + /** + * Map bytecode index to java source line number + */ + public int getLineNumberForBCIndex(int bci) { + return bci; + } } This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
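The rate bookkeeping in VM_RuntimeCompiler keeps a running sum of log(rate) per compiler because, as its comments note, the product of thousands of bytecode-bytes-per-millisecond rates would overflow a double; report() then recovers the geometric mean as e raised to the mean of the logs. A small self-contained sketch of that identity, with made-up sample rates:

// Sketch of the geometric-mean-via-logs trick used by VM_RuntimeCompiler:
// geomean(r1..rn) = exp((log r1 + ... + log rn) / n), which never forms the
// potentially overflowing product of the rates.
public class GeoMeanDemo {
    public static void main(String[] args) {
        double[] rates = {120.5, 98.2, 310.0, 45.7}; // hypothetical bcb/ms rates
        double totalLogOfRates = 0;
        int totalLogValueMethods = 0;
        for (double rate : rates) {
            totalLogOfRates += Math.log(rate);
            totalLogValueMethods++;
        }
        double geomean = Math.exp(totalLogOfRates / totalLogValueMethods);
        System.out.printf("geometric mean rate = %.2f bcb/ms%n", geomean);
    }
}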
From: <cap...@us...> - 2007-04-23 09:23:23
|
Revision: 82 http://svn.sourceforge.net/pearcolator/?rev=82&view=rev Author: captain5050 Date: 2007-04-23 02:23:22 -0700 (Mon, 23 Apr 2007) Log Message: ----------- Support for GDB auxv packet Modified Paths: -------------- src/org/binarytranslator/arch/arm/os/process/linux/ARM_LinuxProcessSpace.java src/org/binarytranslator/arch/ppc/os/process/linux/PPC_LinuxProcessSpace.java src/org/binarytranslator/arch/x86/os/process/linux/X86_LinuxProcessSpace.java src/org/binarytranslator/generic/execution/GdbController.java tests/gcc_simple/x86/helloworld Modified: src/org/binarytranslator/arch/arm/os/process/linux/ARM_LinuxProcessSpace.java =================================================================== --- src/org/binarytranslator/arch/arm/os/process/linux/ARM_LinuxProcessSpace.java 2007-04-22 17:03:27 UTC (rev 81) +++ src/org/binarytranslator/arch/arm/os/process/linux/ARM_LinuxProcessSpace.java 2007-04-23 09:23:22 UTC (rev 82) @@ -32,6 +32,11 @@ */ private int brk; + /** + * Auxiliary vector + */ + private int[] auxVector; + public ARM_LinuxProcessSpace() { sysCallGenerator = new Legacy(this); sysCalls = new ARM_LinuxSystemCalls(sysCallGenerator); @@ -52,7 +57,7 @@ this.brk = brk; // initialize the stack - int[] auxVector = {//LinuxStackInitializer.AuxiliaryVectorType.AT_SYSINFO, 0xffffe400, + auxVector = new int[]{//LinuxStackInitializer.AuxiliaryVectorType.AT_SYSINFO, 0xffffe400, //LinuxStackInitializer.AuxiliaryVectorType.AT_SYSINFO_EHDR, 0xffffe000, LinuxStackInitializer.AuxiliaryVectorType.AT_HWCAP, 0x78bfbff, LinuxStackInitializer.AuxiliaryVectorType.AT_PAGESZ, 0x1000, @@ -80,4 +85,7 @@ return null; } + public int[] getAuxVector() { + return auxVector; + } } Modified: src/org/binarytranslator/arch/ppc/os/process/linux/PPC_LinuxProcessSpace.java =================================================================== --- src/org/binarytranslator/arch/ppc/os/process/linux/PPC_LinuxProcessSpace.java 2007-04-22 17:03:27 UTC (rev 81) +++ src/org/binarytranslator/arch/ppc/os/process/linux/PPC_LinuxProcessSpace.java 2007-04-23 09:23:22 UTC (rev 82) @@ -50,6 +50,11 @@ private static final int STACK_TOP = 0x80000000; /** + * Auxiliary vector + */ + private int[] auxVector; + + /** * Constructor */ public PPC_LinuxProcessSpace(Loader loader) { @@ -77,7 +82,7 @@ * Initialise the stack */ private int initialiseStack(Loader loader, int pc) { - int[] auxVector = { + auxVector = new int[]{ LinuxStackInitializer.AuxiliaryVectorType.AT_IGNOREPPC, LinuxStackInitializer.AuxiliaryVectorType.AT_IGNOREPPC, LinuxStackInitializer.AuxiliaryVectorType.AT_IGNOREPPC, @@ -198,6 +203,10 @@ return this; } + public int[] getAuxVector() { + return auxVector; + } + public int getGDBFrameBaseRegister() { return -1; } Modified: src/org/binarytranslator/arch/x86/os/process/linux/X86_LinuxProcessSpace.java =================================================================== --- src/org/binarytranslator/arch/x86/os/process/linux/X86_LinuxProcessSpace.java 2007-04-22 17:03:27 UTC (rev 81) +++ src/org/binarytranslator/arch/x86/os/process/linux/X86_LinuxProcessSpace.java 2007-04-23 09:23:22 UTC (rev 82) @@ -30,6 +30,11 @@ final LinuxSystemCalls syscalls; /** + * Experimental support for the Linux sysinfo page (use to present sysenter and sysexit system call entry) + */ + private static final boolean useSysInfoPage = false; + + /** * Allows uniform access to the arguments of a system call. We cache this object for reuse. 
*/ private final X86_LinuxSyscallArgumentIterator syscallArgs; @@ -45,6 +50,11 @@ private static final int STACK_TOP = 0xC0000000; /** + * Auxiliary vector + */ + private int[] auxVector; + + /** * Constructor */ public X86_LinuxProcessSpace(Loader loader) { @@ -63,40 +73,46 @@ registers.eip = pc; this.brk = brk; registers.writeGP32(X86_Registers.ESP, initialiseStack(loader, pc)); - try { - memory.map(0xffffe000, 8192, true, true, true); - } catch (MemoryMapException e) { - throw new Error ("Error creating VDSO page"); + if (useSysInfoPage) { + try { + memory.map(0xffffe000, 8192, true, true, true); + } catch (MemoryMapException e) { + throw new Error ("Error creating VDSO page"); + } + memory.store8(0xffffe400, 0xCD); // INT + memory.store8(0xffffe400, 0x80); // 80h + memory.store8(0xffffe400, 0xC3); // RET } - memory.store8(0xffffe400, 0xCD); // INT - memory.store8(0xffffe400, 0x80); // 80h - memory.store8(0xffffe400, 0xC3); // RET } /** * Initialise the stack */ private int initialiseStack(Loader loader, int pc) { - int[] auxVector = {//LinuxStackInitializer.AuxiliaryVectorType.AT_SYSINFO, 0xffffe400, - //LinuxStackInitializer.AuxiliaryVectorType.AT_SYSINFO_EHDR, 0xffffe000, - LinuxStackInitializer.AuxiliaryVectorType.AT_HWCAP, 0x78bfbff, - LinuxStackInitializer.AuxiliaryVectorType.AT_PAGESZ, 0x1000, - LinuxStackInitializer.AuxiliaryVectorType.AT_CLKTCK, 0x64, - LinuxStackInitializer.AuxiliaryVectorType.AT_PHDR, ((ELF_Loader)loader).getProgramHeaderAddress(), - LinuxStackInitializer.AuxiliaryVectorType.AT_PHNUM, ((ELF_Loader)loader).elfHeader.getNumberOfProgramSegmentHeaders(), - LinuxStackInitializer.AuxiliaryVectorType.AT_BASE, 0x0, - LinuxStackInitializer.AuxiliaryVectorType.AT_FLAGS, 0x0, - LinuxStackInitializer.AuxiliaryVectorType.AT_ENTRY, pc, + auxVector = new int[] { + LinuxStackInitializer.AuxiliaryVectorType.AT_HWCAP, 0x078bfbff, + LinuxStackInitializer.AuxiliaryVectorType.AT_PAGESZ, 0x1000, + LinuxStackInitializer.AuxiliaryVectorType.AT_CLKTCK, 0x64, + LinuxStackInitializer.AuxiliaryVectorType.AT_PHDR, ((ELF_Loader)loader).getProgramHeaderAddress(), + LinuxStackInitializer.AuxiliaryVectorType.AT_PHNUM, ((ELF_Loader)loader).elfHeader.getNumberOfProgramSegmentHeaders(), + LinuxStackInitializer.AuxiliaryVectorType.AT_BASE, 0x0, + LinuxStackInitializer.AuxiliaryVectorType.AT_FLAGS, 0x0, + LinuxStackInitializer.AuxiliaryVectorType.AT_ENTRY, pc, - LinuxStackInitializer.AuxiliaryVectorType.AT_UID, DBT_Options.UID, - LinuxStackInitializer.AuxiliaryVectorType.AT_EUID, DBT_Options.UID, - LinuxStackInitializer.AuxiliaryVectorType.AT_GID, DBT_Options.GID, - LinuxStackInitializer.AuxiliaryVectorType.AT_EGID, DBT_Options.GID, + LinuxStackInitializer.AuxiliaryVectorType.AT_UID, DBT_Options.UID, + LinuxStackInitializer.AuxiliaryVectorType.AT_EUID, DBT_Options.UID, + LinuxStackInitializer.AuxiliaryVectorType.AT_GID, DBT_Options.GID, + LinuxStackInitializer.AuxiliaryVectorType.AT_EGID, DBT_Options.GID, - LinuxStackInitializer.AuxiliaryVectorType.AT_SECURE, 0, - //LinuxStackInitializer.AuxiliaryVectorType.AT_PLATFORM, LinuxStackInitializer.AuxiliaryVectorType.STACK_TOP - getPlatformString().length, - LinuxStackInitializer.AuxiliaryVectorType.AT_NULL, 0x0}; + LinuxStackInitializer.AuxiliaryVectorType.AT_SECURE, 0, + //LinuxStackInitializer.AuxiliaryVectorType.AT_PLATFORM, LinuxStackInitializer.AuxiliaryVectorType.STACK_TOP - getPlatformString().length, + LinuxStackInitializer.AuxiliaryVectorType.AT_NULL, 0x0}; + if (useSysInfoPage) { +// LinuxStackInitializer.AuxiliaryVectorType.AT_SYSINFO, 
0xffffe400, +// LinuxStackInitializer.AuxiliaryVectorType.AT_SYSINFO_EHDR, 0xffffe000, + } + return LinuxStackInitializer.stackInit(memory, STACK_TOP, getEnvironmentVariables(), auxVector); } @@ -149,29 +165,8 @@ public void setStackPtr(int ptr) {} - public int[] getAuxVector() { //ELF_Header header, ELF_ProgramHeaderTable programHeaders) { - /* - return new int[] { - 32, 0xffffe400, - 33, 0xffffe000, - ELF_Constants.AT_HWCAP, 0x78bfbff, - ELF_Constants.AT_PAGESZ, 0x1000, - ELF_Constants.AT_CLKTCK, 0x64, - ELF_Constants.AT_PHDR, header.e_phoff - programHeaders.getSegment(0).p_offset + programHeaders.getSegment(0).p_vaddr, - ELF_Constants.AT_PHNUM, header.e_phnum, - ELF_Constants.AT_BASE, 0x0, - ELF_Constants.AT_FLAGS, 0x0, - ELF_Constants.AT_ENTRY, header.e_entry, - ELF_Constants.AT_UID, ELF_Constants.UID, - ELF_Constants.AT_EUID, ELF_Constants.UID, - ELF_Constants.AT_GID, ELF_Constants.GID, - ELF_Constants.AT_EGID, ELF_Constants.GID, - ELF_Constants.AT_SECURE, 0, - ELF_Constants.AT_PLATFORM, ELF_Constants.STACK_TOP - getPlatformString().length, - ELF_Constants.AT_NULL, 0x0, - }; - */ - throw new Error("TODO"); + public int[] getAuxVector() { + return auxVector; } public byte[] getPlatformString() { Modified: src/org/binarytranslator/generic/execution/GdbController.java =================================================================== --- src/org/binarytranslator/generic/execution/GdbController.java 2007-04-22 17:03:27 UTC (rev 81) +++ src/org/binarytranslator/generic/execution/GdbController.java 2007-04-23 09:23:22 UTC (rev 82) @@ -13,6 +13,7 @@ import java.net.*; import org.binarytranslator.generic.fault.BadInstructionException; +import org.binarytranslator.generic.fault.SegmentationFault; import org.binarytranslator.generic.os.process.ProcessSpace; /** @@ -63,6 +64,11 @@ * Return the address of the current instruction. 
*/ int getCurrentInstructionAddress(); + + /** + * Get the auxiliary vector + */ + int[] getAuxVector(); } @@ -480,6 +486,32 @@ ':', ':' })) { // GDB is telling us it will handle symbol queries for us - nice :-) replyOK(); + } else if (doesBufferMatch(2, new byte[] { 'P','a','r','t',':','a','u','x','v', + ':','r','e','a','d',':',':'})) { + String data = bufferToString(18, dataEnd); + int offset = Integer.parseInt(data.substring(0, data.indexOf(',')), 16); + int length = Integer.parseInt(data.substring(data.indexOf(',') + 1), 16); + int[] auxv = target.getAuxVector(); + byte[] auxv_asbytes = new byte[auxv.length * 4 * 2]; + for(int i=0; i < auxv.length; i++) { + auxv_asbytes[i*8+1] = intToHex (auxv[i] & 0xF); + auxv_asbytes[i*8+0] = intToHex((auxv[i] >> 4) & 0xF); + auxv_asbytes[i*8+3] = intToHex((auxv[i] >> 8) & 0xF); + auxv_asbytes[i*8+2] = intToHex((auxv[i] >> 12) & 0xF); + auxv_asbytes[i*8+5] = intToHex((auxv[i] >> 16) & 0xF); + auxv_asbytes[i*8+4] = intToHex((auxv[i] >> 20) & 0xF); + auxv_asbytes[i*8+7] = intToHex((auxv[i] >> 24) & 0xF); + auxv_asbytes[i*8+6] = intToHex((auxv[i] >> 28) & 0xF); + } + byte[] command = new byte[Math.min(length*2,auxv_asbytes.length-(offset*2))]; + for (int i=0; i < command.length; i++) { + command[i] = auxv_asbytes[offset*2+i]; + } + if (command.length > 0) { + sendCommand(command); + } else { + replyOK(); + } } else { // unrecognized query sendCommand(null); @@ -609,6 +641,10 @@ // report that a SIGILL halted the debugger byte command[] = { 'S', '0', '4' }; sendCommand(command); + } catch (SegmentationFault e) { + // report that a SIGSEGV halted the debugger + byte command[] = { 'S', '0', 'b' }; + sendCommand(command); } break; case 'c': @@ -634,6 +670,10 @@ // report that a SIGILL halted the debugger byte command[] = { 'S', '0', '4' }; sendCommand(command); + } catch (SegmentationFault e) { + // report that a SIGSEGV halted the debugger + byte command[] = { 'S', '0', 'b' }; + sendCommand(command); } break; case 'S': Modified: tests/gcc_simple/x86/helloworld =================================================================== (Binary files differ) This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
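The qPart:auxv:read handler above turns each 32-bit auxiliary-vector entry into eight hex characters in little-endian byte order and then returns only the byte range GDB asked for, replying OK when the requested slice is empty. A self-contained sketch of that encoding follows; the sample vector and its AT_* entries are illustrative only, not taken from a real process image.

// Sketch of the auxv hex encoding produced for a qPart:auxv:read:offset,length
// query: four little-endian bytes per entry, two hex digits per byte.
public class AuxvEncodeDemo {
    static char intToHex(int nibble) {
        return "0123456789abcdef".charAt(nibble & 0xF);
    }

    static String encode(int[] auxv, int offset, int length) {
        StringBuilder hex = new StringBuilder(auxv.length * 8);
        for (int word : auxv) {
            // least-significant byte first, high nibble before low nibble
            for (int b = 0; b < 4; b++) {
                int byteVal = (word >> (8 * b)) & 0xFF;
                hex.append(intToHex(byteVal >> 4)).append(intToHex(byteVal));
            }
        }
        int from = Math.min(offset * 2, hex.length());
        int to = Math.min(from + length * 2, hex.length());
        return hex.substring(from, to); // an empty result maps to an OK reply
    }

    public static void main(String[] args) {
        // illustrative entries: AT_PAGESZ, AT_ENTRY, AT_NULL terminator
        int[] auxv = {6, 0x1000, 9, 0x08048000, 0, 0};
        System.out.println(encode(auxv, 0, 16));
    }
}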
From: <cap...@us...> - 2007-06-11 16:00:26
|
Revision: 130 http://svn.sourceforge.net/pearcolator/?rev=130&view=rev Author: captain5050 Date: 2007-06-11 09:00:28 -0700 (Mon, 11 Jun 2007) Log Message: ----------- Fixes to get us building against the Jikes RVM SVN head again Modified Paths: -------------- ext/org/jikesrvm/compilers/opt/ia32/OPT_BURS_Helpers.java rvmroot.patch src/org/binarytranslator/vmInterface/DynamicCodeRunner.java Modified: ext/org/jikesrvm/compilers/opt/ia32/OPT_BURS_Helpers.java =================================================================== --- ext/org/jikesrvm/compilers/opt/ia32/OPT_BURS_Helpers.java 2007-06-11 15:48:25 UTC (rev 129) +++ ext/org/jikesrvm/compilers/opt/ia32/OPT_BURS_Helpers.java 2007-06-11 16:00:28 UTC (rev 130) @@ -1,10 +1,14 @@ /* - * This file is part of Jikes RVM (http://jikesrvm.sourceforge.net). - * The Jikes RVM project is distributed under the Common Public License (CPL). - * A copy of the license is included in the distribution, and is also - * available at http://www.opensource.org/licenses/cpl1.0.php + * This file is part of the Jikes RVM project (http://jikesrvm.org). * - * (C) Copyright IBM Corp. 2001 + * This file is licensed to You under the Common Public License (CPL); + * You may not use this file except in compliance with the License. You + * may obtain a copy of the License at + * + * http://www.opensource.org/licenses/cpl1.0.php + * + * See the COPYRIGHT.txt file distributed with this work for information + * regarding copyright ownership. */ package org.jikesrvm.compilers.opt.ia32; @@ -15,7 +19,6 @@ import org.jikesrvm.compilers.opt.OPT_DefUse; import org.jikesrvm.compilers.opt.OPT_OptimizingCompilerException; import org.jikesrvm.compilers.opt.ir.Binary; -import org.jikesrvm.compilers.opt.ir.BinaryAcc; import org.jikesrvm.compilers.opt.ir.CacheOp; import org.jikesrvm.compilers.opt.ir.Call; import org.jikesrvm.compilers.opt.ir.CondMove; @@ -26,11 +29,13 @@ import org.jikesrvm.compilers.opt.ir.MIR_Call; import org.jikesrvm.compilers.opt.ir.MIR_Compare; import org.jikesrvm.compilers.opt.ir.MIR_CompareExchange; +import org.jikesrvm.compilers.opt.ir.MIR_CompareExchange8B; import org.jikesrvm.compilers.opt.ir.MIR_CondBranch; import org.jikesrvm.compilers.opt.ir.MIR_CondMove; import org.jikesrvm.compilers.opt.ir.MIR_ConvertDW2QW; import org.jikesrvm.compilers.opt.ir.MIR_Divide; import org.jikesrvm.compilers.opt.ir.MIR_DoubleShift; +import org.jikesrvm.compilers.opt.ir.MIR_Lea; import org.jikesrvm.compilers.opt.ir.MIR_LowTableSwitch; import org.jikesrvm.compilers.opt.ir.MIR_Move; import org.jikesrvm.compilers.opt.ir.MIR_Multiply; @@ -63,6 +68,9 @@ import static org.jikesrvm.compilers.opt.ir.OPT_Operators.DOUBLE_CMPL; import static org.jikesrvm.compilers.opt.ir.OPT_Operators.FLOAT_CMPL; import static org.jikesrvm.compilers.opt.ir.OPT_Operators.GUARD_MOVE; +import static org.jikesrvm.compilers.opt.ir.OPT_Operators.LONG_SHL; +import static org.jikesrvm.compilers.opt.ir.OPT_Operators.LONG_SHR; +import static org.jikesrvm.compilers.opt.ir.OPT_Operators.LONG_USHR; import static org.jikesrvm.compilers.opt.ir.OPT_Operators.IA32_ADC; import static org.jikesrvm.compilers.opt.ir.OPT_Operators.IA32_ADD; import static org.jikesrvm.compilers.opt.ir.OPT_Operators.IA32_AND; @@ -92,8 +100,12 @@ import static org.jikesrvm.compilers.opt.ir.OPT_Operators.IA32_IDIV; import static org.jikesrvm.compilers.opt.ir.OPT_Operators.IA32_IMUL2; import static org.jikesrvm.compilers.opt.ir.OPT_Operators.IA32_JCC; +import static org.jikesrvm.compilers.opt.ir.OPT_Operators.IA32_LEA; import static 
org.jikesrvm.compilers.opt.ir.OPT_Operators.IA32_LOCK_CMPXCHG; +import static org.jikesrvm.compilers.opt.ir.OPT_Operators.IA32_LOCK_CMPXCHG8B; import static org.jikesrvm.compilers.opt.ir.OPT_Operators.IA32_MOV; +import static org.jikesrvm.compilers.opt.ir.OPT_Operators.IA32_MOVSD; +import static org.jikesrvm.compilers.opt.ir.OPT_Operators.IA32_MOVSS; import static org.jikesrvm.compilers.opt.ir.OPT_Operators.IA32_MOVSX__B; import static org.jikesrvm.compilers.opt.ir.OPT_Operators.IA32_MOVZX__B; import static org.jikesrvm.compilers.opt.ir.OPT_Operators.IA32_MUL; @@ -116,7 +128,6 @@ import static org.jikesrvm.compilers.opt.ir.OPT_Operators.IA32_XOR; import static org.jikesrvm.compilers.opt.ir.OPT_Operators.IR_PROLOGUE; import static org.jikesrvm.compilers.opt.ir.OPT_Operators.MIR_LOWTABLESWITCH; - import org.jikesrvm.compilers.opt.ir.OPT_Register; import org.jikesrvm.compilers.opt.ir.OPT_RegisterOperand; import org.jikesrvm.compilers.opt.ir.OPT_RegisterOperandEnumeration; @@ -166,6 +177,95 @@ } /** + * Create the MIR instruction given by operator from the Binary LIR operands + * @param operator the MIR operator + * @param s the instruction being replaced + * @param result the destination register/memory + * @param val1 the first operand + * @param val2 the second operand + */ + protected void EMIT_Commutative(OPT_Operator operator, OPT_Instruction s, OPT_Operand result, OPT_Operand val1, OPT_Operand val2) { + if(VM.VerifyAssertions) VM._assert(result.isRegister() || result.isMemory()); + // Swap operands to reduce chance of generating a move or to normalize + // constants into val2 + if (val2.similar(result) || val1.isConstant()) { + OPT_Operand temp = val1; + val1 = val2; + val2 = temp; + } + // Do we need to move prior to the operator - result = val1 + if (!result.similar(val1)) { + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, result.copy(), val1))); + } + EMIT(MIR_BinaryAcc.mutate(s, operator, result, val2)); + } + + /** + * Create the MIR instruction given by operator from the Binary LIR operands + * @param operator the MIR operator + * @param s the instruction being replaced + * @param result the destination register/memory + * @param val1 the first operand + * @param val2 the second operand + */ + protected void EMIT_NonCommutative(OPT_Operator operator, OPT_Instruction s, OPT_Operand result, OPT_Operand val1, OPT_Operand val2) { + if(VM.VerifyAssertions) VM._assert(result.isRegister() || result.isMemory()); + if (result.similar(val1)) { + // Straight forward case where instruction is already in accumulate form + EMIT(MIR_BinaryAcc.mutate(s, operator, result, val2)); + } + else if (!result.similar(val2)) { + // Move first operand to result and perform operator on result, if + // possible redundant moves should be remove by register allocator + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, result.copy(), val1))); + EMIT(MIR_BinaryAcc.mutate(s, operator, result, val2)); + } + else { + // Potential to clobber second operand during move to result. 
Use a + // temporary register to perform the operation and rely on register + // allocator to remove redundant moves + OPT_RegisterOperand temp = regpool.makeTemp(result); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, temp, val1))); + EMIT(MIR_BinaryAcc.mutate(s, operator, temp.copyRO(), val2)); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, result, temp.copyRO()))); + } + } + + /** + * Create the MIR instruction given by operator from the Binary LIR operands + * @param operator the MIR operator + * @param s the instruction being replaced + * @param result the destination register/memory + * @param value the first operand + */ + protected void EMIT_Unary(OPT_Operator operator, OPT_Instruction s, OPT_Operand result, OPT_Operand value) { + if(VM.VerifyAssertions) VM._assert(result.isRegister() || result.isMemory()); + // Do we need to move prior to the operator - result = val1 + if (!result.similar(value)) { + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, result.copy(), value))); + } + EMIT(MIR_UnaryAcc.mutate(s, operator, result)); + } + + /** + * Convert the given comparison with a boolean (int) value into a condition + * suitable for the carry flag + * @param x the value 1 (true) or 0 (false) + * @param cond either equal or not equal + * @return lower or higher equal + */ + protected static OPT_ConditionOperand BIT_TEST(int x, OPT_ConditionOperand cond) { + if (VM.VerifyAssertions) VM._assert((x==0)||(x==1)); + if (VM.VerifyAssertions) VM._assert(EQ_NE(cond)); + if ((x == 1 && cond.isEQUAL())|| + (x == 0 && cond.isNOT_EQUAL())) { + return OPT_ConditionOperand.LOWER(); + } else { + return OPT_ConditionOperand.HIGHER_EQUAL(); + } + } + + /** * Follow a chain of Move operations filtering back to a def * * @param use the place to start from @@ -351,8 +451,7 @@ return IA32_FLDL2T; } } - throw new OPT_OptimizingCompilerException("OPT_BURS_Helpers", - "unexpected 387 constant " + val); + throw new OPT_OptimizingCompilerException("OPT_BURS_Helpers", "unexpected 387 constant " + val); } protected final OPT_IA32ConditionOperand COND(OPT_ConditionOperand op) { @@ -403,12 +502,15 @@ protected final OPT_Operand myFP1() { return new OPT_BURSManagedFPROperand(1); } + + protected final OPT_Register getST0() { + return getIR().regpool.getPhysicalRegisterSet().getST0(); + } /** * Move op into a register operand if it isn't one already. 
*/ - private OPT_Operand asReg(OPT_Instruction s, OPT_Operator movop, - OPT_Operand op) { + private OPT_Operand asReg(OPT_Instruction s, OPT_Operator movop, OPT_Operand op) { if (op.isRegister()) { return op; } @@ -450,16 +552,12 @@ OPT_RegisterOperand hval = (OPT_RegisterOperand) op; OPT_RegisterOperand lval = new OPT_RegisterOperand(regpool .getSecondReg(hval.register), VM_TypeReference.Int); - EMIT(MIR_Move.create(IA32_MOV, new OPT_StackLocationOperand(true, - offset + 4, DW), hval)); - EMIT(MIR_Move.create(IA32_MOV, new OPT_StackLocationOperand(true, offset, - DW), lval)); + EMIT(MIR_Move.create(IA32_MOV, new OPT_StackLocationOperand(true, offset + 4, DW), hval)); + EMIT(MIR_Move.create(IA32_MOV, new OPT_StackLocationOperand(true, offset, DW), lval)); } else { OPT_LongConstantOperand val = LC(op); - EMIT(MIR_Move.create(IA32_MOV, new OPT_StackLocationOperand(true, - offset + 4, DW), IC(val.upper32()))); - EMIT(MIR_Move.create(IA32_MOV, new OPT_StackLocationOperand(true, offset, - DW), IC(val.lower32()))); + EMIT(MIR_Move.create(IA32_MOV, new OPT_StackLocationOperand(true, offset + 4, DW), IC(val.upper32()))); + EMIT(MIR_Move.create(IA32_MOV, new OPT_StackLocationOperand(true, offset, DW), IC(val.lower32()))); } } @@ -472,8 +570,7 @@ private OPT_MemoryOperand loadFromJTOC(Offset offset) { OPT_LocationOperand loc = new OPT_LocationOperand(offset); OPT_Operand guard = TG(); - return OPT_MemoryOperand.D(VM_Magic.getTocPointer().plus(offset), (byte) 4, - loc, guard); + return OPT_MemoryOperand.D(VM_Magic.getTocPointer().plus(offset), (byte) 4, loc, guard); } /* @@ -513,18 +610,25 @@ * @param s the instruction to expand * @param result the result operand * @param value the second operand + * @param signExtend should the value be sign or zero extended? */ protected final void INT_2LONG(OPT_Instruction s, OPT_RegisterOperand result, - OPT_Operand value) { +OPT_Operand value, boolean signExtend) { OPT_Register hr = result.register; OPT_Register lr = regpool.getSecondReg(hr); - EMIT(MIR_Move.create(IA32_MOV, new OPT_RegisterOperand(lr, - VM_TypeReference.Int), value)); - EMIT(MIR_Move.create(IA32_MOV, new OPT_RegisterOperand(hr, - VM_TypeReference.Int), - new OPT_RegisterOperand(lr, VM_TypeReference.Int))); - EMIT(MIR_BinaryAcc.create(IA32_SAR, new OPT_RegisterOperand(hr, - VM_TypeReference.Int), IC(31))); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, new OPT_RegisterOperand(lr, VM_TypeReference.Int), value))); + if (signExtend) { + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, + new OPT_RegisterOperand(hr, VM_TypeReference.Int), + new OPT_RegisterOperand(lr, VM_TypeReference.Int)))); + EMIT(MIR_BinaryAcc.mutate(s,IA32_SAR, + new OPT_RegisterOperand(hr, VM_TypeReference.Int), + IC(31))); + } else { + EMIT(MIR_Move.mutate(s, IA32_MOV, + new OPT_RegisterOperand(hr, VM_TypeReference.Int), + IC(0))); + } } /** @@ -537,15 +641,14 @@ * @param result the result operand * @param value the second operand */ - protected final void FPR_2INT(OPT_Instruction s, OPT_RegisterOperand result, - OPT_Operand value) { + protected final void FPR_2INT(OPT_Instruction s, OPT_RegisterOperand result, OPT_Operand value) { OPT_MemoryOperand M; // Step 1: Get value to be converted into myFP0 // and in 'strict' IEEE mode. if (value instanceof OPT_MemoryOperand) { // value is in memory, all we have to do is load it - EMIT(MIR_Move.create(IA32_FLD, myFP0(), value)); + EMIT(CPOS(s, MIR_Move.create(IA32_FLD, myFP0(), value))); } else { // sigh. value is an FP register. Unfortunately, // SPECjbb requires some 'strict' FP semantics. 
Naturally, we don't @@ -558,16 +661,16 @@ if (VM.VerifyAssertions) { VM._assert(value.similar(myFP0())); } - EMIT(MIR_Move.create(IA32_FSTP, MO_CONV(DW), value)); - EMIT(MIR_Move.create(IA32_FLD, myFP0(), MO_CONV(DW))); + EMIT(CPOS(s, MIR_Move.create(IA32_FSTP, MO_CONV(DW), value))); + EMIT(CPOS(s, MIR_Move.create(IA32_FLD, myFP0(), MO_CONV(DW)))); } else { - EMIT(MIR_Move.create(IA32_FMOV, MO_CONV(DW), value)); - EMIT(MIR_Move.create(IA32_FLD, myFP0(), MO_CONV(DW))); + EMIT(CPOS(s, MIR_Move.create(IA32_FMOV, MO_CONV(DW), value))); + EMIT(CPOS(s, MIR_Move.create(IA32_FLD, myFP0(), MO_CONV(DW)))); } } // FP Stack: myFP0 = value - EMIT(MIR_Move.create(IA32_FIST, MO_CONV(DW), myFP0())); + EMIT(CPOS(s, MIR_Move.create(IA32_FIST, MO_CONV(DW), myFP0()))); // MO_CONV now holds myFP0 converted to an integer (round-toward nearest) // FP Stack: myFP0 == value @@ -576,112 +679,93 @@ OPT_Register one = regpool.getInteger(); OPT_Register isPositive = regpool.getInteger(); OPT_Register isNegative = regpool.getInteger(); - EMIT(MIR_Move.create(IA32_MOV, new OPT_RegisterOperand(one, - VM_TypeReference.Int), IC(1))); - EMIT(MIR_Move.create(IA32_MOV, new OPT_RegisterOperand(isPositive, - VM_TypeReference.Int), IC(0))); - EMIT(MIR_Move.create(IA32_MOV, new OPT_RegisterOperand(isNegative, - VM_TypeReference.Int), IC(0))); - EMIT(MIR_Nullary.create(IA32_FLDZ, myFP0())); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, new OPT_RegisterOperand(one, VM_TypeReference.Int), IC(1)))); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, new OPT_RegisterOperand(isPositive, VM_TypeReference.Int), IC(0)))); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, new OPT_RegisterOperand(isNegative, VM_TypeReference.Int), IC(0)))); + EMIT(CPOS(s, MIR_Nullary.create(IA32_FLDZ, myFP0()))); // FP Stack: myFP0 = 0.0; myFP1 = value - EMIT(MIR_Compare.create(IA32_FCOMIP, myFP0(), myFP1())); + EMIT(CPOS(s, MIR_Compare.create(IA32_FCOMIP, myFP0(), myFP1()))); // FP Stack: myFP0 = value - EMIT(MIR_CondMove.create(IA32_CMOV, - new OPT_RegisterOperand(isPositive, - VM_TypeReference.Int), - new OPT_RegisterOperand(one, - VM_TypeReference.Int), - OPT_IA32ConditionOperand.LLT())); - EMIT(MIR_CondMove.create(IA32_CMOV, - new OPT_RegisterOperand(isNegative, - VM_TypeReference.Int), - new OPT_RegisterOperand(one, - VM_TypeReference.Int), - OPT_IA32ConditionOperand.LGT())); + EMIT(CPOS(s, MIR_CondMove.create(IA32_CMOV, + new OPT_RegisterOperand(isPositive, VM_TypeReference.Int), + new OPT_RegisterOperand(one, VM_TypeReference.Int), + OPT_IA32ConditionOperand.LLT()))); + EMIT(CPOS(s, MIR_CondMove.create(IA32_CMOV, + new OPT_RegisterOperand(isNegative, VM_TypeReference.Int), + new OPT_RegisterOperand(one, VM_TypeReference.Int), + OPT_IA32ConditionOperand.LGT()))); - EMIT(MIR_Move.create(IA32_FILD, myFP0(), MO_CONV(DW))); + EMIT(CPOS(s, MIR_Move.create(IA32_FILD, myFP0(), MO_CONV(DW)))); // FP Stack: myFP0 = round(value), myFP1 = value // addee = 1 iff round(x) < x // subtractee = 1 iff round(x) > x OPT_Register addee = regpool.getInteger(); OPT_Register subtractee = regpool.getInteger(); - EMIT(MIR_Compare.create(IA32_FCOMIP, myFP0(), myFP1())); + EMIT(CPOS(s, MIR_Compare.create(IA32_FCOMIP, myFP0(), myFP1()))); // FP Stack: myFP0 = value - EMIT(MIR_Move.create(IA32_MOV, new OPT_RegisterOperand(addee, - VM_TypeReference.Int), IC(0))); - EMIT(MIR_Move.create(IA32_MOV, new OPT_RegisterOperand(subtractee, - VM_TypeReference.Int), IC(0))); - EMIT(MIR_CondMove.create(IA32_CMOV, - new OPT_RegisterOperand(addee, - VM_TypeReference.Int), - new OPT_RegisterOperand(one, - 
VM_TypeReference.Int), - OPT_IA32ConditionOperand.LLT())); - EMIT(MIR_CondMove.create(IA32_CMOV, - new OPT_RegisterOperand(subtractee, - VM_TypeReference.Int), - new OPT_RegisterOperand(one, - VM_TypeReference.Int), - OPT_IA32ConditionOperand.LGT())); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, new OPT_RegisterOperand(addee, VM_TypeReference.Int), IC(0)))); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, new OPT_RegisterOperand(subtractee, VM_TypeReference.Int), IC(0)))); + EMIT(CPOS(s, MIR_CondMove.create(IA32_CMOV, + new OPT_RegisterOperand(addee, VM_TypeReference.Int), + new OPT_RegisterOperand(one, VM_TypeReference.Int), + OPT_IA32ConditionOperand.LLT()))); + EMIT(CPOS(s, MIR_CondMove.create(IA32_CMOV, + new OPT_RegisterOperand(subtractee, VM_TypeReference.Int), + new OPT_RegisterOperand(one, VM_TypeReference.Int), + OPT_IA32ConditionOperand.LGT()))); // Now a little tricky part. // We will add 1 iff isNegative and x > round(x) // We will subtract 1 iff isPositive and x < round(x) - EMIT(MIR_BinaryAcc.create(IA32_AND, - new OPT_RegisterOperand(addee, - VM_TypeReference.Int), - new OPT_RegisterOperand(isNegative, - VM_TypeReference.Int))); - EMIT(MIR_BinaryAcc.create(IA32_AND, - new OPT_RegisterOperand(subtractee, - VM_TypeReference.Int), - new OPT_RegisterOperand(isPositive, - VM_TypeReference.Int))); - EMIT(MIR_Move.create(IA32_MOV, result.copy(), MO_CONV(DW))); - EMIT(MIR_BinaryAcc.create(IA32_ADD, result.copy(), new OPT_RegisterOperand( - addee, VM_TypeReference.Int))); - EMIT(MIR_BinaryAcc.create(IA32_SUB, result.copy(), new OPT_RegisterOperand( - subtractee, VM_TypeReference.Int))); + EMIT(CPOS(s, MIR_BinaryAcc.create(IA32_AND, + new OPT_RegisterOperand(addee, VM_TypeReference.Int), + new OPT_RegisterOperand(isNegative, VM_TypeReference.Int)))); + EMIT(CPOS(s, MIR_BinaryAcc.create(IA32_AND, + new OPT_RegisterOperand(subtractee, VM_TypeReference.Int), + new OPT_RegisterOperand(isPositive, VM_TypeReference.Int)))); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, result.copy(), MO_CONV(DW)))); + EMIT(CPOS(s, MIR_BinaryAcc.create(IA32_ADD, result.copy(), new OPT_RegisterOperand(addee, VM_TypeReference.Int)))); + EMIT(CPOS(s, MIR_BinaryAcc.create(IA32_SUB, result.copy(), new OPT_RegisterOperand(subtractee, VM_TypeReference.Int)))); // Compare myFP0 with (double)Integer.MAX_VALUE - M = OPT_MemoryOperand.D(VM_Magic.getTocPointer().plus( - VM_Entrypoints.maxintField.getOffset()), QW, null, null); - EMIT(MIR_Move.create(IA32_FLD, myFP0(), M)); + M = OPT_MemoryOperand.D(VM_Magic.getTocPointer().plus(VM_Entrypoints.maxintField.getOffset()), QW, null, null); + EMIT(CPOS(s, MIR_Move.create(IA32_FLD, myFP0(), M))); // FP Stack: myFP0 = (double)Integer.MAX_VALUE; myFP1 = value - EMIT(MIR_Compare.create(IA32_FCOMIP, myFP0(), myFP1())); + EMIT(CPOS(s, MIR_Compare.create(IA32_FCOMIP, myFP0(), myFP1()))); // FP Stack: myFP0 = value // If MAX_VALUE < value, then result := MAX_INT OPT_Register maxInt = regpool.getInteger(); - EMIT(MIR_Move.create(IA32_MOV, new OPT_RegisterOperand(maxInt, - VM_TypeReference.Int), IC(Integer.MAX_VALUE))); - EMIT(MIR_CondMove.create(IA32_CMOV, result.copy(), new OPT_RegisterOperand( - maxInt, VM_TypeReference.Int), OPT_IA32ConditionOperand.LLT())); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, new OPT_RegisterOperand(maxInt, VM_TypeReference.Int), IC(Integer.MAX_VALUE)))); + EMIT(CPOS(s, MIR_CondMove.create(IA32_CMOV, + result.copy(), + new OPT_RegisterOperand(maxInt, VM_TypeReference.Int), + OPT_IA32ConditionOperand.LLT()))); // Compare myFP0 with (double)Integer.MIN_VALUE - M = 
OPT_MemoryOperand.D(VM_Magic.getTocPointer().plus( - VM_Entrypoints.minintField.getOffset()), QW, null, null); - EMIT(MIR_Move.create(IA32_FLD, myFP0(), M)); + M = OPT_MemoryOperand.D(VM_Magic.getTocPointer().plus(VM_Entrypoints.minintField.getOffset()), QW, null, null); + EMIT(CPOS(s, MIR_Move.create(IA32_FLD, myFP0(), M))); // FP Stack: myFP0 = (double)Integer.MIN_VALUE; myFP1 = value - EMIT(MIR_Compare.create(IA32_FCOMIP, myFP0(), myFP1())); + EMIT(CPOS(s, MIR_Compare.create(IA32_FCOMIP, myFP0(), myFP1()))); // FP Stack: myFP0 = value // If MIN_VALUE > value, then result := MIN_INT OPT_Register minInt = regpool.getInteger(); - EMIT(MIR_Move.create(IA32_MOV, new OPT_RegisterOperand(minInt, - VM_TypeReference.Int), IC(Integer.MIN_VALUE))); - EMIT(MIR_CondMove.create(IA32_CMOV, result.copy(), new OPT_RegisterOperand( - minInt, VM_TypeReference.Int), OPT_IA32ConditionOperand.LGT())); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, new OPT_RegisterOperand(minInt, VM_TypeReference.Int), IC(Integer.MIN_VALUE)))); + EMIT(CPOS(s, MIR_CondMove.create(IA32_CMOV, + result.copy(), + new OPT_RegisterOperand(minInt, VM_TypeReference.Int), + OPT_IA32ConditionOperand.LGT()))); // Set condition flags: set PE iff myFP0 is a NaN - EMIT(MIR_Compare.create(IA32_FCOMIP, myFP0(), myFP0())); + EMIT(CPOS(s, MIR_Compare.create(IA32_FCOMIP, myFP0(), myFP0()))); // FP Stack: back to original level (all BURS managed slots freed) // If FP0 was classified as a NaN, then result := 0 OPT_Register zero = regpool.getInteger(); - EMIT(MIR_Move.create(IA32_MOV, new OPT_RegisterOperand(zero, - VM_TypeReference.Int), IC(0))); - EMIT(MIR_CondMove.create(IA32_CMOV, result.copy(), new OPT_RegisterOperand( - zero, VM_TypeReference.Int), OPT_IA32ConditionOperand.PE())); - + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, new OPT_RegisterOperand(zero, VM_TypeReference.Int), IC(0)))); + EMIT(CPOS(s, MIR_CondMove.create(IA32_CMOV, + result.copy(), + new OPT_RegisterOperand(zero, VM_TypeReference.Int), + OPT_IA32ConditionOperand.PE()))); } /** @@ -690,15 +774,13 @@ protected final void FPR2GPR_64(OPT_Instruction s) { int offset = -burs.ir.stackManager.allocateSpaceForConversion(); OPT_StackLocationOperand sl = new OPT_StackLocationOperand(true, offset, QW); - OPT_StackLocationOperand sl1 = new OPT_StackLocationOperand(true, - offset + 4, DW); - OPT_StackLocationOperand sl2 = new OPT_StackLocationOperand(true, offset, - DW); - EMIT(MIR_Move.create(IA32_FMOV, sl, Unary.getVal(s))); + OPT_StackLocationOperand sl1 = new OPT_StackLocationOperand(true, offset + 4, DW); + OPT_StackLocationOperand sl2 = new OPT_StackLocationOperand(true, offset, DW); + EMIT(CPOS(s, MIR_Move.create(IA32_FMOV, sl, Unary.getVal(s)))); OPT_RegisterOperand i1 = Unary.getResult(s); OPT_RegisterOperand i2 = new OPT_RegisterOperand(regpool .getSecondReg(i1.register), VM_TypeReference.Int); - EMIT(MIR_Move.create(IA32_MOV, i1, sl1)); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, i1, sl1))); EMIT(MIR_Move.mutate(s, IA32_MOV, i2, sl2)); } @@ -708,28 +790,216 @@ protected final void GPR2FPR_64(OPT_Instruction s) { int offset = -burs.ir.stackManager.allocateSpaceForConversion(); OPT_StackLocationOperand sl = new OPT_StackLocationOperand(true, offset, QW); - OPT_StackLocationOperand sl1 = new OPT_StackLocationOperand(true, - offset + 4, DW); - OPT_StackLocationOperand sl2 = new OPT_StackLocationOperand(true, offset, - DW); + OPT_StackLocationOperand sl1 = new OPT_StackLocationOperand(true, offset + 4, DW); + OPT_StackLocationOperand sl2 = new OPT_StackLocationOperand(true, offset, DW); 
OPT_Operand i1, i2; OPT_Operand val = Unary.getVal(s); if (val instanceof OPT_RegisterOperand) { OPT_RegisterOperand rval = (OPT_RegisterOperand) val; i1 = val; - i2 = new OPT_RegisterOperand(regpool.getSecondReg(rval.register), - VM_TypeReference.Int); + i2 = new OPT_RegisterOperand(regpool.getSecondReg(rval.register), VM_TypeReference.Int); } else { OPT_LongConstantOperand rhs = (OPT_LongConstantOperand) val; i1 = IC(rhs.upper32()); i2 = IC(rhs.lower32()); } - EMIT(MIR_Move.create(IA32_MOV, sl1, i1)); - EMIT(MIR_Move.create(IA32_MOV, sl2, i2)); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, sl1, i1))); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, sl2, i2))); EMIT(MIR_Move.mutate(s, IA32_FMOV, Unary.getResult(s), sl)); } + + /** + * Returns the appropriate move operator based on the type of operand. + */ + protected final OPT_Operator SSE2_MOVE(OPT_Operand o) { + return o.isFloat() ? IA32_MOVSS : IA32_MOVSD; + } + + /** + * Returns the size based on the type of operand. + */ + protected final byte SSE2_SIZE(OPT_Operand o) { + return o.isFloat() ? DW : QW; + } + + /** + * Performs a long -> double/float conversion using x87 and marshalls back to XMMs. + */ + protected final void SSE2_X87_FROMLONG(OPT_Instruction s) { + OPT_Operand result = Unary.getResult(s); + STORE_LONG_FOR_CONV(Unary.getVal(s)); + // conversion space allocated, contains the long to load. + int offset = -burs.ir.stackManager.allocateSpaceForConversion(); + OPT_StackLocationOperand sl = new OPT_StackLocationOperand(true, offset, SSE2_SIZE(result)); + OPT_RegisterOperand st0 = new OPT_RegisterOperand(getST0(), result.getType()); + EMIT(CPOS(s, MIR_Move.create(IA32_FILD, st0, sl))); + EMIT(CPOS(s, MIR_Move.create(IA32_FSTP, sl.copy(), st0.copyD2U()))); + EMIT(CPOS(s, MIR_Move.mutate(s, SSE2_MOVE(result), result, sl.copy()))); + } + + /** + * Performs a long -> double/float conversion using x87 and marshalls between to XMMs. 
+ */ + protected final void SSE2_X87_REM(OPT_Instruction s) { + OPT_Operand result = Move.getClearResult(s); + OPT_RegisterOperand st0 = new OPT_RegisterOperand(getST0(), result.getType()); + int offset = -burs.ir.stackManager.allocateSpaceForConversion(); + OPT_StackLocationOperand sl = new OPT_StackLocationOperand(true, offset, SSE2_SIZE(result)); + EMIT(CPOS(s, MIR_Move.create(SSE2_MOVE(result), sl, Binary.getVal2(s)))); + EMIT(CPOS(s, MIR_Move.create(IA32_FLD, st0, sl.copy()))); + EMIT(CPOS(s, MIR_Move.create(SSE2_MOVE(result), sl.copy(), Binary.getVal1(s)))); + EMIT(CPOS(s, MIR_Move.create(IA32_FLD, st0.copy(), sl.copy()))); + EMIT(CPOS(s, MIR_Nullary.create(IA32_FPREM, st0.copy()))); + EMIT(CPOS(s, MIR_Move.create(IA32_FSTP, sl.copy(), st0.copy()))); + EMIT(MIR_Move.mutate(s, SSE2_MOVE(result), result, sl.copy())); + } + + /** + * Emit code to move 64 bits from SSE2 FPRs to GPRs + */ + protected final void SSE2_FPR2GPR_64(OPT_Instruction s) { + int offset = -burs.ir.stackManager.allocateSpaceForConversion(); + OPT_StackLocationOperand sl = new OPT_StackLocationOperand(true, offset, QW); + OPT_StackLocationOperand sl1 = new OPT_StackLocationOperand(true, offset + 4, DW); + OPT_StackLocationOperand sl2 = new OPT_StackLocationOperand(true, offset, DW); + EMIT(CPOS(s, MIR_Move.create(IA32_MOVSD, sl, Unary.getVal(s)))); + OPT_RegisterOperand i1 = Unary.getResult(s); + OPT_RegisterOperand i2 = new OPT_RegisterOperand(regpool + .getSecondReg(i1.register), VM_TypeReference.Int); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, i1, sl1))); + EMIT(MIR_Move.mutate(s, IA32_MOV, i2, sl2)); + } /** + * Emit code to move 64 bits from GPRs to SSE2 FPRs + */ + protected final void SSE2_GPR2FPR_64(OPT_Instruction s) { + int offset = -burs.ir.stackManager.allocateSpaceForConversion(); + OPT_StackLocationOperand sl = new OPT_StackLocationOperand(true, offset, QW); + OPT_StackLocationOperand sl1 = new OPT_StackLocationOperand(true, offset + 4, DW); + OPT_StackLocationOperand sl2 = new OPT_StackLocationOperand(true, offset, DW); + OPT_Operand i1, i2; + OPT_Operand val = Unary.getVal(s); + if (val instanceof OPT_RegisterOperand) { + OPT_RegisterOperand rval = (OPT_RegisterOperand) val; + i1 = val; + i2 = new OPT_RegisterOperand(regpool.getSecondReg(rval.register), VM_TypeReference.Int); + } else { + OPT_LongConstantOperand rhs = (OPT_LongConstantOperand) val; + i1 = IC(rhs.upper32()); + i2 = IC(rhs.lower32()); + } + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, sl1, i1))); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, sl2, i2))); + EMIT(MIR_Move.mutate(s, IA32_MOVSD, Unary.getResult(s), sl)); + } + + /** + * Emit code to move 32 bits from FPRs to GPRs + */ + protected final void SSE2_FPR2GPR_32(OPT_Instruction s) { + int offset = -burs.ir.stackManager.allocateSpaceForConversion(); + OPT_StackLocationOperand sl = new OPT_StackLocationOperand(true, offset, DW); + EMIT(CPOS(s, MIR_Move.create(IA32_MOVSS, sl, Unary.getVal(s)))); + EMIT(MIR_Move.mutate(s, IA32_MOV, Unary.getResult(s), sl.copy())); + } + + /** + * Emit code to move 32 bits from GPRs to FPRs + */ + protected final void SSE2_GPR2FPR_32(OPT_Instruction s) { + int offset = -burs.ir.stackManager.allocateSpaceForConversion(); + OPT_StackLocationOperand sl = new OPT_StackLocationOperand(true, offset, DW); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, sl, Unary.getVal(s)))); + EMIT(MIR_Move.mutate(s, IA32_MOVSS, Unary.getResult(s), sl.copy())); + } + + /** + * BURS expansion of a commutative SSE2 operation. 
+ */ + protected void SSE2_COP(OPT_Operator operator, OPT_Instruction s, OPT_Operand result, OPT_Operand val1, OPT_Operand val2) { + if(VM.VerifyAssertions) VM._assert(result.isRegister()); + // Swap operands to reduce chance of generating a move or to normalize + // constants into val2 + if (val2.similar(result)) { + OPT_Operand temp = val1; + val1 = val2; + val2 = temp; + } + // Do we need to move prior to the operator - result = val1 + if (!result.similar(val1)) { + EMIT(CPOS(s, MIR_Move.create(SSE2_MOVE(result), result.copy(), val1))); + } + EMIT(MIR_BinaryAcc.mutate(s, operator, result, val2)); + } + + /** + * BURS expansion of a non commutative SSE2 operation. + */ + protected void SSE2_NCOP(OPT_Operator operator, OPT_Instruction s, OPT_Operand result, OPT_Operand val1, OPT_Operand val2) { + if(VM.VerifyAssertions) VM._assert(result.isRegister()); + if (result.similar(val1)) { + // Straight forward case where instruction is already in accumulate form + EMIT(MIR_BinaryAcc.mutate(s, operator, result, val2)); + } + else if (!result.similar(val2)) { + // Move first operand to result and perform operator on result, if + // possible redundant moves should be remove by register allocator + EMIT(CPOS(s, MIR_Move.create(SSE2_MOVE(result), result.copy(), val1))); + EMIT(MIR_BinaryAcc.mutate(s, operator, result, val2)); + } + else { + // Potential to clobber second operand during move to result. Use a + // temporary register to perform the operation and rely on register + // allocator to remove redundant moves + OPT_RegisterOperand temp = regpool.makeTemp(result); + EMIT(CPOS(s, MIR_Move.create(SSE2_MOVE(result), temp, val1))); + EMIT(MIR_BinaryAcc.mutate(s, operator, temp.copyRO(), val2)); + EMIT(CPOS(s, MIR_Move.create(SSE2_MOVE(result), result, temp.copyRO()))); + } + } + + /** + * Expansion of SSE2 negation ops + */ + protected final void SSE2_NEG(OPT_Operator xorOp, OPT_Operator subOp, OPT_Instruction s, OPT_Operand result, OPT_Operand value) { + if(VM.VerifyAssertions) VM._assert(result.isRegister()); + if (!result.similar(value)) { + EMIT(CPOS(s, MIR_BinaryAcc.create(xorOp, result.copy(), result.copy()))); + EMIT(MIR_BinaryAcc.mutate(s, subOp, result, value)); + } else { + OPT_RegisterOperand temp = regpool.makeTemp(value.getType()); + EMIT(CPOS(s, MIR_Move.create(xorOp, temp.copyRO(), temp))); + EMIT(MIR_BinaryAcc.mutate(s, subOp, temp.copyRO(), value)); + EMIT(CPOS(s, MIR_Move.create(SSE2_MOVE(result), result, temp.copyRO()))); + } + } + + /** + * Expansion of SSE2 conversions double <-> float + */ + protected final void SSE2_CONV(OPT_Operator op, OPT_Instruction s, OPT_Operand result, OPT_Operand value) { + if(VM.VerifyAssertions) VM._assert(result.isRegister()); + if(VM.VerifyAssertions) VM._assert(value.isRegister()); + EMIT(MIR_Unary.mutate(s, op, result, value)); + } + + /** + * Expansion of SSE2 comparison operations + */ + protected final void SSE2_IFCMP(OPT_Operator op, OPT_Instruction s, OPT_Operand val1, OPT_Operand val2) { + EMIT(CPOS(s, MIR_Compare.create(op, val1, val2))); + EMIT(s); // OPT_ComplexLIR2MIRExpansion will handle rest of the work. + } + + /** + * Expansion of SSE2 floating point constant loads + */ + protected final void SSE2_FPCONSTANT(OPT_Instruction s) { + EMIT(MIR_Move.mutate(s, SSE2_MOVE(Unary.getResult(s)), Binary.getResult(s), MO_MC(s))); + } + + /** * Expansion of ROUND_TO_ZERO. 
* * @param s the instruction to expand @@ -741,16 +1011,16 @@ OPT_Operand jtoc = OPT_MemoryOperand.BD(PR, VM_Entrypoints.jtocField .getOffset(), DW, null, null); OPT_RegisterOperand regOp = regpool.makeTempInt(); - EMIT(MIR_Move.create(IA32_MOV, regOp, jtoc)); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, regOp, jtoc))); // Store the FPU Control Word to a JTOC slot - OPT_MemoryOperand M = OPT_MemoryOperand.BD(regOp.copyRO(), - VM_Entrypoints.FPUControlWordField.getOffset(), W, null, null); - EMIT(MIR_UnaryNoRes.create(IA32_FNSTCW, M)); + OPT_MemoryOperand M = + OPT_MemoryOperand.BD(regOp.copyRO(), VM_Entrypoints.FPUControlWordField.getOffset(), W, null, null); + EMIT(CPOS(s, MIR_UnaryNoRes.create(IA32_FNSTCW, M))); // Set the bits in the status word that control round to zero. // Note that we use a 32-bit and, even though we only care about the // low-order 16 bits - EMIT(MIR_BinaryAcc.create(IA32_OR, M.copy(), IC(0x00000c00))); + EMIT(CPOS(s, MIR_BinaryAcc.create(IA32_OR, M.copy(), IC(0x00000c00)))); // Now store the result back into the FPU Control Word EMIT(MIR_Nullary.mutate(s, IA32_FLDCW, M.copy())); } @@ -764,196 +1034,315 @@ * @param val2 the second operand * @param isDiv true for div, false for rem */ - protected final void INT_DIVIDES(OPT_Instruction s, - OPT_RegisterOperand result, OPT_Operand val1, OPT_Operand val2, + protected final void INT_DIVIDES(OPT_Instruction s, OPT_RegisterOperand result, OPT_Operand val1, OPT_Operand val2, boolean isDiv) { - EMIT(MIR_Move.create(IA32_MOV, new OPT_RegisterOperand(getEAX(), - VM_TypeReference.Int), val1)); - EMIT(MIR_ConvertDW2QW.create(IA32_CDQ, - new OPT_RegisterOperand(getEDX(), - VM_TypeReference.Int), - new OPT_RegisterOperand(getEAX(), - VM_TypeReference.Int))); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, new OPT_RegisterOperand(getEAX(), VM_TypeReference.Int), val1))); + EMIT(CPOS(s, MIR_ConvertDW2QW.create(IA32_CDQ, + new OPT_RegisterOperand(getEDX(), VM_TypeReference.Int), + new OPT_RegisterOperand(getEAX(), VM_TypeReference.Int)))); if (val2 instanceof OPT_IntConstantOperand) { OPT_RegisterOperand temp = regpool.makeTempInt(); - EMIT(MIR_Move.create(IA32_MOV, temp, val2)); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, temp, val2))); val2 = temp.copyRO(); } EMIT(MIR_Divide.mutate(s, IA32_IDIV, - new OPT_RegisterOperand(getEDX(), - VM_TypeReference.Int), - new OPT_RegisterOperand(getEAX(), - VM_TypeReference.Int), + new OPT_RegisterOperand(getEDX(), VM_TypeReference.Int), + new OPT_RegisterOperand(getEAX(), VM_TypeReference.Int), val2, GuardedBinary.getGuard(s))); if (isDiv) { - EMIT(MIR_Move.create(IA32_MOV, result.copyD2D(), new OPT_RegisterOperand( - getEAX(), VM_TypeReference.Int))); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, result.copyD2D(), new OPT_RegisterOperand(getEAX(), VM_TypeReference.Int)))); } else { - EMIT(MIR_Move.create(IA32_MOV, result.copyD2D(), new OPT_RegisterOperand( - getEDX(), VM_TypeReference.Int))); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, result.copyD2D(), new OPT_RegisterOperand(getEDX(), VM_TypeReference.Int)))); } } /** - * Expansion of LONG_ADD_ACC + * Expansion of LONG_ADD * * @param s the instruction to expand - * @param result the result/first operand - * @param value the second operand + * @param result the result operand + * @param value1 the first operand + * @param value2 the second operand */ protected final void LONG_ADD(OPT_Instruction s, OPT_RegisterOperand result, - OPT_Operand value) { + OPT_Operand value1, OPT_Operand value2) { + // The value of value1 should be identical to result, to avoid moves, 
and a + // register in the case of addition with a constant + if ((value2.similar(result)) || value1.isLongConstant()) { + OPT_Operand temp = value1; + value1 = value2; + value2 = temp; + } OPT_Register lhsReg = result.register; OPT_Register lowlhsReg = regpool.getSecondReg(lhsReg); - if (value instanceof OPT_RegisterOperand) { - OPT_Register rhsReg = ((OPT_RegisterOperand) value).register; - OPT_Register lowrhsReg = regpool.getSecondReg(rhsReg); - EMIT(MIR_BinaryAcc.create(IA32_ADD, - new OPT_RegisterOperand(lowlhsReg, - VM_TypeReference.Int), - new OPT_RegisterOperand(lowrhsReg, - VM_TypeReference.Int))); - EMIT(MIR_BinaryAcc.mutate(s, - IA32_ADC, - new OPT_RegisterOperand(lhsReg, - VM_TypeReference.Int), - new OPT_RegisterOperand(rhsReg, - VM_TypeReference.Int))); - } else { - OPT_LongConstantOperand rhs = (OPT_LongConstantOperand) value; - int low = rhs.lower32(); - int high = rhs.upper32(); + if (value1.isRegister() && value2.isRegister()) { + OPT_Register rhsReg1 = ((OPT_RegisterOperand) value1).register; + OPT_Register lowrhsReg1 = regpool.getSecondReg(rhsReg1); + OPT_Register rhsReg2 = ((OPT_RegisterOperand) value2).register; + OPT_Register lowrhsReg2 = regpool.getSecondReg(rhsReg2); + // Do we need to move prior to the add - result = value1 + if (!value1.similar(result)) { + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, + new OPT_RegisterOperand(lowlhsReg, VM_TypeReference.Int), + new OPT_RegisterOperand(lowrhsReg1, VM_TypeReference.Int)))); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, + new OPT_RegisterOperand(lhsReg, VM_TypeReference.Int), + new OPT_RegisterOperand(rhsReg1, VM_TypeReference.Int)))); + } + // Perform add - result += value2 + EMIT(CPOS(s, MIR_BinaryAcc.create(IA32_ADD, + new OPT_RegisterOperand(lowlhsReg, VM_TypeReference.Int), + new OPT_RegisterOperand(lowrhsReg2, VM_TypeReference.Int)))); + EMIT(CPOS(s, MIR_BinaryAcc.mutate(s, IA32_ADC, + new OPT_RegisterOperand(lhsReg, VM_TypeReference.Int), + new OPT_RegisterOperand(rhsReg2, VM_TypeReference.Int)))); + } else if (value1.isRegister()){ + OPT_Register rhsReg1 = ((OPT_RegisterOperand) value1).register; + OPT_Register lowrhsReg1 = regpool.getSecondReg(rhsReg1); + OPT_LongConstantOperand rhs2 = (OPT_LongConstantOperand) value2; + int low = rhs2.lower32(); + int high = rhs2.upper32(); + // Do we need to move prior to the add - result = value1 + if (!value1.similar(result)) { + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, + new OPT_RegisterOperand(lowlhsReg, VM_TypeReference.Int), + new OPT_RegisterOperand(lowrhsReg1, VM_TypeReference.Int)))); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, + new OPT_RegisterOperand(lhsReg, VM_TypeReference.Int), + new OPT_RegisterOperand(rhsReg1, VM_TypeReference.Int)))); + } + // Perform add - result += value2 if (low == 0) { - EMIT(MIR_BinaryAcc.mutate(s, IA32_ADD, new OPT_RegisterOperand(lhsReg, - VM_TypeReference.Int), IC(high))); + EMIT(CPOS(s, MIR_BinaryAcc.mutate(s, IA32_ADD, + new OPT_RegisterOperand(lhsReg, VM_TypeReference.Int), + IC(high)))); } else { - EMIT(MIR_BinaryAcc.create(IA32_ADD, new OPT_RegisterOperand(lowlhsReg, - VM_TypeReference.Int), IC(low))); - EMIT(MIR_BinaryAcc.mutate(s, IA32_ADC, new OPT_RegisterOperand(lhsReg, - VM_TypeReference.Int), IC(high))); + EMIT(CPOS(s, MIR_BinaryAcc.create(IA32_ADD, + new OPT_RegisterOperand(lowlhsReg, VM_TypeReference.Int), + IC(low)))); + EMIT(CPOS(s, MIR_BinaryAcc.mutate(s, IA32_ADC, + new OPT_RegisterOperand(lhsReg, VM_TypeReference.Int), + IC(high)))); } + } else { + throw new OPT_OptimizingCompilerException("OPT_BURS_Helpers", + "unexpected 
parameters: " + result + "=" + value1 + "+" + value2); } } /** - * Expansion of LONG_SUB_ACC + * Expansion of LONG_SUB * * @param s the instruction to expand - * @param result the result/first operand - * @param value the second operand + * @param result the result operand + * @param value1 the first operand + * @param value2 the second operand */ - protected final void LONG_SUB(OPT_Instruction s, OPT_RegisterOperand result, - OPT_Operand value) { - OPT_Register lhsReg = result.register; - OPT_Register lowlhsReg = regpool.getSecondReg(lhsReg); - if (value instanceof OPT_RegisterOperand) { - OPT_Register rhsReg = ((OPT_RegisterOperand) value).register; - OPT_Register lowrhsReg = regpool.getSecondReg(rhsReg); - EMIT(MIR_BinaryAcc.create(IA32_SUB, - new OPT_RegisterOperand(lowlhsReg, - VM_TypeReference.Int), - new OPT_RegisterOperand(lowrhsReg, - VM_TypeReference.Int))); - EMIT(MIR_BinaryAcc.mutate(s, - IA32_SBB, - new OPT_RegisterOperand(lhsReg, - VM_TypeReference.Int), - new OPT_RegisterOperand(rhsReg, - VM_TypeReference.Int))); - } else { - OPT_LongConstantOperand rhs = (OPT_LongConstantOperand) value; - int low = rhs.lower32(); - int high = rhs.upper32(); - if (low == 0) { - EMIT(MIR_BinaryAcc.mutate(s, IA32_SUB, new OPT_RegisterOperand(lhsReg, - VM_TypeReference.Int), IC(high))); + protected final void LONG_SUB(OPT_Instruction s, OPT_Operand result, + OPT_Operand val1, OPT_Operand val2) { + + if (result.similar(val1)) { + // Straight forward case where instruction is already in accumulate form + if (result.isRegister()) { + OPT_Register lhsReg = result.asRegister().register; + OPT_Register lowlhsReg = regpool.getSecondReg(lhsReg); + if (val2.isRegister()) { + OPT_Register rhsReg2 = val2.asRegister().register; + OPT_Register lowrhsReg2 = regpool.getSecondReg(rhsReg2); + EMIT(CPOS(s, MIR_BinaryAcc.create(IA32_SUB, + new OPT_RegisterOperand(lowlhsReg, VM_TypeReference.Int), + new OPT_RegisterOperand(lowrhsReg2, VM_TypeReference.Int)))); + EMIT(CPOS(s, MIR_BinaryAcc.mutate(s, IA32_SBB, + new OPT_RegisterOperand(lhsReg, VM_TypeReference.Int), + new OPT_RegisterOperand(rhsReg2, VM_TypeReference.Int)))); + } else if (val2.isLongConstant()) { + OPT_LongConstantOperand rhs2 = val2.asLongConstant(); + int low = rhs2.lower32(); + int high = rhs2.upper32(); + EMIT(CPOS(s, MIR_BinaryAcc.create(IA32_SUB, + new OPT_RegisterOperand(lowlhsReg, VM_TypeReference.Int), + IC(low)))); + EMIT(CPOS(s, MIR_BinaryAcc.mutate(s, IA32_SBB, + new OPT_RegisterOperand(lhsReg, VM_TypeReference.Int), + IC(high)))); + } else { + throw new OPT_OptimizingCompilerException("OPT_BURS_Helpers", + "unexpected parameters: " + result + "=" + val1 + "-" + val2); + } } else { - EMIT(MIR_BinaryAcc.create(IA32_SUB, new OPT_RegisterOperand(lowlhsReg, - VM_TypeReference.Int), IC(low))); - EMIT(MIR_BinaryAcc.mutate(s, IA32_SBB, new OPT_RegisterOperand(lhsReg, - VM_TypeReference.Int), IC(high))); + throw new OPT_OptimizingCompilerException("OPT_BURS_Helpers", + "unexpected parameters: " + result + "=" + val1 + "-" + val2); } + } + else if (!result.similar(val2)) { + // Move first operand to result and perform operator on result, if + // possible redundant moves should be remove by register allocator + if (result.isRegister()) { + OPT_Register lhsReg = result.asRegister().register; + OPT_Register lowlhsReg = regpool.getSecondReg(lhsReg); + // Move val1 into result + if (val1.isRegister()) { + OPT_Register rhsReg1 = val1.asRegister().register; + OPT_Register lowrhsReg1 = regpool.getSecondReg(rhsReg1); + EMIT(CPOS(s, 
MIR_Move.create(IA32_MOV, + new OPT_RegisterOperand(lowlhsReg, VM_TypeReference.Int), + new OPT_RegisterOperand(lowrhsReg1, VM_TypeReference.Int)))); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, + new OPT_RegisterOperand(lhsReg, VM_TypeReference.Int), + new OPT_RegisterOperand(rhsReg1, VM_TypeReference.Int)))); + } else if (val1.isLongConstant()) { + OPT_LongConstantOperand rhs1 = val1.asLongConstant(); + int low = rhs1.lower32(); + int high = rhs1.upper32(); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, + new OPT_RegisterOperand(lowlhsReg, VM_TypeReference.Int), + IC(low)))); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, + new OPT_RegisterOperand(lhsReg, VM_TypeReference.Int), + IC(high)))); + } else { + throw new OPT_OptimizingCompilerException("OPT_BURS_Helpers", + "unexpected parameters: " + result + "=" + val1 + "-" + val2); + } + // Perform subtract + if (val2.isRegister()) { + OPT_Register rhsReg2 = val2.asRegister().register; + OPT_Register lowrhsReg2 = regpool.getSecondReg(rhsReg2); + EMIT(CPOS(s, MIR_BinaryAcc.create(IA32_SUB, + new OPT_RegisterOperand(lowlhsReg, VM_TypeReference.Int), + new OPT_RegisterOperand(lowrhsReg2, VM_TypeReference.Int)))); + EMIT(CPOS(s, MIR_BinaryAcc.mutate(s, IA32_SBB, + new OPT_RegisterOperand(lhsReg, VM_TypeReference.Int), + new OPT_RegisterOperand(rhsReg2, VM_TypeReference.Int)))); + } else if (val2.isLongConstant()) { + OPT_LongConstantOperand rhs2 = val2.asLongConstant(); + int low = rhs2.lower32(); + int high = rhs2.upper32(); + EMIT(CPOS(s, MIR_BinaryAcc.create(IA32_SUB, + new OPT_RegisterOperand(lowlhsReg, VM_TypeReference.Int), + IC(low)))); + EMIT(CPOS(s, MIR_BinaryAcc.mutate(s, IA32_SBB, + new OPT_RegisterOperand(lhsReg, VM_TypeReference.Int), + IC(high)))); + } else { + throw new OPT_OptimizingCompilerException("OPT_BURS_Helpers", + "unexpected parameters: " + result + "=" + val1 + "-" + val2); + } + } else { + throw new OPT_OptimizingCompilerException("OPT_BURS_Helpers", + "unexpected parameters: " + result + "=" + val1 + "-" + val2); + } } + else { + // Potential to clobber second operand during move to result. 
Use a + // temporary register to perform the operation and rely on register + // allocator to remove redundant moves + OPT_RegisterOperand temp1 = regpool.makeTempInt(); + OPT_RegisterOperand temp2 = regpool.makeTempInt(); + // Move val1 into temp + if (val1.isRegister()) { + OPT_Register rhsReg1 = val1.asRegister().register; + OPT_Register lowrhsReg1 = regpool.getSecondReg(rhsReg1); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, + temp1, + new OPT_RegisterOperand(lowrhsReg1, VM_TypeReference.Int)))); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, + temp2, + new OPT_RegisterOperand(rhsReg1, VM_TypeReference.Int)))); + } else if (val1.isLongConstant()) { + OPT_LongConstantOperand rhs1 = val1.asLongConstant(); + int low = rhs1.lower32(); + int high = rhs1.upper32(); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, + temp1, + IC(low)))); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, + temp2, + IC(high)))); + } else { + throw new OPT_OptimizingCompilerException("OPT_BURS_Helpers", + "unexpected parameters: " + result + "=" + val1 + "-" + val2); + } + // Perform subtract + if (val2.isRegister()) { + OPT_Register rhsReg2 = val2.asRegister().register; + OPT_Register lowrhsReg2 = regpool.getSecondReg(rhsReg2); + EMIT(CPOS(s, MIR_BinaryAcc.create(IA32_SUB, + temp1.copyRO(), + new OPT_RegisterOperand(lowrhsReg2, VM_TypeReference.Int)))); + EMIT(CPOS(s, MIR_BinaryAcc.mutate(s, IA32_SBB, + temp2.copyRO(), + new OPT_RegisterOperand(rhsReg2, VM_TypeReference.Int)))); + } else if (val2.isLongConstant()) { + OPT_LongConstantOperand rhs2 = val2.asLongConstant(); + int low = rhs2.lower32(); + int high = rhs2.upper32(); + EMIT(CPOS(s, MIR_BinaryAcc.create(IA32_SUB, + temp1.copyRO(), + IC(low)))); + EMIT(CPOS(s, MIR_BinaryAcc.mutate(s, IA32_SBB, + temp2.copyRO(), + IC(high)))); + } else { + throw new OPT_OptimizingCompilerException("OPT_BURS_Helpers", + "unexpected parameters: " + result + "=" + val1 + "-" + val2); + } + // Move result back + if (result.isRegister()) { + OPT_Register lhsReg = result.asRegister().register; + OPT_Register lowlhsReg = regpool.getSecondReg(lhsReg); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, + new OPT_RegisterOperand(lowlhsReg, VM_TypeReference.Int), + temp1.copyRO()))); + EMIT(CPOS(s, MIR_Move.create(IA32_MOV, + new OPT_RegisterOperand(lhsReg, VM_TypeReference.Int), + temp2.copyRO()))); + } else { + throw new OPT_OptimizingCompilerException("OPT_BURS_Helpers", + "unexpected parameters: " + result + "=" + val1 + "-" + val2); + } + } } /** - * Expansion of RDTSC (called GET_TIME_BASE for consistency with PPC) + * Expansion of LONG_MUL * * @param s the instruction to expand - * @param result the result/first operand + * @param result the result operand + * @param value1 the first operand + * @param value2 the second operand */ - protected final void GET_TIME_BASE(OPT_Instruction s, - OPT_RegisterOperand result) { - OPT_Register highReg = result.register; - OPT_Register lowReg = regpool.getSecondReg(highReg); - EMIT(MIR_RDTSC.create(IA32_RDTSC, new OPT_RegisterOperand(getEAX(), - VM_TypeReference.Int), new OPT_RegisterOperand(getEDX(), - VM_TypeReference.Int))); - EMIT(MIR_Move.create(IA32_MOV, new OPT_RegisterOperand(lowReg, - VM_TypeReference.Int), new OPT_RegisterOperand(getEAX(), - VM_TypeReference.Int))); - EMIT(MIR_Move.create(IA32_MOV, new OPT_RegisterOperand(highReg, - VM_TypeReference.Int), new OPT_RegisterOperand(getEDX(), - VM_TypeReference.Int))); - } - - /** - * Expansion of LONG_MUL_ACC - * - * @param s the instruction to expand - * @param result the result/first operand - * @param value the 
second operand
-   */
   protected final void LONG_MUL(OPT_Instruction s, OPT_RegisterOperand result,
-                                 OPT_Operand value) {
-    // In general, (a,b) * (c,d) = (l(a imul d)+l(b imul c)+u(b mul d), l(b mul
-    // d))
-    OPT_Register lhsReg = result.register;
-    OPT_Register lowlhsReg = regpool.getSecondReg(lhsReg);
-    if (value instanceof OPT_RegisterOperand) {
-      OPT_Register rhsReg = ((OPT_RegisterOperand) value).register;
-      OPT_Register lowrhsReg = regpool.getSecondReg(rhsReg);
-      OPT_Register tmp = regpool.getInteger();
-      EMIT(MIR_BinaryAcc.create(IA32_IMUL2,
-                                new OPT_RegisterOperand(lhsReg,
-                                                        VM_TypeReference.Int),
-                                new OPT_RegisterOperand(lowrhsReg,
-                                                        VM_TypeReference.Int)));
-      EMIT(MIR_Move.create(IA32_MOV, new OPT_RegisterOperand(tmp,
-          VM_TypeReference.Int), new OPT_RegisterOperand(rhsReg,
-          VM_TypeReference.Int)));
-      EMIT(MIR_BinaryAcc.create(IA32_IMUL2,
-                                new OPT_RegisterOperand(tmp,
-                                                        VM_TypeReference.Int),
-                                new OPT_RegisterOperand(lowlhsReg,
-                                                        VM_TypeReference.Int)));
-      EMIT(MIR_BinaryAcc.create(IA32_ADD, new OPT_RegisterOperand(lhsReg,
-          VM_TypeReference.Int), new OPT_RegisterOperand(tmp,
-          VM_TypeReference.Int)));
-      EMIT(MIR_Move.create(IA32_MOV, new OPT_RegisterOperand(getEAX(),
-          VM_TypeReference.Int), new OPT_RegisterOperand(lowlhsReg,
-          VM_TypeReference.Int)));
-      EMIT(MIR_Multiply.create(IA32_MUL,
-                               new OPT_RegisterOperand(getEDX(),
-                                                       VM_TypeReference.Int),
-                               new OPT_RegisterOperand(getEAX(),
-                                                       VM_TypeReference.Int),
-                               new OPT_RegisterOperand(lowrhsReg,
... [truncated message content] |
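
A note on the FPR_2INT hunk near the top of this diff: the emitted sequence uses IA32_FIST, which rounds to nearest under the default FPU control word, and then patches the result back to Java's round-toward-zero semantics with the addee/subtractee conditional moves, before the MAX/MIN clamps and the NaN check. The sketch below is a plain-Java model of what that sequence computes; it is not code from the patch, the class and method names are invented, and Math.round stands in for FIST (they break ties differently, but the adjustment step makes the final result the same).

class Fpr2IntModel {
  static int doubleToInt(double value) {
    if (Double.isNaN(value)) {
      return 0;                               // the final CMOV on PE maps NaN to 0
    }
    if (value >= (double) Integer.MAX_VALUE) {
      return Integer.MAX_VALUE;               // clamp against maxintField
    }
    if (value <= (double) Integer.MIN_VALUE) {
      return Integer.MIN_VALUE;               // clamp against minintField
    }
    long rounded = Math.round(value);         // stands in for FIST's round-to-nearest
    if (value < 0 && rounded < value) {
      rounded += 1;                           // "add 1 iff isNegative and x > round(x)"
    } else if (value > 0 && rounded > value) {
      rounded -= 1;                           // "subtract 1 iff isPositive and x < round(x)"
    }
    return (int) rounded;
  }
}

The ROUND_TO_ZERO hunk avoids the adjustment entirely: bits 10 and 11 of the x87 control word form the rounding-control field, and OR-ing in 0x0c00 sets them to 11b (truncate), so later FIST instructions chop instead of rounding to nearest.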
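
FPR2GPR_64 and the new SSE2_FPR2GPR_64 move a double's raw bit pattern into a register pair by storing it to a QW stack slot (FMOV or MOVSD) and reloading the two DW halves. Assuming, as the LONG_* helpers in this file do, that result.register holds the upper word and regpool.getSecondReg the lower, the net effect is the following; this is an illustrative model, not patch code.

class Fpr2GprModel {
  /** What the two result registers hold after FPR2GPR_64 / SSE2_FPR2GPR_64. */
  static int[] split(double value) {
    long bits = Double.doubleToRawLongBits(value);        // the QW stack slot written by FMOV/MOVSD
    return new int[] { (int) (bits >>> 32), (int) bits }; // upper word (offset + 4), lower word (offset)
  }
}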
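
The reworked LONG_ADD still leans on the classic ADD/ADC pairing: the low-word IA32_ADD sets the carry flag and the high-word IA32_ADC folds that carry in, which is why the two EMITs must stay in that order. Here is a small, self-contained Java model of that arithmetic, with invented names and no claim to be the helper's actual code.

class LongAddModel {
  static long add(long a, long b) {
    int aLow = (int) a, aHigh = (int) (a >>> 32);
    int bLow = (int) b, bHigh = (int) (b >>> 32);
    int low = aLow + bLow;                              // what IA32_ADD computes on the low words
    long unsignedSum = (aLow & 0xFFFFFFFFL) + (bLow & 0xFFFFFFFFL);
    int carry = (unsignedSum >>> 32) != 0 ? 1 : 0;      // the carry flag that ADD leaves behind
    int high = aHigh + bHigh + carry;                   // what IA32_ADC computes on the high words
    return (low & 0xFFFFFFFFL) | ((long) high << 32);
  }
}

LongAddModel.add(a, b) equals a + b for all inputs, which is the invariant the emitted ADD/ADC pair preserves one 32-bit half at a time.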
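
The removed LONG_MUL body quotes the identity (a,b) * (c,d) = (l(a imul d) + l(b imul c) + u(b mul d), l(b mul d)), reading (a,b) as the high and low 32-bit words. A hedged Java sketch of that identity, again with invented names, which can be checked against ordinary long multiplication:

class LongMulModel {
  static long mul(long x, long y) {
    int a = (int) (x >>> 32), b = (int) x;               // high and low words of x
    int c = (int) (y >>> 32), d = (int) y;               // high and low words of y
    long bd = (b & 0xFFFFFFFFL) * (d & 0xFFFFFFFFL);     // unsigned 32x32 -> 64 product (IA32_MUL)
    int high = a * d + b * c + (int) (bd >>> 32);        // low words of a*d and b*c (IA32_IMUL2) plus high word of b*d
    int low = (int) bd;                                  // low word of b*d
    return ((long) high << 32) | (low & 0xFFFFFFFFL);
  }
}

Only the b*d term needs a widening unsigned multiply, which is why a single IA32_MUL into EDX:EAX suffices; the a*d and b*c terms contribute nothing beyond their low 32 bits.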