From: <jen...@us...> - 2008-11-15 08:43:57
|
Revision: 1516 http://dl-learner.svn.sourceforge.net/dl-learner/?rev=1516&view=rev Author: jenslehmann Date: 2008-11-15 08:43:53 +0000 (Sat, 15 Nov 2008) Log Message: ----------- added two more parameters to learning algorithm Modified Paths: -------------- trunk/examples/carcinogenesis/train.conf trunk/src/dl-learner/org/dllearner/algorithms/refexamples/ExampleBasedROLComponent.java trunk/src/dl-learner/org/dllearner/algorithms/refexamples/ExampleBasedROLearner.java trunk/src/dl-learner/org/dllearner/algorithms/refexamples/MultiHeuristic.java trunk/src/dl-learner/org/dllearner/core/configurators/ExampleBasedROLComponentConfigurator.java Modified: trunk/examples/carcinogenesis/train.conf =================================================================== --- trunk/examples/carcinogenesis/train.conf 2008-11-14 16:55:52 UTC (rev 1515) +++ trunk/examples/carcinogenesis/train.conf 2008-11-15 08:43:53 UTC (rev 1516) @@ -1,12 +1,15 @@ import("carcinogenesis.owl"); +// store some settings to make the experiment reproducable in the future reasoner = fastInstanceChecker; algorithm = refexamples; -// refexamples.noisePercentage = 28; refexamples.noisePercentage = 32; refexamples.startClass = "http://dl-learner.org/carcinogenesis#Compound"; refexamples.writeSearchTree = false; refexamples.searchTreeFile = "log/carcinogenesis/searchTree.log"; +refexamples.negativeWeight = 0.8; +refexamples.startNodeBonus = 2.0; +refexamples.forceRefinementLengthIncrease = false; +"http://dl-learner.org/carcinogenesis#d1" +"http://dl-learner.org/carcinogenesis#d10" Modified: trunk/src/dl-learner/org/dllearner/algorithms/refexamples/ExampleBasedROLComponent.java =================================================================== --- trunk/src/dl-learner/org/dllearner/algorithms/refexamples/ExampleBasedROLComponent.java 2008-11-14 16:55:52 UTC (rev 1515) +++ trunk/src/dl-learner/org/dllearner/algorithms/refexamples/ExampleBasedROLComponent.java 2008-11-15 08:43:53 UTC (rev 1516) @@ -213,6 +213,8 
@@ options.add(noisePercentage); options.add(new StringConfigOption("startClass", "the named class which should be used to start the algorithm (GUI: needs a widget for selecting a class)")); options.add(new BooleanConfigOption("forceRefinementLengthIncrease", "specifies whether nodes should be expanded until only longer refinements are reached")); + options.add(new DoubleConfigOption("negativeWeight", "Used to penalise errors on negative examples different from those of positive examples (lower = less importance for negatives).",1.0)); + options.add(new DoubleConfigOption("startNodeBonus", "You can use this to give a heuristic bonus on the start node (= initially broader exploration of search space).",0.0)); return options; } @@ -328,9 +330,9 @@ } else { if(learningProblem instanceof PosOnlyLP) { // throw new RuntimeException("does not work with positive examples only yet"); - algHeuristic = new MultiHeuristic(((PosOnlyLP)learningProblem).getPositiveExamples().size(),0); + algHeuristic = new MultiHeuristic(((PosOnlyLP)learningProblem).getPositiveExamples().size(),0, configurator); } else { - algHeuristic = new MultiHeuristic(((PosNegLP)learningProblem).getPositiveExamples().size(),((PosNegLP)learningProblem).getNegativeExamples().size()); + algHeuristic = new MultiHeuristic(((PosNegLP)learningProblem).getPositiveExamples().size(),((PosNegLP)learningProblem).getNegativeExamples().size(), configurator); } } Modified: trunk/src/dl-learner/org/dllearner/algorithms/refexamples/ExampleBasedROLearner.java =================================================================== --- trunk/src/dl-learner/org/dllearner/algorithms/refexamples/ExampleBasedROLearner.java 2008-11-14 16:55:52 UTC (rev 1515) +++ trunk/src/dl-learner/org/dllearner/algorithms/refexamples/ExampleBasedROLearner.java 2008-11-15 08:43:53 UTC (rev 1516) @@ -405,9 +405,9 @@ long lastPrintTime = 0; long lastTreeTraversalTime = System.nanoTime(); long lastReductionTime = System.nanoTime(); - // try a traversal 
after 100 seconds + // try a traversal after x seconds long traversalInterval = 300l * 1000000000l; - long reductionInterval = 500l * 1000000000l; + long reductionInterval = 300l * 1000000000l; long currentTime; while (!solutionFound && !stop) { Modified: trunk/src/dl-learner/org/dllearner/algorithms/refexamples/MultiHeuristic.java =================================================================== --- trunk/src/dl-learner/org/dllearner/algorithms/refexamples/MultiHeuristic.java 2008-11-14 16:55:52 UTC (rev 1515) +++ trunk/src/dl-learner/org/dllearner/algorithms/refexamples/MultiHeuristic.java 2008-11-15 08:43:53 UTC (rev 1516) @@ -21,6 +21,7 @@ import java.util.List; +import org.dllearner.core.configurators.ExampleBasedROLComponentConfigurator; import org.dllearner.core.owl.DatatypeSomeRestriction; import org.dllearner.core.owl.Description; import org.dllearner.core.owl.Thing; @@ -70,8 +71,8 @@ private ConceptComparator conceptComparator = new ConceptComparator(); // heuristic parameters - private double expansionPenaltyFactor; - private double gainBonusFactor; + private double expansionPenaltyFactor = 0.02; + private double gainBonusFactor = 0.5; private double nodeChildPenalty = 0.0001; // (use higher values than 0.0001 for simple learning problems); private double startNodeBonus = 0.1; //was 2.0 // penalise errors on positive examples harder than on negative examples @@ -83,16 +84,28 @@ private int nrOfExamples; public MultiHeuristic(int nrOfPositiveExamples, int nrOfNegativeExamples) { - this(nrOfPositiveExamples, nrOfNegativeExamples, 0.02, 0.5); + this.nrOfNegativeExamples = nrOfNegativeExamples; + nrOfExamples = nrOfPositiveExamples + nrOfNegativeExamples; +// this(nrOfPositiveExamples, nrOfNegativeExamples, 0.02, 0.5); } - public MultiHeuristic(int nrOfPositiveExamples, int nrOfNegativeExamples, double expansionPenaltyFactor, double gainBonusFactor) { + public MultiHeuristic(int nrOfPositiveExamples, int nrOfNegativeExamples, 
ExampleBasedROLComponentConfigurator configurator) { this.nrOfNegativeExamples = nrOfNegativeExamples; nrOfExamples = nrOfPositiveExamples + nrOfNegativeExamples; - this.expansionPenaltyFactor = expansionPenaltyFactor; - this.gainBonusFactor = gainBonusFactor; + negativeWeight = configurator.getNegativeWeight(); + startNodeBonus = configurator.getStartNodeBonus(); + System.out.println(negativeWeight); + System.out.println(startNodeBonus); } +// public MultiHeuristic(int nrOfPositiveExamples, int nrOfNegativeExamples, double expansionPenaltyFactor, double gainBonusFactor) { +// this.nrOfNegativeExamples = nrOfNegativeExamples; +// nrOfExamples = nrOfPositiveExamples + nrOfNegativeExamples; +// this.expansionPenaltyFactor = expansionPenaltyFactor; +// this.gainBonusFactor = gainBonusFactor; +// } + + /* (non-Javadoc) * @see java.util.Comparator#compare(java.lang.Object, java.lang.Object) */ @@ -141,7 +154,7 @@ // do not count TOP symbols (in particular in ALL r.TOP and EXISTS r.TOP) // as they provide no extra information if(description instanceof Thing) - bonus = 2; + bonus = 1; //2; // if(description instanceof BooleanValueRestriction) // bonus = -1; @@ -149,7 +162,7 @@ // some bonus for doubles because they are already penalised by length 3 if(description instanceof DatatypeSomeRestriction) { // System.out.println(description); - bonus = 2; + bonus = 3; //2; } List<Description> children = description.getChildren(); Modified: trunk/src/dl-learner/org/dllearner/core/configurators/ExampleBasedROLComponentConfigurator.java =================================================================== --- trunk/src/dl-learner/org/dllearner/core/configurators/ExampleBasedROLComponentConfigurator.java 2008-11-14 16:55:52 UTC (rev 1515) +++ trunk/src/dl-learner/org/dllearner/core/configurators/ExampleBasedROLComponentConfigurator.java 2008-11-15 08:43:53 UTC (rev 1516) @@ -357,6 +357,24 @@ public boolean getForceRefinementLengthIncrease() { return (Boolean) 
ComponentManager.getInstance().getConfigOptionValue(exampleBasedROLComponent, "forceRefinementLengthIncrease") ; } +/** +* negativeWeight Used to penalise errors on negative examples different from those of positive examples (lower = less importance for negatives).. +* mandatory: false| reinit necessary: true +* default value: 1.0 +* @return double +**/ +public double getNegativeWeight() { +return (Double) ComponentManager.getInstance().getConfigOptionValue(exampleBasedROLComponent, "negativeWeight") ; +} +/** +* startNodeBonus You can use this to give a heuristic bonus on the start node (= initially broader exploration of search space).. +* mandatory: false| reinit necessary: true +* default value: 0.0 +* @return double +**/ +public double getStartNodeBonus() { +return (Double) ComponentManager.getInstance().getConfigOptionValue(exampleBasedROLComponent, "startNodeBonus") ; +} /** * @param writeSearchTree specifies whether to write a search tree. @@ -655,6 +673,24 @@ ComponentManager.getInstance().applyConfigEntry(exampleBasedROLComponent, "forceRefinementLengthIncrease", forceRefinementLengthIncrease); reinitNecessary = true; } +/** +* @param negativeWeight Used to penalise errors on negative examples different from those of positive examples (lower = less importance for negatives).. +* mandatory: false| reinit necessary: true +* default value: 1.0 +**/ +public void setNegativeWeight(double negativeWeight) { +ComponentManager.getInstance().applyConfigEntry(exampleBasedROLComponent, "negativeWeight", negativeWeight); +reinitNecessary = true; +} +/** +* @param startNodeBonus You can use this to give a heuristic bonus on the start node (= initially broader exploration of search space).. 
+* mandatory: false| reinit necessary: true +* default value: 0.0 +**/ +public void setStartNodeBonus(double startNodeBonus) { +ComponentManager.getInstance().applyConfigEntry(exampleBasedROLComponent, "startNodeBonus", startNodeBonus); +reinitNecessary = true; +} /** * true, if this component needs reinitialization. This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |