From: <lor...@us...> - 2012-06-17 20:10:09
Revision: 3755
          http://dl-learner.svn.sourceforge.net/dl-learner/?rev=3755&view=rev
Author:   lorenz_b
Date:     2012-06-17 20:10:02 +0000 (Sun, 17 Jun 2012)

Log Message:
-----------
Started a faster implementation of the template generation process.

Modified Paths:
--------------
    trunk/components-ext/src/main/java/org/dllearner/algorithm/tbsl/learning/SPARQLTemplateBasedLearner2.java
    trunk/components-ext/src/main/java/org/dllearner/algorithm/tbsl/ltag/parser/Parser.java
    trunk/components-ext/src/main/java/org/dllearner/algorithm/tbsl/templator/Templator.java

Modified: trunk/components-ext/src/main/java/org/dllearner/algorithm/tbsl/learning/SPARQLTemplateBasedLearner2.java
===================================================================
--- trunk/components-ext/src/main/java/org/dllearner/algorithm/tbsl/learning/SPARQLTemplateBasedLearner2.java	2012-06-16 11:21:55 UTC (rev 3754)
+++ trunk/components-ext/src/main/java/org/dllearner/algorithm/tbsl/learning/SPARQLTemplateBasedLearner2.java	2012-06-17 20:10:02 UTC (rev 3755)
@@ -9,6 +9,7 @@
 import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -86,7 +87,8 @@
 
     private static final Logger logger = Logger.getLogger(SPARQLTemplateBasedLearner2.class);
 
-    private Monitor mon = MonitorFactory.getTimeMonitor("tbsl");
+    private Monitor templateMon = MonitorFactory.getTimeMonitor("template");
+    private Monitor sparqlMon = MonitorFactory.getTimeMonitor("sparql");
 
     private boolean useRemoteEndpointValidation;
     private boolean stopIfQueryResultNotEmpty;
@@ -129,6 +131,11 @@
 
     private String currentlyExecutedQuery;
 
+    private boolean dropZeroScoredQueries = true;
+    private boolean useManualMappingsIfExistOnly = true;
+
+    private boolean multiThreaded = true;
+
     public SPARQLTemplateBasedLearner2(SparqlEndpoint endpoint, Index resourcesIndex, Index classesIndex, Index propertiesIndex){
         this(endpoint, resourcesIndex, classesIndex, propertiesIndex, new StanfordPartOfSpeechTagger());
     }
@@ -280,16 +287,23 @@
         template2Queries = new HashMap<Template, Collection<? extends Query>>();
         slot2URI = new HashMap<Slot, List<String>>();
         currentlyExecutedQuery = null;
+
+//      templateMon.reset();
+//      sparqlMon.reset();
     }
 
     public void learnSPARQLQueries() throws NoTemplateFoundException{
         reset();
         //generate SPARQL query templates
         logger.info("Generating SPARQL query templates...");
-        mon.start();
-        templates = templateGenerator.buildTemplates(question);
-        mon.stop();
-        logger.info("Done in " + mon.getLastValue() + "ms.");
+        templateMon.start();
+        if(multiThreaded){
+            templates = templateGenerator.buildTemplatesMultiThreaded(question);
+        } else {
+            templates = templateGenerator.buildTemplates(question);
+        }
+        templateMon.stop();
+        logger.info("Done in " + templateMon.getLastValue() + "ms.");
         if(templates.isEmpty()){
             throw new NoTemplateFoundException();
         }
@@ -672,8 +686,16 @@
             }
         }
 
-        for(WeightedQuery q : queries){
-            q.setScore(q.getScore()/t.getSlots().size());
+        for (Iterator<WeightedQuery> iterator = queries.iterator(); iterator.hasNext();) {
+            WeightedQuery wQ = iterator.next();
+            if(dropZeroScoredQueries){
+                if(wQ.getScore() == 0){
+                    iterator.remove();
+                }
+            } else {
+                wQ.setScore(wQ.getScore()/t.getSlots().size());
+            }
         }
         allQueries.addAll(queries);
         List<Query> qList = new ArrayList<Query>();
@@ -752,7 +774,7 @@
 
     private List<String> getLemmatizedWords(List<String> words){
         logger.info("Pruning word list " + words + "...");
-        mon.start();
+//      mon.start();
         List<String> pruned = new ArrayList<String>();
         for(String word : words){
             //currently only stem single words
@@ -766,8 +788,8 @@
             }
         }
-        mon.stop();
-        logger.info("Done in " + mon.getLastValue() + "ms.");
+//      mon.stop();
+//      logger.info("Done in " + mon.getLastValue() + "ms.");
         logger.info("Pruned list: " + pruned);
         return pruned;
     }
@@ -806,46 +828,51 @@
     private void validate(List<String> queries, SPARQL_QueryType queryType){
         logger.info("Testing candidate SPARQL queries on remote endpoint...");
-        mon.start();
+        sparqlMon.start();
         if(queryType == SPARQL_QueryType.SELECT){
             for(String query : queries){
-                logger.info("Testing query:\n" + query);
-                com.hp.hpl.jena.query.Query q = QueryFactory.create(query, Syntax.syntaxARQ);
-                q.setLimit(1);
-                ResultSet rs = executeSelect(q.toString());//executeSelect(query);
-
-                List<String> results = new ArrayList<String>();
-                QuerySolution qs;
-                String projectionVar;
-                while(rs.hasNext()){
-                    qs = rs.next();
-                    projectionVar = qs.varNames().next();
-                    if(qs.get(projectionVar).isLiteral()){
-                        results.add(qs.get(projectionVar).asLiteral().getLexicalForm());
-                    } else if(qs.get(projectionVar).isURIResource()){
-                        results.add(qs.get(projectionVar).asResource().getURI());
-                    }
-                }
-                if(!results.isEmpty()){
-                    try{
-                        int cnt = Integer.parseInt(results.get(0));
-                        if(cnt > 0){learnedPos = queries.indexOf(query);
-                            learnedSPARQLQueries.put(query, results);
-                            if(stopIfQueryResultNotEmpty){
-                                return;
-                            }
-                        }
-                    } catch (NumberFormatException e){
-                        learnedSPARQLQueries.put(query, results);
-                        learnedPos = queries.indexOf(query);
-                        if(stopIfQueryResultNotEmpty){
-                            return;
-                        }
-                    }
-                }
-                logger.info("Result: " + results);
+                List<String> results;
+                try {
+                    logger.info("Testing query:\n" + query);
+                    com.hp.hpl.jena.query.Query q = QueryFactory.create(query, Syntax.syntaxARQ);
+                    q.setLimit(1);
+                    ResultSet rs = executeSelect(q.toString());//executeSelect(query);
+
+                    results = new ArrayList<String>();
+                    QuerySolution qs;
+                    String projectionVar;
+                    while(rs.hasNext()){
+                        qs = rs.next();
+                        projectionVar = qs.varNames().next();
+                        if(qs.get(projectionVar).isLiteral()){
+                            results.add(qs.get(projectionVar).asLiteral().getLexicalForm());
+                        } else if(qs.get(projectionVar).isURIResource()){
+                            results.add(qs.get(projectionVar).asResource().getURI());
+                        }
+                    }
+                    if(!results.isEmpty()){
+                        try{
+                            int cnt = Integer.parseInt(results.get(0));
+                            if(cnt > 0){
+                                learnedPos = queries.indexOf(query);
+                                learnedSPARQLQueries.put(query, results);
+                                if(stopIfQueryResultNotEmpty){
+                                    return;
+                                }
+                            }
+                        } catch (NumberFormatException e){
+                            learnedSPARQLQueries.put(query, results);
+                            learnedPos = queries.indexOf(query);
+                            if(stopIfQueryResultNotEmpty){
+                                return;
+                            }
+                        }
+                        logger.info("Result: " + results);
+                    }
+                } catch (Exception e) {
+                    e.printStackTrace();
+                }
             }
         } else if(queryType == SPARQL_QueryType.ASK){
             for(String query : queries){
@@ -862,8 +889,8 @@
             }
         }
-        mon.stop();
-        logger.info("Done in " + mon.getLastValue() + "ms.");
+        sparqlMon.stop();
+        logger.info("Done in " + sparqlMon.getLastValue() + "ms.");
     }
 
     private boolean executeAskQuery(String query){
@@ -976,15 +1003,19 @@
                 rs.add(mappingIndex.getResourcesWithScores(word));
             }
         }
-        if(slot.getSlotType() == SlotType.RESOURCE){
-            rs.add(index.getResourcesWithScores(word, 50));
-        } else {
-            if(slot.getSlotType() == SlotType.CLASS){
-                word = PlingStemmer.stem(word);
+        //use the non-manual indexes only if the mapping-based result set is empty or the option is not set
+        if(!useManualMappingsIfExistOnly || rs.isEmpty()){
+            if(slot.getSlotType() == SlotType.RESOURCE){
+                rs.add(index.getResourcesWithScores(word, 50));
+            } else {
+                if(slot.getSlotType() == SlotType.CLASS){
+                    word = PlingStemmer.stem(word);
+                }
+                rs.add(index.getResourcesWithScores(word, 20));
             }
-            rs.add(index.getResourcesWithScores(word, 20));
         }
+
         for(IndexResultItem item : rs.getItems()){
             double similarity = Similarity.getSimilarity(word, item.getLabel());
//          //get the labels of the redirects and compute the highest similarity
@@ -1012,6 +1043,10 @@
     }
 
+    public String getTaggedInput(){
+        return templateGenerator.getTaggedInput();
+    }
+
     private boolean isDatatypeProperty(String uri){
         Boolean isDatatypeProperty = null;
         if(mappingIndex != null){
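
The hunks above split the single "tbsl" monitor into one JAMon monitor per phase, so template generation and query validation are timed independently. A minimal sketch of that timing pattern, with buildTemplates() and validateQueries() as hypothetical stand-ins for the real phases:

    import com.jamonapi.Monitor;
    import com.jamonapi.MonitorFactory;

    public class PhaseTimingSketch {

        // one monitor per phase, matching the "template"/"sparql" labels above
        private final Monitor templateMon = MonitorFactory.getTimeMonitor("template");
        private final Monitor sparqlMon = MonitorFactory.getTimeMonitor("sparql");

        public void run() {
            templateMon.start();
            buildTemplates();              // hypothetical stand-in
            templateMon.stop();
            System.out.println("Templates built in " + templateMon.getLastValue() + "ms.");

            sparqlMon.start();
            validateQueries();             // hypothetical stand-in
            sparqlMon.stop();
            System.out.println("Queries validated in " + sparqlMon.getLastValue() + "ms.");
        }

        private void buildTemplates() { }
        private void validateQueries() { }
    }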
Modified: trunk/components-ext/src/main/java/org/dllearner/algorithm/tbsl/ltag/parser/Parser.java
===================================================================
--- trunk/components-ext/src/main/java/org/dllearner/algorithm/tbsl/ltag/parser/Parser.java	2012-06-16 11:21:55 UTC (rev 3754)
+++ trunk/components-ext/src/main/java/org/dllearner/algorithm/tbsl/ltag/parser/Parser.java	2012-06-17 20:10:02 UTC (rev 3755)
@@ -2,6 +2,8 @@
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
 
 import org.apache.log4j.Logger;
 import org.dllearner.algorithm.tbsl.ltag.data.TreeNode;
@@ -9,6 +11,9 @@
 import org.dllearner.algorithm.tbsl.sem.dudes.reader.ParseException;
 import org.dllearner.algorithm.tbsl.sem.util.Pair;
 
+import com.jamonapi.Monitor;
+import com.jamonapi.MonitorFactory;
+
 public class Parser {
 
     private static final Logger logger = Logger.getLogger(Parser.class);
@@ -91,7 +96,54 @@
         return derivationTrees;
     }
 
+    public List<DerivationTree> parseMultiThreaded(String taggeduserinput, LTAGLexicon grammar) {
+
+        derivationTrees.clear();
+        derivedTrees.clear();
+        dudes.clear();
+        temporaryEntries.clear();
+
+        if (!VERBOSE) GrammarFilter.VERBOSE = false;
+
+        /*
+         * create a local copy of the grammar with its own treeIDs. This is
+         * necessary since if an input string contains the same token multiple
+         * times, a tree for each token is added. Both trees need to have
+         * different treeIDs for the parser to work correctly.
+         */
+        parseGrammar = GrammarFilter.filter(taggeduserinput,grammar,temporaryEntries,MODE);
+
+        String inputNoTags = "";
+        for (String s : taggeduserinput.split(" ")) {
+            inputNoTags += s.substring(0,s.indexOf("/")) + " ";
+        }
+
+        this.input = ("# ".concat(inputNoTags.replaceAll("'","").trim())).split(" ");
+        int n = this.input.length;
+
+        if (SHOW_GRAMMAR) {
+            logger.trace(parseGrammar);
+        }
+        if (SHOW_LEXICAL_COVERAGE) {
+            logger.trace("# OF TREES FOUND: " + parseGrammar.size());
+            logger.trace("# OF INPUT TOKENS: " + n);
+        }
+
+        List<Pair<TreeNode, Short>> initTrees = parseGrammar.getInitTrees();
+
+        internalParseMultiThreaded(initTrees, n);
+
+        if (USE_DPS_AS_INITTREES && derivationTrees.isEmpty()) {
+            internalParseMultiThreaded(parseGrammar.getDPInitTrees(), n);
+        }
+
+        if (VERBOSE) logger.trace("Constructed " + derivationTrees.size() + " derivation trees.\n");
+        return derivationTrees;
+    }
+
     private void internalParse(List<Pair<TreeNode, Short>> initTrees, int n) {
 
         TREELOOP: for (int k = 0; k < initTrees.size(); k++) {
@@ -211,6 +263,23 @@
         }
     }
 
+    private void internalParseMultiThreaded(List<Pair<TreeNode, Short>> initTrees, int n) {
+        Monitor parseMon = MonitorFactory.getTimeMonitor("parse");
+        ExecutorService threadPool = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
+        parseMon.start();
+        for (int k = 0; k < initTrees.size(); k++) {
+            Pair<TreeNode, Short> pair = initTrees.get(k);
+            TreeNode tree = pair.getFirst();
+            short tid = pair.getSecond();
+            threadPool.execute(new TreeProcessor(tree, tid, n));
+        }
+        threadPool.shutdown();
+        while(!threadPool.isTerminated()){
+        }
+        parseMon.stop();
+    }
 
     private List<List<ParseState>> makeStateSets() {
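
internalParseMultiThreaded() above waits for its workers by spinning in an empty while(!threadPool.isTerminated()) loop, which keeps one core busy doing nothing until the pool drains. A sketch of the same dispatch-and-wait step using awaitTermination, which blocks instead of spinning; the one-hour bound is an arbitrary assumption, not taken from the commit:

    import java.util.List;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    public class PoolShutdownSketch {

        // Runs the given tasks on a fixed-size pool and blocks until all of
        // them have finished, without a busy-wait loop.
        public static void runAndWait(List<Runnable> tasks) {
            ExecutorService pool = Executors.newFixedThreadPool(
                    Runtime.getRuntime().availableProcessors());
            for (Runnable task : tasks) {
                pool.execute(task);
            }
            pool.shutdown();
            try {
                pool.awaitTermination(1, TimeUnit.HOURS); // generous upper bound
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // restore interrupt status
            }
        }
    }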
@@ -298,7 +367,21 @@
         return derivedTrees;
     }
 
+    public List<TreeNode> buildDerivedTreesMultiThreaded(LTAGLexicon G) throws ParseException {
+        ExecutorService threadPool = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
+        for (DerivationTree dTree : derivationTrees) {
+            threadPool.execute(new DerivationTreeProcessor(dTree, G));
+        }
+        threadPool.shutdown();
+        while(!threadPool.isTerminated()){
+        }
+        return derivedTrees;
+    }
+
     /**
      * get List of Dudes parallely constructed by Parser.buildDerivedTrees()
      */
@@ -342,5 +425,169 @@
         grammar.clear(temps);
     }
 
+    class TreeProcessor implements Runnable{
+
+        private TreeNode tree;
+        private short tid;
+        private int n;
+
+        public TreeProcessor(TreeNode tree, short tid, int n) {
+            this.tree = tree;
+            this.tid = tid;
+            this.n = n;
+        }
+
+        @Override
+        public void run() {
+            List<List<ParseState>> stateSets = makeStateSets();
+
+            ParseState start = new ParseState(tree, tid);
+            // the inittree is already used
+            start.getUsedTrees().add(tid);
+
+            stateSets.get(0).add(start);
+            boolean skip = false;
+            for (int i = 0; i < n; i++) {
+
+                if (i > 0) {
+                    stateSets.get(i - 1).clear();
+                    if (USE_LESS_MEMORY) {
+                        System.gc();
+                    }
+                }
+
+                List<ParseState> localStateSet = new ArrayList<ParseState>(stateSets.get(i));
+                List<ParseState> localStateSet2 = new ArrayList<ParseState>();
+
+                stateSets.get(i).clear();
+
+                while (localStateSet.size() > 0) {
+
+                    for (int j = 0; j < localStateSet.size(); j++) {
+                        ParseState state = localStateSet.get(j);
+
+                        List<ParseState> newStates;
+
+                        OPLOOP: for (Class<?> c : operations) {
+
+                            try {
+
+                                ParserOperation op = (ParserOperation) c.newInstance();
+
+                                newStates = (op.go(i, state, input, parseGrammar));
+
+                                if (!newStates.isEmpty()) {
+
+                                    for (ParseState newState : newStates) {
+                                        if (newState.i.equals(i)) {
+                                            localStateSet2.add(newState);
+                                        }
+
+                                        if ((op instanceof Scanner)
+                                                || (newState.isEndState() && newState.i == n - 1)) {
+                                            stateSets.get(newState.i).add(newState);
+                                        }
+                                    }
+
+                                    op = null;
+                                    break OPLOOP;
+                                }
+
+                            } catch (InstantiationException e) {
+                                e.printStackTrace();
+                            } catch (IllegalAccessException e) {
+                                e.printStackTrace();
+                            }
+                        }
+                    }
+
+                    localStateSet = null;
+                    localStateSet = new ArrayList<ParseState>(localStateSet2);
+                    localStateSet2 = new ArrayList<ParseState>();
+                }
+
+                localStateSet = null;
+                localStateSet2 = null;
+
+                /*
+                 * if the parser could not scan the next input token this run /
+                 * initial tree is rejected
+                 */
+                if (i < n - 1 && stateSets.get(i + 1).isEmpty()) {
+                    stateSets.get(i).clear();
+                    skip = true;
+                    break;
+                }
+            }
+
+            if(!skip){
+                for (ParseState state : stateSets.get(n - 1)) {
+
+//                  if (state.isEndState() && state.t.equals(tree)) {
+                    if (state.isEndState()) {
+                        if (state.t.equals(tree)) {
+                            derivationTrees.add(createDerivationTree(state, parseGrammar));
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    class DerivationTreeProcessor implements Runnable{
+
+        private DerivationTree dTree;
+        private LTAGLexicon lexicon;
+
+        public DerivationTreeProcessor(DerivationTree dTree, LTAGLexicon lexicon) {
+            this.dTree = dTree;
+            this.lexicon = lexicon;
+        }
+
+        @Override
+        public void run() {
+            try {
+                List<Pair<TreeNode, Dude>> pairs = DerivedTree.build(dTree, parseGrammar, lexicon, CONSTRUCT_SEMANTICS);
+
+                for (Pair<TreeNode,Dude> pair : pairs) {
+                    TreeNode x = pair.getFirst();
+                    Dude dude = pair.getSecond();
+
+                    if (!derivedTrees.contains(x) || !dudes.contains(dude)) {
+                        derivedTrees.add(x);
+                        dudes.add(dude);
+                    }
+                }
+            } catch (ParseException e) {
+                e.printStackTrace();
+            }
+        }
+    }
 }
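
TreeProcessor.run() and DerivationTreeProcessor.run() append to the shared derivationTrees, derivedTrees and dudes collections from several worker threads at once. Their declarations are outside this diff, so it is not visible here whether they are thread-safe; if they are plain ArrayLists, concurrent add() calls can race and corrupt the list. A sketch of two standard remedies, assuming an unsynchronized list is currently used:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.Queue;
    import java.util.concurrent.ConcurrentLinkedQueue;

    public class SharedResultsSketch<T> {

        // Option 1: synchronized wrapper; add() is safe, but iteration still
        // needs an explicit synchronized block on the wrapper.
        private final List<T> results =
                Collections.synchronizedList(new ArrayList<T>());

        // Option 2: lock-free queue, safe for concurrent offer()/poll().
        private final Queue<T> resultQueue = new ConcurrentLinkedQueue<T>();

        public void add(T item) {
            results.add(item);
            resultQueue.offer(item);
        }
    }

Note also that the contains-then-add step in DerivationTreeProcessor is a check-then-act sequence: even with a synchronized collection, two threads can both pass the contains() test before either add() runs, so avoiding duplicates would need a single lock around the whole step or a concurrent set.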
logger.trace("Tagged input: " + tagged); + } + else { + tagged = s; + s = extractSentence(tagged); + } + taggedInput = tagged; + String newtagged; + if (USE_NER) { + newtagged = pp.condenseNominals(pp.findNEs(tagged,s)); + } + else newtagged = pp.condenseNominals(tagged); + + newtagged = pp.condense(newtagged); + if (VERBOSE) logger.trace("Preprocessed: " + newtagged); + + p.parseMultiThreaded(newtagged,g); + + if (p.getDerivationTrees().isEmpty()) { + p.clear(g,p.getTemps()); + clearAgain = false; + if (VERBOSE) logger.error("[Templator.java] '" + s + "' could not be parsed."); + } + else { + try { + p.buildDerivedTreesMultiThreaded(g); + } catch (ParseException e) { + if (VERBOSE) logger.error("[Templator.java] ParseException at '" + e.getMessage() + "'", e); + } + } + + // build pairs <String,POStag> from tagged + Hashtable<String,String> postable = new Hashtable<String,String>(); + for (String st : newtagged.split(" ")) { + postable.put(st.substring(0,st.indexOf("/")).toLowerCase(),st.substring(st.indexOf("/")+1));; + } + // + + drses = new HashSet<DRS>(); + templates = new HashSet<Template>(); + +// ExecutorService threadPool = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors()); +// for (Dude dude : p.getDudes()) { +// threadPool.execute(new DudeProcessor(dude, postable)); +// } +// threadPool.shutdown(); +// while(!threadPool.isTerminated()){} + + for (Dude dude : p.getDudes()) { + + UDRS udrs = d2u.convert(dude); + if (udrs != null) { + + for (DRS drs : udrs.initResolve()) { + + List<Slot> slots = new ArrayList<Slot>(); + slots.addAll(dude.getSlots()); + d2s.setSlots(slots); + d2s.redundantEqualRenaming(drs); + + if (!containsModuloRenaming(drses,drs)) { +// // DEBUG + if (VERBOSE) { + System.out.println(dude); + System.out.println(drs); + for (Slot sl : slots) { + System.out.println(sl.toString()); + } + } +// // + drses.add(drs); + + try { + Template temp = d2s.convert(drs,slots); + temp = temp.checkandrefine(); + if (temp == null) { + continue; + } + + if (USE_WORDNET) { // find WordNet synonyms + List<String> newwords; + String word; + String pos; + for (Slot slot : temp.getSlots()) { + if (!slot.getWords().isEmpty()) { + + word = slot.getWords().get(0); + pos = postable.get(word.toLowerCase().replace(" ","_")); + + POS wordnetpos = null; + if (pos != null) { + if (equalsOneOf(pos,noun)) { + wordnetpos = POS.NOUN; + } + else if (equalsOneOf(pos,adjective)) { + wordnetpos = POS.ADJECTIVE; + } + else if (equalsOneOf(pos,verb)) { + wordnetpos = POS.VERB; + } + } + + List<String> strings = new ArrayList<String>(); + if (wordnetpos != null && wordnetpos.equals(POS.ADJECTIVE)) { + strings = wordnet.getAttributes(word); + } + + newwords = new ArrayList<String>(); + newwords.addAll(slot.getWords()); + newwords.addAll(strings); + + if (wordnetpos != null && !slot.getSlotType().equals(SlotType.RESOURCE)) { + newwords.addAll(wordnet.getBestSynonyms(wordnetpos,getLemmatizedWord(word))); + for (String att : getLemmatizedWords(strings)) { + newwords.addAll(wordnet.getBestSynonyms(wordnetpos,att)); + } + } + if (newwords.isEmpty()) { + newwords.add(slot.getWords().get(0)); + } + List<String> newwordslist = new ArrayList<String>(); + newwordslist.addAll(newwords); + slot.setWords(newwordslist); + } + } + } + // + + templates.add(temp); + } catch (java.lang.ClassCastException e) { + continue; + } + if (ONE_SCOPE_ONLY) { break; } + } + } + + } + } + + + if (clearAgain) { + p.clear(g,p.getTemps()); + } +// System.gc(); + + return templates; + } + + public String 
+
+    public String getTaggedInput() {
+        return taggedInput;
+    }
+
     private List<String> getLemmatizedWords(List<String> words){
         List<String> stemmed = new ArrayList<String>();
         for(String word : words){
@@ -330,5 +493,107 @@
         return taggedSentence;
     }
 
+    class DudeProcessor implements Runnable{
+
+        private Dude dude;
+        private Hashtable<String,String> postable;
+
+        public DudeProcessor(Dude dude, Hashtable<String,String> postable) {
+            this.dude = dude;
+            this.postable = postable;
+        }
+
+        @Override
+        public void run() {
+            UDRS udrs = d2u.convert(dude);
+            if (udrs != null) {
+
+                for (DRS drs : udrs.initResolve()) {
+
+                    List<Slot> slots = new ArrayList<Slot>();
+                    slots.addAll(dude.getSlots());
+                    d2s.setSlots(slots);
+                    d2s.redundantEqualRenaming(drs);
+
+                    if (!containsModuloRenaming(drses,drs)) {
+//                      // DEBUG
+                        if (VERBOSE) {
+                            System.out.println(dude);
+                            System.out.println(drs);
+                            for (Slot sl : slots) {
+                                System.out.println(sl.toString());
+                            }
+                        }
+//                      //
+                        drses.add(drs);
+
+                        try {
+                            Template temp = d2s.convert(drs,slots);
+                            temp = temp.checkandrefine();
+                            if (temp == null) {
+                                continue;
+                            }
+
+                            if (USE_WORDNET) { // find WordNet synonyms
+                                List<String> newwords;
+                                String word;
+                                String pos;
+                                for (Slot slot : temp.getSlots()) {
+                                    if (!slot.getWords().isEmpty()) {
+
+                                        word = slot.getWords().get(0);
+                                        pos = postable.get(word.toLowerCase().replace(" ","_"));
+
+                                        POS wordnetpos = null;
+                                        if (pos != null) {
+                                            if (equalsOneOf(pos,noun)) {
+                                                wordnetpos = POS.NOUN;
+                                            }
+                                            else if (equalsOneOf(pos,adjective)) {
+                                                wordnetpos = POS.ADJECTIVE;
+                                            }
+                                            else if (equalsOneOf(pos,verb)) {
+                                                wordnetpos = POS.VERB;
+                                            }
+                                        }
+
+                                        List<String> strings = new ArrayList<String>();
+                                        if (wordnetpos != null && wordnetpos.equals(POS.ADJECTIVE)) {
+                                            strings = wordnet.getAttributes(word);
+                                        }
+
+                                        newwords = new ArrayList<String>();
+                                        newwords.addAll(slot.getWords());
+                                        newwords.addAll(strings);
+
+                                        if (wordnetpos != null && !slot.getSlotType().equals(SlotType.RESOURCE)) {
+                                            newwords.addAll(wordnet.getBestSynonyms(wordnetpos,getLemmatizedWord(word)));
+                                            for (String att : getLemmatizedWords(strings)) {
+                                                newwords.addAll(wordnet.getBestSynonyms(wordnetpos,att));
+                                            }
+                                        }
+                                        if (newwords.isEmpty()) {
+                                            newwords.add(slot.getWords().get(0));
+                                        }
+                                        List<String> newwordslist = new ArrayList<String>();
+                                        newwordslist.addAll(newwords);
+                                        slot.setWords(newwordslist);
+                                    }
+                                }
+                            }
+                            //
+
+                            templates.add(temp);
+                        } catch (java.lang.ClassCastException e) {
+                            continue;
+                        }
+                        if (ONE_SCOPE_ONLY) { break; }
+                    }
+                }
+            }
+        }
+    }
 }

This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.