Commits

Slavko Zitnik committed f97bb45

Coreference pairwise learner

Comments (0)

Files changed (16)

src/main/java/si/zitnik/research/iobie/core/collective/CollectiveClassifier.scala

-package si.zitnik.research.iobie.collective
-
-import si.zitnik.research.iobie.domain.{Example, Examples}
-import com.typesafe.scalalogging.slf4j.Logging
-import si.zitnik.research.iobie.domain.IOBIEConversions._
-import si.zitnik.research.iobie.domain.cluster.Cluster
-import si.zitnik.research.iobie.coreference.util.MentionExamplesBuilder
-import si.zitnik.research.iobie.algorithms.crf.{ExampleLabel, Label, Classifier}
-import collection.mutable._
-import si.zitnik.research.iobie.coreference.classifier.impl.CorefMultipleClassifier
-
-/**
- * Created by IntelliJ IDEA.
- * User: slavkoz
- * Date: 4/23/12
- * Time: 3:59 PM
- * To change this template use File | Settings | File Templates.
- */
-
-//TODO: check if better to classify more times, not just 3-times
-class CollectiveClassifier(NEC: Classifier,
-                           RELC: Classifier,
-                           COREFC: CorefMultipleClassifier,
-                           neTagsToGenerateConstituentsFrom: scala.collection.immutable.Set[String],
-                           documentIdentifier: ExampleLabel.Value) extends Logging {
-
-  def getMentions(examples: Examples, readMentionsFromExamples: Boolean) = {
-    val mentionsBuilder = new MentionExamplesBuilder(examples, neTagsToGenerateConstituentsFrom, documentIdentifier)
-    if (readMentionsFromExamples) {
-      mentionsBuilder.buildFromTagged()
-    } else {
-      mentionsBuilder.detectAndBuild()
-    }
-  }
-
-  def classify(
-           examples: Examples,
-           maxInferenceIter: Int = 3,
-           readMentionsFromExamples: Boolean = false): (ArrayBuffer[ArrayBuffer[String]], ArrayBuffer[ArrayBuffer[String]], ArrayBuffer[HashSet[Cluster]]) = {
-    var mentionExamples = getMentions(examples, readMentionsFromExamples)
-
-    var prevNeLabels: ArrayBuffer[ArrayBuffer[String]] = null
-    var prevRelLabels: ArrayBuffer[ArrayBuffer[String]] = null
-    var prevCorefClusters: ArrayBuffer[HashSet[Cluster]] = null
-
-    var converged = false
-    var iter = 1
-    while (iter <= maxInferenceIter && !converged) {
-      logger.info("Inference Iteration %d".format(iter))
-
-      val curNeLabels = NEC.classify(examples)
-      val curRelLabels = RELC.classify(examples)
-      val curCorefClusters = COREFC.classifyClusters(mentionExamples)
-
-      converged = CollectiveUtil.hasConverged(curNeLabels, prevNeLabels, curRelLabels, prevRelLabels, curCorefClusters, prevCorefClusters)
-      CollectiveUtil.addIntermediateLabelings(examples, iter, curNeLabels, curRelLabels)
-      prevNeLabels = curNeLabels
-      prevRelLabels = curRelLabels
-      prevCorefClusters = curCorefClusters
-      iter += 1
-    }
-    logger.info("Inference finished after %d iterations.".format(iter - 1))
-
-    (prevNeLabels, prevRelLabels, prevCorefClusters)
-  }
-}

src/main/java/si/zitnik/research/iobie/core/collective/CollectiveLearner.scala

-package si.zitnik.research.iobie.collective
-
-import si.zitnik.research.iobie.domain.Examples
-import scala.collection.JavaConversions._
-import java.util.ArrayList
-import com.typesafe.scalalogging.slf4j.Logging
-import si.zitnik.research.iobie.algorithms.crf.feature.{UnigramConsecutiveFeatureFunction, UnigramPreviousFeatureFunction, UnigramFeatureFunction}
-import si.zitnik.research.iobie.algorithms.crf._
-import linearchain.{LCCRFLearner, LCCRFClassifier}
-import si.zitnik.research.iobie.coreference.learner.CorefMultipleLearner
-import si.zitnik.research.iobie.coreference.util.MentionExamplesBuilder
-import si.zitnik.research.iobie.thirdparty.crfsuite.api.CRFSuiteLCCRFLearner
-import si.zitnik.research.iobie.coreference.classifier.impl.CorefMultipleClassifier
-import si.zitnik.research.iobie.algorithms.crf.stat.Statistics
-import si.zitnik.research.iobie.coreference.clustering.impl.TaggedClusterer
-import si.zitnik.research.iobie.statistics.cluster.{BCubedClusterStatistics, MUCClusterStatistics, ClusterStatistics}
-
-/**
- * Created by IntelliJ IDEA.
- * User: slavkoz
- * Date: 4/23/12
- * Time: 3:59 PM
- * To change this template use File | Settings | File Templates.
- */
-
-class CollectiveLearner(
-                        featureFunctions: Map[Label.Value, ArrayList[FeatureFunction]],
-                        examples: Examples,
-                        neTagsToGenerateConstituentsFrom: Set[String],
-                        documentIdentifier: ExampleLabel.Value = ExampleLabel.DOC_ID,
-                        numOfEachTaskIters: Int = 50,
-                        numOfCollectiveIters: Int = 5, // => 50*3*5 =  750 of all iterations
-                        maxInferenceIter: Int = 3) extends Logging {
-
-  if (!featureFunctions.keySet.containsAll(Set[Label.Value](Label.NE, Label.REL, Label.COREF)) || featureFunctions.keySet.size != 3) {
-    logger.warn("Feature functions not correctly defined!")
-  }
-
-
-  def train(testExamples: Examples = examples): CollectiveClassifier = {
-    var NEClassifier: Classifier = null
-    var RELClassifier: Classifier = null
-    var COREFClassifier: CorefMultipleClassifier = null
-    val mentionExamples = new MentionExamplesBuilder(examples, neTagsToGenerateConstituentsFrom, documentIdentifier).buildFromTagged()
-    val mentionExamplesTest = new MentionExamplesBuilder(testExamples, neTagsToGenerateConstituentsFrom, documentIdentifier).buildFromTagged()
-
-    for (i <- 1 to numOfCollectiveIters) {
-      println("Iteration "+i)
-      NEClassifier = new CRFSuiteLCCRFLearner(examples, Label.NE, featureFunctions(Label.NE), "temp/NECollective.obj").
-        train(numOfEachTaskIters) //trainAndTest(showScoringEvery, numOfEachTaskIters, examples)
-      println("NE test")
-      new Statistics(NEClassifier, testExamples).printAllStat(Label.NE)
-
-      RELClassifier = new CRFSuiteLCCRFLearner(examples, Label.REL, featureFunctions(Label.REL), "temp/RECollective.obj").
-        train(numOfEachTaskIters) //trainAndTest(showScoringEvery, numOfEachTaskIters, examples)
-      println("REL test")
-      new Statistics(RELClassifier, testExamples).printAllStat(Label.REL)
-
-      COREFClassifier = new CorefMultipleLearner(mentionExamples, featureFunctions(Label.COREF), modelSaveFilename = "temp/COREFCollective").
-        train(numOfEachTaskIters) //trainAndTest(showScoringEvery, numOfEachTaskIters, mentionExamples)
-      //TODO implement cluster statistics
-      val allRealClusters = TaggedClusterer.doClustering(mentionExamplesTest, ommitSingles = false)
-      val allTaggedClusters = COREFClassifier.classifyClusters(mentionExamplesTest)
-      val F = new ClusterStatistics().stat(allRealClusters, allTaggedClusters)
-      println("Results Pairwise: " + F)
-      val FMUC = MUCClusterStatistics.scoreExamples(allRealClusters, allTaggedClusters)
-      println("Results MUC:" + FMUC)
-      val BCubed = BCubedClusterStatistics.scoreExamples(allRealClusters, allTaggedClusters)
-      println("Results BCubed: " + BCubed)
-
-      //TODO do some easy testing
-      //test as a whole
-
-      CollectiveUtil.addIntermediateLabelings(examples, i, NEClassifier, RELClassifier)
-
-      i match {
-        case 1 => addIterativeFeatureFunctions(FeatureFunctionLevel.LEVEL_1)
-        case 2 => addIterativeFeatureFunctions(FeatureFunctionLevel.LEVEL_2)
-        case _ =>
-      }
-    }
-
-    new CollectiveClassifier(NEClassifier, RELClassifier, COREFClassifier, neTagsToGenerateConstituentsFrom, documentIdentifier)
-  }
-
-  private def createIterativeFeatureFunctions(labelType: Label.Value, iterativeLabelType: Label.Value) {
-    featureFunctions(labelType).add(new UnigramFeatureFunction(iterativeLabelType, "U" + iterativeLabelType))
-    featureFunctions(labelType).add(new UnigramPreviousFeatureFunction(iterativeLabelType, "UP" + iterativeLabelType))
-    featureFunctions(labelType).add(new UnigramConsecutiveFeatureFunction(iterativeLabelType, "UC" + iterativeLabelType))
-
-    featureFunctions(labelType).add(new UnigramFeatureFunction(iterativeLabelType, "B" + iterativeLabelType))
-    featureFunctions(labelType).add(new UnigramPreviousFeatureFunction(iterativeLabelType, "BP" + iterativeLabelType))
-    featureFunctions(labelType).add(new UnigramConsecutiveFeatureFunction(iterativeLabelType, "BC" + iterativeLabelType))
-  }
-
-  /**
-   * Feature functions are added to tasks of NE, REL.
-   */
-  private def addIterativeFeatureFunctions(functionLevel: FeatureFunctionLevel.Value) {
-    functionLevel match {
-      case FeatureFunctionLevel.LEVEL_1 =>
-        createIterativeFeatureFunctions(Label.NE, Label.L1_NE)
-        createIterativeFeatureFunctions(Label.REL, Label.L1_REL)
-      case FeatureFunctionLevel.LEVEL_2 =>
-        createIterativeFeatureFunctions(Label.NE, Label.L2_NE)
-        createIterativeFeatureFunctions(Label.REL, Label.L2_REL)
-      case _ => logger.warn("Function level not supported.")
-    }
-  }
-
-
-}

src/main/java/si/zitnik/research/iobie/core/collective/CollectiveUtil.scala

-package si.zitnik.research.iobie.collective
-
-import si.zitnik.research.iobie.algorithms.crf.{Label, Classifier}
-import com.typesafe.scalalogging.slf4j.Logging
-import si.zitnik.research.iobie.domain.Examples
-import si.zitnik.research.iobie.domain.cluster.Cluster
-import scala.collection.JavaConversions._
-import si.zitnik.research.iobie.domain.IOBIEConversions._
-import collection.mutable._
-
-/**
- * Created with IntelliJ IDEA.
- * User: slavkoz
- * Date: 8/7/12
- * Time: 4:29 PM
- * To change this template use File | Settings | File Templates.
- */
-
-object CollectiveUtil extends Logging {
-
-  def addIntermediateLabelings(examples: Examples, i: Int, NEC: Classifier, RELC: Classifier) {
-    logger.info("Adding intermediate labelings.")
-    if (i > 1) {
-      //transfer L1 labelings to L2 labelings
-      examples.transferLabeling(Label.L1_NE, Label.L2_NE)
-      examples.transferLabeling(Label.L1_REL, Label.L2_REL)
-    }
-
-    //add L1 labelings
-    examples.setLabeling(Label.L1_NE, NEC)
-    examples.setLabeling(Label.L1_REL, RELC)
-    logger.info("Intermediate labelings added.")
-  }
-
-  def addIntermediateLabelings(examples: Examples, i: Int, neLabels: ArrayBuffer[ArrayBuffer[String]], relLabels: ArrayBuffer[ArrayBuffer[String]]) {
-    logger.info("Adding intermediate labelings.")
-    if (i > 1) {
-      //transfer L1 labelings to L2 labelings
-      examples.transferLabeling(Label.L1_NE, Label.L2_NE)
-      examples.transferLabeling(Label.L1_REL, Label.L2_REL)
-    }
-
-    //add L1 labelings
-    examples.setLabeling(Label.L1_NE, neLabels)
-    examples.setLabeling(Label.L1_REL, relLabels)
-    logger.info("Intermediate labelings added.")
-  }
-
-  //CONVERGENCE
-  def labelingConverged(currentlabeling: ArrayBuffer[ArrayBuffer[String]], previousLabeling: ArrayBuffer[ArrayBuffer[String]]): Boolean = {
-    for ((p, c) <- previousLabeling.flatten.zip(currentlabeling.flatten)) {
-      if (!p.equals(c)) {
-        return false
-      }
-    }
-    return true
-  }
-
-  def clustersConverged(curCorefClusters: ArrayBuffer[HashSet[Cluster]], prevCorefClusters: ArrayBuffer[HashSet[Cluster]]): Boolean = {
-    //TODO check clusters
-    false
-  }
-
-  def hasConverged(curNeLabels: ArrayBuffer[ArrayBuffer[String]], prevNeLabels: ArrayBuffer[ArrayBuffer[String]], curRelLabels: ArrayBuffer[ArrayBuffer[String]], prevRelLabels: ArrayBuffer[ArrayBuffer[String]], curCorefClusters: ArrayBuffer[HashSet[Cluster]], prevCorefClusters: ArrayBuffer[HashSet[Cluster]]) = {
-    prevCorefClusters != null && prevNeLabels != null && prevRelLabels != null &&
-    labelingConverged(curNeLabels, prevNeLabels) &&
-    labelingConverged(curRelLabels, prevRelLabels) &&
-    clustersConverged(curCorefClusters, prevCorefClusters)
-  }
-
-}

src/main/java/si/zitnik/research/iobie/core/collective/FeatureFunctionLevel.scala

-package si.zitnik.research.iobie.collective
-
-/**
- * Created with IntelliJ IDEA.
- * User: slavkoz
- * Date: 6/15/12
- * Time: 12:05 PM
- * To change this template use File | Settings | File Templates.
- */
-
-object FeatureFunctionLevel extends Enumeration {
-  val LEVEL_1 = Value("L1")
-  val LEVEL_2 = Value("L2")
-}

src/main/java/si/zitnik/research/iobie/core/collective/classifier/CollectiveClassifier.scala

+package si.zitnik.research.iobie.core.collective.classifier
+
+import si.zitnik.research.iobie.domain.{Example, Examples}
+import com.typesafe.scalalogging.slf4j.Logging
+import si.zitnik.research.iobie.domain.IOBIEConversions._
+import si.zitnik.research.iobie.domain.cluster.Cluster
+import si.zitnik.research.iobie.coreference.util.MentionExamplesBuilder
+import si.zitnik.research.iobie.algorithms.crf.{ExampleLabel, Label, Classifier}
+import collection.mutable._
+import si.zitnik.research.iobie.coreference.classifier.impl.CorefMultipleClassifier
+import si.zitnik.research.iobie.core.collective.util.CollectiveUtil
+
+/**
+ * Created by IntelliJ IDEA.
+ * User: slavkoz
+ * Date: 4/23/12
+ * Time: 3:59 PM
+ * To change this template use File | Settings | File Templates.
+ */
+
+//TODO: check if better to classify more times, not just 3-times
+class CollectiveClassifier(NEC: Classifier,
+                           RELC: Classifier,
+                           COREFC: CorefMultipleClassifier,
+                           neTagsToGenerateConstituentsFrom: scala.collection.immutable.Set[String],
+                           documentIdentifier: ExampleLabel.Value) extends Logging {
+
+  //TODO check if the number of classifiers is the same
+
+  def getMentions(examples: Examples, readMentionsFromExamples: Boolean) = {
+    val mentionsBuilder = new MentionExamplesBuilder(examples, neTagsToGenerateConstituentsFrom, documentIdentifier)
+    if (readMentionsFromExamples) {
+      mentionsBuilder.buildFromTagged()
+    } else {
+      mentionsBuilder.detectAndBuild()
+    }
+  }
+
+  def classify(
+           examples: Examples,
+           maxInferenceIter: Int = 3,
+           readMentionsFromExamples: Boolean = false): (ArrayBuffer[ArrayBuffer[String]], ArrayBuffer[ArrayBuffer[String]], ArrayBuffer[HashSet[Cluster]]) = {
+    var mentionExamples = getMentions(examples, readMentionsFromExamples)
+
+    var prevNeLabels: ArrayBuffer[ArrayBuffer[String]] = null
+    var prevRelLabels: ArrayBuffer[ArrayBuffer[String]] = null
+    var prevCorefClusters: ArrayBuffer[HashSet[Cluster]] = null
+
+    var converged = false
+    var iter = 1
+    while (iter <= maxInferenceIter && !converged) {
+      logger.info("Inference Iteration %d".format(iter))
+
+      val curNeLabels = NEC.classify(examples)
+      val curRelLabels = RELC.classify(examples)
+      val curCorefClusters = COREFC.classifyClusters(mentionExamples)
+
+      converged = CollectiveUtil.hasConverged(curNeLabels, prevNeLabels, curRelLabels, prevRelLabels, curCorefClusters, prevCorefClusters)
+      CollectiveUtil.addIntermediateLabelings(examples, iter, curNeLabels, curRelLabels)
+      prevNeLabels = curNeLabels
+      prevRelLabels = curRelLabels
+      prevCorefClusters = curCorefClusters
+      iter += 1
+    }
+    logger.info("Inference finished after %d iterations.".format(iter - 1))
+
+    (prevNeLabels, prevRelLabels, prevCorefClusters)
+  }
+}

src/main/java/si/zitnik/research/iobie/core/collective/learner/CollectiveLearner.scala

+package si.zitnik.research.iobie.core.collective.learner
+
+import si.zitnik.research.iobie.domain.Examples
+import scala.collection.JavaConversions._
+import java.util.ArrayList
+import com.typesafe.scalalogging.slf4j.Logging
+import si.zitnik.research.iobie.algorithms.crf.feature.{UnigramConsecutiveFeatureFunction, UnigramPreviousFeatureFunction, UnigramFeatureFunction}
+import si.zitnik.research.iobie.algorithms.crf._
+import linearchain.{LCCRFLearner, LCCRFClassifier}
+import si.zitnik.research.iobie.coreference.learner.CorefMultipleLearner
+import si.zitnik.research.iobie.coreference.util.MentionExamplesBuilder
+import si.zitnik.research.iobie.thirdparty.crfsuite.api.CRFSuiteLCCRFLearner
+import si.zitnik.research.iobie.coreference.classifier.impl.CorefMultipleClassifier
+import si.zitnik.research.iobie.algorithms.crf.stat.Statistics
+import si.zitnik.research.iobie.coreference.clustering.impl.TaggedClusterer
+import si.zitnik.research.iobie.statistics.cluster.{BCubedClusterStatistics, MUCClusterStatistics, ClusterStatistics}
+import si.zitnik.research.iobie.core.collective.classifier.CollectiveClassifier
+import si.zitnik.research.iobie.core.collective.util.{FeatureFunctionLevel, CollectiveUtil}
+import collection.mutable.ArrayBuffer
+import si.zitnik.research.iobie.core.ner.mention.classifier.impl.NERMentionClassifier
+import si.zitnik.research.iobie.core.relationship.classifier.impl.RelationshipMultipleClassifier
+
+/**
+ * Created by IntelliJ IDEA.
+ * User: slavkoz
+ * Date: 4/23/12
+ * Time: 3:59 PM
+ * To change this template use File | Settings | File Templates.
+ */
+
+class CollectiveLearner(
+                        featureFunctions: Map[Label.Value, ArrayList[FeatureFunction]],
+                        examples: Examples,
+                        documentIdentifier: ExampleLabel.Value = ExampleLabel.DOC_ID,
+                        numOfEachTaskIters: Int = 50,
+                        numOfIters: Int = 5 // => 50*3*5 = 750 iterations in total
+                          ) extends Logging {
+
+  init()
+  private def init() {
+    if (!featureFunctions.keySet.containsAll(Set[Label.Value](Label.NE, Label.REL, Label.COREF)) || featureFunctions.keySet.size != 3) {
+      logger.warn("Feature functions not correctly defined!")
+    }
+  }
+
+
+
+  def train(testExamples: Examples = examples): CollectiveClassifier = {
+    var NEClassifier = ArrayBuffer[NERMentionClassifier]()
+    var RELClassifier = ArrayBuffer[RelationshipMultipleClassifier]()
+    var COREFClassifier = ArrayBuffer[CorefMultipleClassifier]()
+
+    /*
+    for (i <- 1 to numOfIters) {
+      logger.info("Iteration " + i)
+
+      //NER
+      NEClassifier = new CRFSuiteLCCRFLearner(examples, Label.NE, featureFunctions(Label.NE), "temp/NECollective.obj").
+        train(numOfEachTaskIters) //trainAndTest(showScoringEvery, numOfEachTaskIters, examples)
+      println("NE test")
+      new Statistics(NEClassifier, testExamples).printAllStat(Label.NE)
+
+      //REL
+      RELClassifier = new CRFSuiteLCCRFLearner(examples, Label.REL, featureFunctions(Label.REL), "temp/RECollective.obj").
+        train(numOfEachTaskIters) //trainAndTest(showScoringEvery, numOfEachTaskIters, examples)
+      println("REL test")
+      new Statistics(RELClassifier, testExamples).printAllStat(Label.REL)
+
+      //COREF
+      COREFClassifier = new CorefMultipleLearner(mentionExamples, featureFunctions(Label.COREF), modelSaveFilename = "temp/COREFCollective").
+        train(numOfEachTaskIters) //trainAndTest(showScoringEvery, numOfEachTaskIters, mentionExamples)
+
+      //TODO do some easy testing
+      val allRealClusters = TaggedClusterer.doClustering(mentionExamplesTest, ommitSingles = false)
+      val allTaggedClusters = COREFClassifier.classifyClusters(mentionExamplesTest)
+      val F = new ClusterStatistics().stat(allRealClusters, allTaggedClusters)
+      println("Results Pairwise: " + F)
+      val FMUC = MUCClusterStatistics.scoreExamples(allRealClusters, allTaggedClusters)
+      println("Results MUC:" + FMUC)
+      val BCubed = BCubedClusterStatistics.scoreExamples(allRealClusters, allTaggedClusters)
+      println("Results BCubed: " + BCubed)
+
+
+
+      CollectiveUtil.addIntermediateLabelings(examples, i, NEClassifier, RELClassifier)
+      i match {
+        case 1 => addIterativeFeatureFunctions(FeatureFunctionLevel.LEVEL_1)
+        case 2 => addIterativeFeatureFunctions(FeatureFunctionLevel.LEVEL_2)
+        case _ =>
+      }
+    }
+
+    new CollectiveClassifier(NEClassifier, RELClassifier, COREFClassifier, neTagsToGenerateConstituentsFrom, documentIdentifier) */
+
+    null
+  }
+
+  private def createIterativeFeatureFunctions(labelType: Label.Value, iterativeLabelType: Label.Value) {
+    featureFunctions(labelType).add(new UnigramFeatureFunction(iterativeLabelType, "U" + iterativeLabelType))
+    featureFunctions(labelType).add(new UnigramPreviousFeatureFunction(iterativeLabelType, "UP" + iterativeLabelType))
+    featureFunctions(labelType).add(new UnigramConsecutiveFeatureFunction(iterativeLabelType, "UC" + iterativeLabelType))
+
+    featureFunctions(labelType).add(new UnigramFeatureFunction(iterativeLabelType, "B" + iterativeLabelType))
+    featureFunctions(labelType).add(new UnigramPreviousFeatureFunction(iterativeLabelType, "BP" + iterativeLabelType))
+    featureFunctions(labelType).add(new UnigramConsecutiveFeatureFunction(iterativeLabelType, "BC" + iterativeLabelType))
+  }
+
+  /**
+   * Feature functions are added to tasks of NE, REL.
+   */
+  private def addIterativeFeatureFunctions(functionLevel: FeatureFunctionLevel.Value) {
+    functionLevel match {
+      case FeatureFunctionLevel.LEVEL_1 =>
+        createIterativeFeatureFunctions(Label.NE, Label.L1_NE)
+        createIterativeFeatureFunctions(Label.REL, Label.L1_REL)
+      case FeatureFunctionLevel.LEVEL_2 =>
+        createIterativeFeatureFunctions(Label.NE, Label.L2_NE)
+        createIterativeFeatureFunctions(Label.REL, Label.L2_REL)
+      case _ => logger.warn("Function level not supported.")
+    }
+  }
+
+
+}

src/main/java/si/zitnik/research/iobie/core/collective/test/CollectiveTest.scala

 import java.util.ArrayList
 import si.zitnik.research.iobie.algorithms.crf.feature._
 import packages.FeatureFunctionPackages
-import si.zitnik.research.iobie.collective.CollectiveLearner
 import si.zitnik.research.iobie.coreference.test.CoNLL2012Data
+import si.zitnik.research.iobie.core.collective.learner.CollectiveLearner
 
 /**
  * Created with IntelliJ IDEA.
 
   def main(args: Array[String]) {
     //STEP 1
-
+    //val mentionExamples = new MentionExamplesBuilder(examples, neTagsToGenerateConstituentsFrom, documentIdentifier).buildFromTagged()
 
     //STEP 2
-    val classifier = new CollectiveLearner(
-      Map(Label.NE -> generalFFunctions(), Label.REL -> generalFFunctions(), Label.COREF -> FeatureFunctionPackages.standardCorefFFunctions()),
-      CoNLL2012Data.importTrainData(),
-      Set("PERSON")
-    ).train(CoNLL2012Data.importTestData())
-  }
-
-  def generalFFunctions(): ArrayList[FeatureFunction] = {
-    val featureFunctions = new ArrayList[FeatureFunction]()
-    featureFunctions.add(new BigramDistributionFeatureFunction())
-    featureFunctions.add(new UnigramDistributionFeatureFunction())
-
-    featureFunctions.add(new StartsUpperFeatureFunction(-1))
-    featureFunctions.add(new StartsUpperFeatureFunction())
-    featureFunctions.add(new StartsUpperTwiceFeatureFunction(-1))
-    featureFunctions.add(new StartsUpperTwiceFeatureFunction())
-
-    featureFunctions.addAll(new UnigramXffixFeatureFunctionGenerator(Label.OBS, 2, -1 to 1).generate())
-    featureFunctions.addAll(new UnigramXffixFeatureFunctionGenerator(Label.OBS, 3, -1 to 1).generate())
-    featureFunctions.addAll(new UnigramXffixFeatureFunctionGenerator(Label.OBS, -3, -1 to 1).generate())
-    featureFunctions.addAll(new UnigramXffixFeatureFunctionGenerator(Label.OBS, -2, -1 to 1).generate())
 
-
-    featureFunctions.addAll(new LabelUnigramFeatureFunctionGenerator(Label.POS, -2 to 2, "UPOS").generate())
-    featureFunctions.addAll(new LabelUnigramFeatureFunctionGenerator(Label.OBS, -2 to 2, "UOBS").generate())
-    featureFunctions.addAll(new LabelUnigramFeatureFunctionGenerator(Label.LEMMA, -2 to 2, "ULEM").generate())
-
-    featureFunctions.addAll(new LabelBigramFeatureFunctionGenerator(Label.POS, -1 to 2, "BPOS").generate())
-    featureFunctions.addAll(new LabelBigramFeatureFunctionGenerator(Label.OBS, -1 to 2, "BOBS").generate())
-    featureFunctions.addAll(new LabelBigramFeatureFunctionGenerator(Label.LEMMA, -1 to 2, "BLEM").generate())
-
-    featureFunctions
   }
 
+
 }

src/main/java/si/zitnik/research/iobie/core/collective/test/RTVSLOCollectiveTest.scala

-package si.zitnik.research.iobie.collective.test
-
-import si.zitnik.research.iobie.algorithms.crf.{FeatureFunction, Label}
-import si.zitnik.research.iobie.datasets.util.DatasetUtil
-import si.zitnik.research.iobie.datasets.tab.TabImporter
-import java.util.ArrayList
-import si.zitnik.research.iobie.algorithms.crf.feature._
-import scala.collection.JavaConversions._
-import si.zitnik.research.iobie.domain.IOBIEConversions._
-import si.zitnik.research.iobie.collective.CollectiveLearner
-import si.zitnik.research.iobie.algorithms.crf.stat.Statistics
-
-/**
- * Created with IntelliJ IDEA.
- * User: slavkoz
- * Date: 8/14/12
- * Time: 1:07 PM
- * To change this template use File | Settings | File Templates.
- */
-
-object RTVSLOCollectiveTest {
-  def importExamples() = {
-    var ds = "/Users/slavkoz/Documents/DR_Research/Datasets/rtvslo_dec2011/rtvslo_dec2011_v2.tsv"
-
-    var examples = new TabImporter(Array[Label.Value](Label.NE, Label.REL, Label.COREF, Label.LEMMA, Label.POS), ds).importForIE()
-    examples.relabel(Label.COREF, "O", "-") //TRANSFORM COREF TAGS
-    DatasetUtil.toCOREF_Tags(examples)
-    DatasetUtil.toCOREF_Constituents(examples)
-    examples.printStatistics(ommited = Array())
-    examples
-  }
-
-  def defineFFunctions(): ArrayList[FeatureFunction] = {
-    val featureFunctions = new ArrayList[FeatureFunction]()
-    featureFunctions.add(new BigramDistributionFeatureFunction())
-    featureFunctions.add(new UnigramDistributionFeatureFunction())
-
-    featureFunctions.add(new StartsUpperFeatureFunction(-1))
-    featureFunctions.add(new StartsUpperFeatureFunction())
-    featureFunctions.add(new StartsUpperTwiceFeatureFunction(-1))
-    featureFunctions.add(new StartsUpperTwiceFeatureFunction())
-
-    featureFunctions.addAll(new UnigramXffixFeatureFunctionGenerator(Label.OBS, 2, -1 to 1).generate())
-    featureFunctions.addAll(new UnigramXffixFeatureFunctionGenerator(Label.OBS, 3, -1 to 1).generate())
-    featureFunctions.addAll(new UnigramXffixFeatureFunctionGenerator(Label.OBS, -3, -1 to 1).generate())
-    featureFunctions.addAll(new UnigramXffixFeatureFunctionGenerator(Label.OBS, -2, -1 to 1).generate())
-
-
-    featureFunctions.addAll(new LabelUnigramFeatureFunctionGenerator(Label.OBS, -2 to 2, "UOBS").generate())
-
-    featureFunctions.addAll(new LabelBigramFeatureFunctionGenerator(Label.OBS, -1 to 2, "BOBS").generate())
-
-    featureFunctions
-  }
-
-  def generalFFunctions(): ArrayList[FeatureFunction] = {
-    val featureFunctions = new ArrayList[FeatureFunction]()
-    featureFunctions.add(new BigramDistributionFeatureFunction())
-    featureFunctions.add(new UnigramDistributionFeatureFunction())
-
-    featureFunctions.add(new StartsUpperFeatureFunction(-1))
-    featureFunctions.add(new StartsUpperFeatureFunction())
-    featureFunctions.add(new StartsUpperTwiceFeatureFunction(-1))
-    featureFunctions.add(new StartsUpperTwiceFeatureFunction())
-
-    featureFunctions.addAll(new UnigramXffixFeatureFunctionGenerator(Label.OBS, 2, -1 to 1).generate())
-    featureFunctions.addAll(new UnigramXffixFeatureFunctionGenerator(Label.OBS, 3, -1 to 1).generate())
-    featureFunctions.addAll(new UnigramXffixFeatureFunctionGenerator(Label.OBS, -3, -1 to 1).generate())
-    featureFunctions.addAll(new UnigramXffixFeatureFunctionGenerator(Label.OBS, -2, -1 to 1).generate())
-
-
-    featureFunctions.addAll(new LabelUnigramFeatureFunctionGenerator(Label.POS, -2 to 2, "UPOS").generate())
-    featureFunctions.addAll(new LabelUnigramFeatureFunctionGenerator(Label.OBS, -2 to 2, "UOBS").generate())
-    featureFunctions.addAll(new LabelUnigramFeatureFunctionGenerator(Label.LEMMA, -2 to 2, "ULEM").generate())
-
-    featureFunctions.addAll(new LabelBigramFeatureFunctionGenerator(Label.POS, -1 to 2, "BPOS").generate())
-    featureFunctions.addAll(new LabelBigramFeatureFunctionGenerator(Label.OBS, -1 to 2, "BOBS").generate())
-    featureFunctions.addAll(new LabelBigramFeatureFunctionGenerator(Label.LEMMA, -1 to 2, "BLEM").generate())
-
-    featureFunctions
-  }
-
-
-  def main(args: Array[String]) {
-    val rawExamples = importExamples()
-    rawExamples.printLabelingDistribution(Label.NE)
-    rawExamples.printLabelingDistribution(Label.REL)
-    println("Cluster numbers: "+rawExamples.getAllMentionClusters().map(_._2.size()).sum)
-    println("Mention numbers: "+rawExamples.map(_.getAllMentions().size).sum)
-
-
-    for (i <- 0 to 8) { //leave one out principle
-    val (inExamples, outExamples) = importExamples().leaveOneOut(i+"")
-    //val inExamples = importExamples()
-    //val outExamples = inExamples
-
-      val classifier = new CollectiveLearner(
-        Map(Label.NE -> generalFFunctions(), Label.REL -> generalFFunctions(), Label.COREF -> generalFFunctions()),
-        inExamples,
-        Set("PER", "ORG")
-      ).train()
-
-      val result = classifier.classify(outExamples, readMentionsFromExamples = true)
-      val resolvedCLusters = result._3
-      val realClusters = outExamples.getAllMentionClusters()
-      println("B-PER: ")
-      Statistics.printClassificationTable(Statistics.stat(Label.NE, "B-PER", outExamples.getLabeling(Label.NE), result._1))
-      Statistics.printStandardClassification(Statistics.stat(Label.NE, "B-PER", outExamples.getLabeling(Label.NE), result._1))
-      println("I-PER: ")
-      Statistics.printClassificationTable(Statistics.stat(Label.NE, "I-PER", outExamples.getLabeling(Label.NE), result._1))
-      Statistics.printStandardClassification(Statistics.stat(Label.NE, "I-PER", outExamples.getLabeling(Label.NE), result._1))
-      println("O: ")
-      Statistics.printClassificationTable(Statistics.stat(Label.NE, "O", outExamples.getLabeling(Label.NE), result._1))
-      Statistics.printStandardClassification(Statistics.stat(Label.NE, "O", outExamples.getLabeling(Label.NE), result._1))
-
-
-      println("B-REL: ")
-      Statistics.printClassificationTable(Statistics.stat(Label.REL, "B-REL", outExamples.getLabeling(Label.REL), result._2))
-      Statistics.printStandardClassification(Statistics.stat(Label.REL, "B-REL", outExamples.getLabeling(Label.REL), result._2))
-      println("I-REL: ")
-      Statistics.printClassificationTable(Statistics.stat(Label.REL, "I-REL", outExamples.getLabeling(Label.REL), result._2))
-      Statistics.printStandardClassification(Statistics.stat(Label.REL, "I-REL", outExamples.getLabeling(Label.REL), result._2))
-      println("O: ")
-      Statistics.printClassificationTable(Statistics.stat(Label.REL, "O", outExamples.getLabeling(Label.REL), result._2))
-      Statistics.printStandardClassification(Statistics.stat(Label.REL, "O", outExamples.getLabeling(Label.REL), result._2))
-    }
-
-  }
-}

src/main/java/si/zitnik/research/iobie/core/collective/util/CollectiveUtil.scala

+package si.zitnik.research.iobie.core.collective.util
+
+import si.zitnik.research.iobie.algorithms.crf.{Label, Classifier}
+import com.typesafe.scalalogging.slf4j.Logging
+import si.zitnik.research.iobie.domain.Examples
+import si.zitnik.research.iobie.domain.cluster.Cluster
+import scala.collection.JavaConversions._
+import si.zitnik.research.iobie.domain.IOBIEConversions._
+import collection.mutable._
+
+/**
+ * Created with IntelliJ IDEA.
+ * User: slavkoz
+ * Date: 8/7/12
+ * Time: 4:29 PM
+ * To change this template use File | Settings | File Templates.
+ */
+
+object CollectiveUtil extends Logging {
+
+  /**
+   * Shifts the current L1 (NE/REL) labelings to L2 and writes fresh L1
+   * labelings produced by the given classifiers.
+   *
+   * @param examples examples whose labelings are updated in place
+   * @param i 1-based inference iteration; the L1->L2 transfer is skipped on the
+   *          first iteration (no previous labeling exists yet)
+   * @param NEC classifier producing the new L1 named-entity labeling
+   * @param RELC classifier producing the new L1 relation labeling
+   */
+  def addIntermediateLabelings(examples: Examples, i: Int, NEC: Classifier, RELC: Classifier) {
+    logger.info("Adding intermediate labelings.")
+    if (i > 1) {
+      //transfer L1 labelings to L2 labelings
+      examples.transferLabeling(Label.L1_NE, Label.L2_NE)
+      examples.transferLabeling(Label.L1_REL, Label.L2_REL)
+    }
+
+    //add L1 labelings
+    examples.setLabeling(Label.L1_NE, NEC)
+    examples.setLabeling(Label.L1_REL, RELC)
+    logger.info("Intermediate labelings added.")
+  }
+
+  /**
+   * Overload taking precomputed per-example label sequences instead of running
+   * classifiers; otherwise identical to the classifier-based variant.
+   */
+  def addIntermediateLabelings(examples: Examples, i: Int, neLabels: ArrayBuffer[ArrayBuffer[String]], relLabels: ArrayBuffer[ArrayBuffer[String]]) {
+    logger.info("Adding intermediate labelings.")
+    if (i > 1) {
+      //transfer L1 labelings to L2 labelings
+      examples.transferLabeling(Label.L1_NE, Label.L2_NE)
+      examples.transferLabeling(Label.L1_REL, Label.L2_REL)
+    }
+
+    //add L1 labelings
+    examples.setLabeling(Label.L1_NE, neLabels)
+    examples.setLabeling(Label.L1_REL, relLabels)
+    logger.info("Intermediate labelings added.")
+  }
+
+  //CONVERGENCE
+  /**
+   * True iff both labelings flatten to exactly the same label sequence.
+   * Differing total lengths now count as NOT converged: the former zip-based
+   * comparison silently dropped the surplus labels of the longer labeling and
+   * could report convergence on a mere prefix match. `corresponds` checks both
+   * length and element-wise equality without the explicit early `return`s.
+   */
+  def labelingConverged(currentlabeling: ArrayBuffer[ArrayBuffer[String]], previousLabeling: ArrayBuffer[ArrayBuffer[String]]): Boolean = {
+    currentlabeling.flatten.corresponds(previousLabeling.flatten)(_ == _)
+  }
+
+  def clustersConverged(curCorefClusters: ArrayBuffer[HashSet[Cluster]], prevCorefClusters: ArrayBuffer[HashSet[Cluster]]): Boolean = {
+    //TODO check clusters
+    //NOTE(review): this stub always reports "not converged", so hasConverged can
+    //never return true until cluster comparison is implemented — confirm the
+    //inference loop is meant to always run to maxInferenceIter for now.
+    false
+  }
+
+  /** All-of: previous state exists, NE and REL labelings are unchanged, and coref clusters are unchanged. */
+  def hasConverged(curNeLabels: ArrayBuffer[ArrayBuffer[String]], prevNeLabels: ArrayBuffer[ArrayBuffer[String]], curRelLabels: ArrayBuffer[ArrayBuffer[String]], prevRelLabels: ArrayBuffer[ArrayBuffer[String]], curCorefClusters: ArrayBuffer[HashSet[Cluster]], prevCorefClusters: ArrayBuffer[HashSet[Cluster]]) = {
+    prevCorefClusters != null && prevNeLabels != null && prevRelLabels != null &&
+    labelingConverged(curNeLabels, prevNeLabels) &&
+    labelingConverged(curRelLabels, prevRelLabels) &&
+    clustersConverged(curCorefClusters, prevCorefClusters)
+  }
+
+}

src/main/java/si/zitnik/research/iobie/core/collective/util/FeatureFunctionLevel.scala

+package si.zitnik.research.iobie.core.collective.util
+
+/**
+ * Created with IntelliJ IDEA.
+ * User: slavkoz
+ * Date: 6/15/12
+ * Time: 12:05 PM
+ * To change this template use File | Settings | File Templates.
+ */
+
+//Marks whether a feature function belongs to the first (L1) or second (L2)
+//labeling level of the iterative/collective pipeline.
+//NOTE(review): presumably paired with the Label.L1_*/L2_* labelings used by
+//CollectiveUtil — confirm against the feature-function implementations.
+object FeatureFunctionLevel extends Enumeration {
+  val LEVEL_1 = Value("L1")
+  val LEVEL_2 = Value("L2")
+}

src/main/java/si/zitnik/research/iobie/core/coreference/test/CoreferenceEvaluation.scala

 import si.zitnik.research.iobie.gui.coref.CorefVisualizer
 import si.zitnik.research.iobie.thirdparty.lemmagen.api.LemmaTagger
 import io.Source
+import si.zitnik.research.iobie.core.coreference.learner.CorefPairwiseLearner
 
 /**
  * Created with IntelliJ IDEA.
     println("*******")
     println("Doing runners package \"%s\"".format(evaluationName))
     println("*******")
+
+    /*
     val distribution = CoreferenceAnalysis.getCoreferentMentionsDistanceDistribution(mentionExamples).keySet.toList.sorted
-    val minSkipMentions = 20
-    val maxSkipMentions = math.min(30, distribution.size)//minSkipMentions //distribution.size
+    println("Distance: %d".format(distribution.take(1).get(0).toInt))
+    val minSkipMentions = 1//20
+    val maxSkipMentions = 1//math.min(30, distribution.size)//minSkipMentions //distribution.size
     for (idx <- minSkipMentions to maxSkipMentions) {
       val classifier = new CorefMultipleLearner(
         mentionExamples,
       //clusterResults(classifier, mentionExamples)
       clusterResults(classifier, mentionExamplesTest)
     }
+    */
+
+    val classifier = new CorefPairwiseLearner(
+      mentionExamples,
+      featureFunctions = featureFunctionSet,
+      modelSaveFilename = evaluationName
+    ).train()
+    clusterResults(classifier, mentionExamplesTest)
   }
 
   def evaluate(evaluationName: String, dataProvider: Data, featureFunctionSet: ArrayList[FeatureFunction]) {
           val examples = dataProvider.importTrainData()
           println("TRAIN RAW DATA: ")
-          examples.printStatistics(ommited = Array(Label.EXTENT, Label.OBS, Label.PARSE_NODE, Label.REL, Label.COREF, Label.LEMMA), ommitedExample = Array(ExampleLabel.DOC_ID), ommitMentions = true)
+          examples.printStatistics(ommited = Array(Label.EXTENT, Label.OBS, Label.PARSE_NODE, Label.REL, Label.COREF, Label.LEMMA), ommitedExample = Array(ExampleLabel.DOC_ID), ommitMentions = true, ommitedMentionAttributes = Array(Label.OBS, Label.COREF, Label.EXTENT))
           val examplesTest = dataProvider.importTestData()// an ACE2004 document: .getDocumentExamples("NYT20001128.1940.0266")
           println("TEST RAW DATA: ")
-          examplesTest.printStatistics(ommited = Array(Label.EXTENT, Label.OBS, Label.PARSE_NODE, Label.REL, Label.COREF, Label.LEMMA), ommitedExample = Array(ExampleLabel.DOC_ID), ommitMentions = true)
+          examplesTest.printStatistics(ommited = Array(Label.EXTENT, Label.OBS, Label.PARSE_NODE, Label.REL, Label.COREF, Label.LEMMA), ommitedExample = Array(ExampleLabel.DOC_ID), ommitMentions = true, ommitedMentionAttributes = Array(Label.OBS, Label.COREF, Label.EXTENT))
 
           transformAndEval(evaluationName, featureFunctionSet, examples, examplesTest)
   }
     evaluate ("SemEval2010", SemEvalData, FeatureFunctionPackages.bestSemEval2010CorefFeatureFunctions)
 
     //CoNLL 2012
-    /*
+
     CoNLL2012Data.sources = Array(CoNLL2012ImporterSourceTypeEnum.BROADCAST_NEWS)
     evaluate ("CoNLL2012 BROADCAST_NEWS", CoNLL2012Data, FeatureFunctionPackages.bestCoNLL2012CorefFeatureFunctions)
 
 
     CoNLL2012Data.sources = CoNLL2012ImporterSourceTypeEnum.values.toArray
     evaluate ("CoNLL2012 ALL_TOGETHER", CoNLL2012Data, FeatureFunctionPackages.bestCoNLL2012CorefFeatureFunctions)
-    */
+
     //ACE2004
     /*
     ACE2004Data.sources = Array(ACE2004DocumentType.ARABIC_TREEBANK)
     val filePath = IOBIEPropertiesUtil.getProperty(IOBIEProperties.SEMEVAL2010_PATH)
     val semEval2010Importer = new SemEval2010Importer(filePath, SemEvalDataType.TRAIN_EN.toString)
     val examples = semEval2010Importer.importForIE()
+    new ParseTagger().tag(examples)
     examples
   }
 
     val filePath = IOBIEPropertiesUtil.getProperty(IOBIEProperties.SEMEVAL2010_PATH)
     val semEval2010Importer = new SemEval2010Importer(filePath, SemEvalDataType.TEST_EN.toString)
     val examples = semEval2010Importer.importForIE()
+    new ParseTagger().tag(examples)
     examples
   }
 }
     val rawData = new ACE2004Importer(datasetPath, documentTypes = sources).importForIE()
     new PoSTagger().tag(rawData)
     new LemmaTagger().tag(rawData)
-    //new ParseTagger().tag(rawData)
+    new ParseTagger().tag(rawData)
 
     //Cullota split:
     data = ExactSampler.sampleNumber(rawData, 336)

src/main/java/si/zitnik/research/iobie/core/ner/mention/classifier/impl/NERMentionClassifier.scala

+package si.zitnik.research.iobie.core.ner.mention.classifier.impl
+
+import si.zitnik.research.iobie.algorithms.crf.{Label, Classifier}
+import si.zitnik.research.iobie.domain.{Example, Examples}
+import java.util
+import si.zitnik.research.iobie.algorithms.crf.stat.Statistics
+
+/**
+ * Created with IntelliJ IDEA.
+ * User: slavkoz
+ * Date: 6/21/13
+ * Time: 4:08 PM
+ * To change this template use File | Settings | File Templates.
+ */
+/**
+ * Mention-level NER classifier: a thin wrapper that delegates classification to
+ * an already-trained Classifier and adds a convenience evaluation method.
+ *
+ * @param classifier trained underlying classifier (delegation target)
+ * @param learnLabelType label type that was learned (defaults to Label.NE)
+ */
+class NERMentionClassifier(
+                            val classifier: Classifier,
+                            val learnLabelType: Label.Value = Label.NE) extends Classifier {
+
+  //Delegates to the wrapped classifier; returns the predicted label sequence and
+  //its score. NOTE(review): whether the score is a normalized probability is
+  //determined by the underlying Classifier implementation — confirm.
+  def classify(mentionExample: Example, normalized: Boolean): (util.ArrayList[String], Double) = {
+    classifier.classify(mentionExample, normalized)
+  }
+
+  //Prints the full statistics report for learnLabelType over the given data.
+  def test(data: Examples) {
+    new Statistics(classifier, data).printAllStat(learnLabelType)
+  }
+}

src/main/java/si/zitnik/research/iobie/core/ner/mention/learner/NERMentionLearner.scala

+package si.zitnik.research.iobie.core.ner.mention.learner
+
+import si.zitnik.research.iobie.domain.Examples
+import java.util.ArrayList
+import si.zitnik.research.iobie.algorithms.crf.{Classifier, Learner, Label, FeatureFunction}
+import com.typesafe.scalalogging.slf4j.Logging
+import si.zitnik.research.iobie.coreference.util.MentionExamplesToCorefExamplesTransformer
+import si.zitnik.research.iobie.thirdparty.crfsuite.api.CRFSuiteLCCRFLearner
+import si.zitnik.research.iobie.coreference.classifier.impl.CorefMultipleClassifier
+import si.zitnik.research.iobie.core.ner.mention.classifier.impl.NERMentionClassifier
+
+/**
+ *
+ * Input FeatureFunctions should not refer to neighbouring examples! That is because one example represents one
+ * document and therefore mentions are independent.
+ *
+ * The same must comply for input to classifier.
+ *
+ * -------------------
+ *
+ * Input examples should be mentions. Each example represents a document.
+ * @param mentionExamples training data; each Example holds one document's mentions
+ * @param featureFunctions feature functions (must not look at neighbouring examples, see above)
+ * @param learnLabelType label type to learn (defaults to Label.NE)
+ * @param modelSaveFilename base name of the serialized model file (".obj" is appended)
+ */
+class NERMentionLearner(
+                            mentionExamples: Examples,
+                            val featureFunctions: ArrayList[FeatureFunction],
+                            val learnLabelType: Label.Value = Label.NE,
+                            val modelSaveFilename: String = "ner_model") extends Learner(mentionExamples) with Logging {
+
+
+  /** Trains with the default number of epochs (50). */
+  def train() = {
+    train(50)
+  }
+
+  /** Trains a linear-chain CRF for the given number of epochs and wraps it as a NERMentionClassifier. */
+  def train(epochs: Int) = {
+    val classifier = new CRFSuiteLCCRFLearner(mentionExamples, learnLabelType, featureFunctions, modelSaveFilename + ".obj").train(epochs)
+    new NERMentionClassifier(classifier, learnLabelType)
+  }
+
+  /**
+   * Trains in chunks of epochsBetweenTest epochs, reporting performance after each chunk.
+   * NOTE(review): every chunk calls train(), which constructs a new CRFSuiteLCCRFLearner;
+   * whether training resumes from the saved model file or restarts from scratch depends
+   * on that learner — confirm before relying on cumulative epoch counts.
+   */
+  def trainAndTest(epochsBetweenTest: Int = 5, allEpochs: Int = 50, testMentionExamples: Examples = mentionExamples): NERMentionClassifier = {
+    var classifier: NERMentionClassifier = null
+
+    for (epoch <- 1 to math.max(allEpochs / epochsBetweenTest, 1)) {
+      classifier = train(epochsBetweenTest)
+      logger.info("Training perf:")
+      //BUGFIX: training performance must be measured on the training mentions;
+      //the original tested testMentionExamples here, making both reports
+      //identical whenever a separate test set was supplied.
+      classifier.test(mentionExamples)
+
+      if (testMentionExamples != mentionExamples) {
+        logger.info("Testing perf:")
+        classifier.test(testMentionExamples)
+      }
+    }
+
+    classifier
+  }
+
+}
+

src/main/java/si/zitnik/research/iobie/datasets/conll2012/CoNLL2012Importer.scala

 
   val datasetVersion = "v4" //the last released dataset version
   //              $type/data/$lang/annotations/$source /dsprovider/XX /file
-  val fullPath = "%s/domain/%s/annotations/%s"
+  val fullPath = "%s/data/%s/annotations/%s"
 
   def importForIE(): Examples = {
     val retVal = new Examples()

src/main/java/si/zitnik/research/iobie/thirdparty/wordnet/api/WordNetHelper.scala

 
 
   def getSameIndexWords(word: String, posType: POS): HashSet[String] = {
-    //TODO: pos type checking
     val retVal = new HashSet[String]()
 
     val iword: IndexWord = wordnet.getIndexWord(posType, word)
     for (sense: Synset <- iword.getSenses()) {
-      //println(sense.getKey())
       for (similarWord <- sense.getWords()) {
-        //if (similarWord.getIndex == 2) {
-        //println("%d, %s, %s".format(similarWord.getIndex, similarWord.getLemma, sense.getGloss))
-        //}
         retVal.add(similarWord.getLemma())
       }
     }
     var retVal: Option[String] = None
     var curDepth = Int.MaxValue
     for (pointerType <- pointerTypes) {
-      val relationships = RelationshipFinder.getInstance().findRelationships(iword1.getSense(1), iword2.getSense(1), pointerType, MAX_RELATIONSHIP_DEPTH)
-
-      if (relationships.size() > 0 && relationships.getShallowest.getDepth < curDepth) {
-        curDepth  = relationships.getShallowest.getDepth
-        retVal = Some(pointerType.getLabel)
+      try {
+        val relationships = RelationshipFinder.getInstance().findRelationships(iword1.getSense(1), iword2.getSense(1), pointerType, MAX_RELATIONSHIP_DEPTH)
+
+        if (relationships.size() > 0 && relationships.getShallowest.getDepth < curDepth) {
+          curDepth  = relationships.getShallowest.getDepth
+          retVal = Some(pointerType.getLabel)
+        }
+      } catch {
+        case e: Exception => {}
       }
     }
 

src/main/java/si/zitnik/research/iobie/thirdparty/wordnet/test/JWNLTest.scala

 
     println(wnHelper.getSameIndexWords("have", POS.VERB).mkString(" "))
 
+    println(wnHelper.findRelationship("south", POS.NOUN, "turkey", POS.NOUN))
+    println(wnHelper.findRelationship("test", POS.NOUN, "trial", POS.NOUN))
+
     println(wnHelper.findRelationship("animal", POS.NOUN, "cow", POS.NOUN))
     println(wnHelper.findRelationship("cow", POS.NOUN, "animal", POS.NOUN))
     println(wnHelper.findRelationship("pavement", POS.NOUN, "sidewalk", POS.NOUN))
+    println(wnHelper.findRelationship("sidewalk", POS.NOUN, "pavement", POS.NOUN))
     println(wnHelper.findRelationship("good", POS.NOUN, "bad", POS.NOUN))
   }
 }