Commits

Jay Barra committed 4777eee

PSO ported from the old C++ code; a few tweaks are still needed to finish the port


Files changed (1)

 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <math.h>
+#include <time.h>
 #include <mpi.h>
 //---------------------------------------------------------------------------
-#include "mpiNode.h"
+#define ROOT 0
+
+#define MESSAGE_TAG 1000
+#define WORK_TAG    1001
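+// MESSAGE_TAG labels the start-up roll call; WORK_TAG labels per-generation results.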
+//---------------------------------------------------------------------------
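+// PSO coefficients: SELF_WEIGHT scales the move relative to a particle's own
+// best, SWARM_WEIGHT the move relative to the swarm's best.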
+#define SWARM_WEIGHT 1.5f
+#define SELF_WEIGHT  0.5f
+#define GENERATIONS  16
+#define MAX_NODES    8   // vSwarm is statically sized: one particle per rank
+//---------------------------------------------------------------------------
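+// Position is two contiguous floats, so it can travel as 2 x MPI_FLOAT
+// without defining a derived datatype.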
+struct Position {
+    float x;
+    float y;
+};
+
+struct Particle {
+    struct Position current_pos;   // where the particle is now
+    float current_fitness;
+
+    struct Position best_pos;      // best point this particle has visited
+    float best_fitness;
+};
+//---------------------------------------------------------------------------
+// Algorithm Functions
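+// fitness() scores a particle's current position and updates its personal best in place.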
+float fitness(struct Particle& f);
+//---------------------------------------------------------------------------
+// MPI Functions
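+// getStatus() renders status.MPI_ERROR as a short human-readable label.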
+void getStatus(MPI_Status status, char* pcStatus);
 //---------------------------------------------------------------------------
-main (int argc, char** argv)
+int main(int argc, char** argv)
 {
-	MPI::Init(argc, argv);
+  MPI_Init(&argc, &argv);
 	
-	mpiNode* node = new mpiNode(argc, argv);
-	node->init();
-	node->printParticles();
-	//node->run();
-
-	MPI::Finalize();
-	delete node;
-	return 0;
+  int iNameLen;
+  int iNodes;
+  int id;
+  
+  MPI_Status status;
+ 
+  char* pcName    = (char*)malloc(1024);
+  char* pcMessage = (char*)malloc(1024);
+  char* pcStatus  = (char*)malloc(1024);
+
+  struct Particle vSwarm[MAX_NODES];  // one slot per rank; only vSwarm[id] is live locally
+  
+  MPI_Comm_rank(MPI_COMM_WORLD, &id);
+  MPI_Comm_size(MPI_COMM_WORLD, &iNodes);
+  MPI_Get_processor_name(pcName, &iNameLen);
+  
+  if (iNodes > MAX_NODES) {
+    if (id == ROOT) printf("This build supports at most %i ranks\n", MAX_NODES);
+    MPI_Finalize();
+    return 1;
+  }
+
+  // Seed per rank, otherwise every process launched in the same second
+  // draws an identical particle.
+  srand(time(NULL) + id);
+
+  bool bRoot = (id == ROOT);
+
+  // Each rank owns one particle; start it at a random point in [0, 1).
+  vSwarm[id].current_pos.x = (float)(rand() % 1000) / 1000.0f;
+  vSwarm[id].current_pos.y = (float)(rand() % 1000) / 1000.0f;
+  vSwarm[id].best_fitness  = -INFINITY;  // so the first fitness() call records a personal best
+  fitness(vSwarm[id]);
+
+  if (!bRoot) {
+    snprintf(pcMessage, 1024, "%i, %s", id, pcName);
+    MPI_Send(pcMessage, strlen(pcMessage)+1, MPI_CHAR, ROOT, MESSAGE_TAG, MPI_COMM_WORLD);
+  } else {
+    printf("Initializing Nodes\n");
+    printf("*********************************************************\n");
+    printf("Root Node: %s\n", pcName);
+    printf("Awaiting worker node responses...\n");
+    for (int iSource = 1; iSource < iNodes; iSource++) {
+      MPI_Recv(pcMessage, 1024, MPI_CHAR, iSource, MESSAGE_TAG, MPI_COMM_WORLD, &status);
+      getStatus(status, pcStatus);
+      printf("AgentNode: %s\t| Status: %s\n", pcMessage, pcStatus);
+    }
+    printf("Node initialization complete...\n");
+    printf("%i Nodes Registered for work...\n", iNodes);
+    printf("*********************************************************\n");
+  }
+
+  // Elect the initial swarm leader. Each rank only knows its own particle,
+  // so gather every fitness to the root, pick the winner there, and share
+  // the winning index with everyone.
+  int iSwarmBest = 0;
+  float fFitness[MAX_NODES];
+
+  MPI_Gather(&vSwarm[id].current_fitness, 1, MPI_FLOAT, fFitness, 1, MPI_FLOAT, ROOT, MPI_COMM_WORLD);
+  if (bRoot) {
+    for (int iParticle = 1; iParticle < iNodes; iParticle++) {
+      if (fFitness[iParticle] > fFitness[iSwarmBest]) {
+        iSwarmBest = iParticle;
+      }
+    }
+  }
+  // Bcast is collective: every rank must make the same call, root or not.
+  MPI_Bcast(&iSwarmBest, 1, MPI_INT, ROOT, MPI_COMM_WORLD);
+  // The winning rank shares its best position/fitness so the others can steer toward it.
+  MPI_Bcast(&vSwarm[iSwarmBest].best_pos,     2, MPI_FLOAT, iSwarmBest, MPI_COMM_WORLD);
+  MPI_Bcast(&vSwarm[iSwarmBest].best_fitness, 1, MPI_FLOAT, iSwarmBest, MPI_COMM_WORLD);
+
+  MPI_Barrier(MPI_COMM_WORLD);
+
+  for (int iGeneration = 0; iGeneration < GENERATIONS; iGeneration++) {
+    if (bRoot) {
+      printf("Generation %i\n", iGeneration);
+    }
+
+    // Move this rank's particle: a random pull toward its own best plus a
+    // stronger random pull toward the swarm best (no persistent velocity yet).
+    vSwarm[id].current_pos.x += SELF_WEIGHT  * (vSwarm[id].best_pos.x - vSwarm[id].current_pos.x) * (((rand() % 1000) / 1000.0f) - 0.5f) +
+                                SWARM_WEIGHT * (vSwarm[iSwarmBest].best_pos.x - vSwarm[id].current_pos.x) * (((rand() % 1000) / 1000.0f) - 0.5f);
+    vSwarm[id].current_pos.y += SELF_WEIGHT  * (vSwarm[id].best_pos.y - vSwarm[id].current_pos.y) * (((rand() % 1000) / 1000.0f) - 0.5f) +
+                                SWARM_WEIGHT * (vSwarm[iSwarmBest].best_pos.y - vSwarm[id].current_pos.y) * (((rand() % 1000) / 1000.0f) - 0.5f);
+
+    fitness(vSwarm[id]);
+
+    // Report personal bests to the root so it can re-elect the swarm leader.
+    if (bRoot) {
+      for (int i = 1; i < iNodes; i++) {
+        MPI_Recv(&vSwarm[i].best_pos.x,   1, MPI_FLOAT, i, WORK_TAG, MPI_COMM_WORLD, &status);
+        MPI_Recv(&vSwarm[i].best_pos.y,   1, MPI_FLOAT, i, WORK_TAG, MPI_COMM_WORLD, &status);
+        MPI_Recv(&vSwarm[i].best_fitness, 1, MPI_FLOAT, i, WORK_TAG, MPI_COMM_WORLD, &status);
+      }
+      iSwarmBest = 0;
+      for (int i = 1; i < iNodes; i++) {
+        if (vSwarm[i].best_fitness > vSwarm[iSwarmBest].best_fitness) {
+          iSwarmBest = i;
+        }
+      }
+    } else {
+      MPI_Send(&vSwarm[id].best_pos.x,   1, MPI_FLOAT, ROOT, WORK_TAG, MPI_COMM_WORLD);
+      MPI_Send(&vSwarm[id].best_pos.y,   1, MPI_FLOAT, ROOT, WORK_TAG, MPI_COMM_WORLD);
+      MPI_Send(&vSwarm[id].best_fitness, 1, MPI_FLOAT, ROOT, WORK_TAG, MPI_COMM_WORLD);
+    }
+    // Share the new leader and its record with every rank before the next generation.
+    MPI_Bcast(&iSwarmBest, 1, MPI_INT, ROOT, MPI_COMM_WORLD);
+    MPI_Bcast(&vSwarm[iSwarmBest].best_pos,     2, MPI_FLOAT, ROOT, MPI_COMM_WORLD);
+    MPI_Bcast(&vSwarm[iSwarmBest].best_fitness, 1, MPI_FLOAT, ROOT, MPI_COMM_WORLD);
+  }
+
+  // Every rank already agrees on the leader after the final broadcast;
+  // the root holds its record and reports the result.
+  if (bRoot) {
+    printf("And the winner is: %i (%f, %f) -> %f\n", iSwarmBest,
+           vSwarm[iSwarmBest].best_pos.x, vSwarm[iSwarmBest].best_pos.y,
+           vSwarm[iSwarmBest].best_fitness);
+  }
+ 
+  free(pcName);
+  free(pcMessage);
+  free(pcStatus);
+
+  MPI_Finalize();
+
+  return 0;
+}
+//---------------------------------------------------------------------------
+float fitness(struct Particle& f)
+{
+  // Previous objective, kept from the old C++ code:  x^3 - 3x + y^3 - 3y
+  // f.current_fitness = pow(f.current_pos.x, 3) - (3 * f.current_pos.x) +
+  //                     pow(f.current_pos.y, 3) - (3 * f.current_pos.y);
+
+  // Current objective: a radially symmetric sine bowl.
+  f.current_fitness = sin(sqrt(pow(f.current_pos.x, 2) + pow(f.current_pos.y, 2)));
+
+  // Track the particle's personal best in place.
+  if (f.current_fitness > f.best_fitness) {
+    f.best_fitness = f.current_fitness;
+    f.best_pos     = f.current_pos;
+  }
+
+  return f.current_fitness;
+}
+//---------------------------------------------------------------------------
+void getStatus(MPI_Status status, char* pcStatus)
+{
+  // Caveat: the MPI standard only guarantees status.MPI_ERROR after
+  // multiple-completion calls (e.g. MPI_Waitall); after a single MPI_Recv it
+  // may be undefined. Kept as a rough readout until the port is finished;
+  // MPI_Error_string() could replace this table.
+  switch (status.MPI_ERROR) {
+    case MPI_SUCCESS:       sprintf(pcStatus, "Success");         break;
+    case MPI_ERR_BUFFER:    sprintf(pcStatus, "Buffer Error");    break;
+    case MPI_ERR_COUNT:     sprintf(pcStatus, "Count Error");     break;
+    case MPI_ERR_TYPE:      sprintf(pcStatus, "Type Error");      break;
+    case MPI_ERR_TAG:       sprintf(pcStatus, "Tag Error");       break;
+    case MPI_ERR_COMM:      sprintf(pcStatus, "COMM Error");      break;
+    case MPI_ERR_RANK:      sprintf(pcStatus, "Rank Error");      break;
+    case MPI_ERR_REQUEST:   sprintf(pcStatus, "Request Error");   break;
+    case MPI_ERR_ROOT:      sprintf(pcStatus, "Root Error");      break;
+    case MPI_ERR_GROUP:     sprintf(pcStatus, "Group Error");     break;
+    case MPI_ERR_OP:        sprintf(pcStatus, "OP Error");        break;
+    case MPI_ERR_TOPOLOGY:  sprintf(pcStatus, "Topology Error");  break;
+    case MPI_ERR_DIMS:      sprintf(pcStatus, "DIMS Error");      break;
+    case MPI_ERR_ARG:       sprintf(pcStatus, "Args Error");      break;
+    case MPI_ERR_UNKNOWN:   sprintf(pcStatus, "Unknown Error");   break;
+    case MPI_ERR_TRUNCATE:  sprintf(pcStatus, "Truncate Error");  break;
+    case MPI_ERR_OTHER:     sprintf(pcStatus, "Other Error");     break;
+    case MPI_ERR_INTERN:    sprintf(pcStatus, "Internal Error");  break;
+    case MPI_ERR_IN_STATUS: sprintf(pcStatus, "In Status Error"); break;
+    case MPI_ERR_PENDING:   sprintf(pcStatus, "Pending Error");   break;
+    default:                sprintf(pcStatus, "Unrecognized Error");
+  }
 }
-//----------------------------------------------------------------------------
+//---------------------------------------------------------------------------
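
A quick build/run sketch (the page doesn't show the changed file's name, so pso.cpp is assumed here; mpicxx and mpirun from an Open MPI or MPICH install are assumed; one rank drives one particle, capped at MAX_NODES = 8):

    mpicxx -o pso pso.cpp
    mpirun -np 8 ./pso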