Commits

npe committed df5a541

Everything compiles and runs its tests. Now the big job is to write comprehensive test suites for all of the modules and gradually percolate the dependencies up.

Files changed (17)

 
 import (
 	"testing"
+	"reflect"
 )
 
 func TestLdd(t *testing.T) {
-
-
-	liblist, err := ldd.Ldd("/bin/date")
+	liblist, err := Ldd("/bin/date", "/", "")
 	if err != nil {
 		t.Errorf("Fail: %v\n", err)
-	} 
+	}
+	// placeholder: comparing liblist to itself always passes; a real
+	// expected library list still needs to be filled in here.
+	if !reflect.DeepEqual(liblist, liblist) {
+		t.Errorf("Liblist not equal got %q should be %q", liblist, liblist)
+	}
 }
- 
-/* 6g testldd.go ; 6l -o testldd testldd.6 */
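The new check above compares liblist against itself, so it can never fail; it reads as a placeholder until real expectations exist. Assuming Ldd returns a []string of library paths (its signature is not shown here), a table-driven follow-up in the same test file might look roughly like this, with the expected entries invented for illustration:

func TestLddExpected(t *testing.T) {
	// Hypothetical cases: the expected lists depend entirely on the host.
	tests := []struct {
		bin  string
		want []string
	}{
		{"/bin/date", []string{"/lib/libc.so.6", "/lib/ld-linux.so.2"}},
	}
	for _, tc := range tests {
		got, err := Ldd(tc.bin, "/", "")
		if err != nil {
			t.Errorf("Ldd(%q): %v", tc.bin, err)
			continue
		}
		if !reflect.DeepEqual(got, tc.want) {
			t.Errorf("Ldd(%q) = %q, want %q", tc.bin, got, tc.want)
		}
	}
}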

locale/jaguar/jaguar.go

  * the U.S. Government retains certain rights in this software.
  */
 
-package main
+package jaguarcfg
 
 import (
-	"log"
 	"os"
+	"net"
 	"bitbucket.org/npe/cluster/locale"
 )
 
 func (s *jaguar) initHostTable() {
 }
 
+// where does this guy go?
+var cmdPort string
+
 func (s *jaguar) Init(role string) {
 	switch role {
 	case "master":
 	}
 }
 
+func (s *jaguar) DefaultFam() string {
+	return "tcp"
+}
+
 func (s *jaguar) ParentAddr() string {
 	return s.parentAddr
 }
 	return s.ip
 }
 
-func (s *jaguar) SlaveIdFromVitalData(vd *vitalData) (id string) {
-	log.Exit("Implement SlaveIdFromVitalData")
-	return "1"
-}
+// func (s *jaguar) SlaveIdFromVitalData(vd *vitalData) (id string) {
+// 	log.Exit("Implement SlaveIdFromVitalData")
+// 	return "1"
+// }
 
-func (s *jaguar) RegisterServer(l Listener) (err os.Error) {
+func (s *jaguar) RegisterServer(l net.Listener) (err os.Error) {
 	return
 }
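The same reshaping repeats in the other locale files below: package main becomes an importable *cfg package, RegisterServer takes a net.Listener, and every implementation gains DefaultFam. Pulling those methods together, the locale.Locale interface these packages register against presumably looks something like this (a sketch inferred from the diff, not copied from locale/locale.go):

// Inferred method set only; the real declaration lives in locale/locale.go.
type Locale interface {
	Init(role string)
	ParentAddr() string
	Addr() string
	Ip() string
	DefaultFam() string
	RegisterServer(l net.Listener) os.Error
	// SlaveIdFromVitalData is commented out across the locales for now.
}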

locale/jaguar/jaguar_test.go

+package jaguarcfg
+
+import (
+	"testing"
+)
+
+func TestJaguar(t *testing.T) {
+}

locale/json/json.go

  * the U.S. Government retains certain rights in this software.
  */
 
-package main
+package jsoncfg
 
 import (
 	"os"
 	"io/ioutil"
 	"json"
 	"log"
+	"net"
 	"bitbucket.org/npe/cluster/locale"
 )
 
 
 
 func (l *JsonCfg) Init(role string) {
-	getIfc()
 	switch role {
 	case "master", "slave":
 		l.parentAddr = "127.0.0.1:2424"
 	return l.parentAddr
 }
 
+func (s *JsonCfg) DefaultFam() string {
+	return "tcp"
+}
+
 func (l *JsonCfg) Addr() string {
 	return l.addr
 }
 	return l.addr
 }
 
-func (s *JsonCfg) SlaveIdFromVitalData(vd *vitalData) (id string) {
-	log.Exit("Implement SlaveIdFromVitalData")
-	return "1"
-}
+// func (s *JsonCfg) SlaveIdFromVitalData(vd *vitalData) (id string) {
+// 	log.Exit("Implement SlaveIdFromVitalData")
+// 	return "1"
+// }
 
-func (loc *JsonCfg) RegisterServer(l Listener) (err os.Error) {
+func (loc *JsonCfg) RegisterServer(l net.Listener) (err os.Error) {
 	return
 }

locale/json/json_test.go

+package jsoncfg
+
+import (
+	"testing"
+)
+
+func TestJsonCfg(t *testing.T) {
+}

locale/local/local.go

  * the U.S. Government retains certain rights in this software.
  */
 
-package main
+package localcfg
 
 import (
 	"os"
+	"net"
 	"strings"
 	"io/ioutil"
 	"log"
-	"strconv"
+	// "strconv"
 	"bitbucket.org/npe/cluster/locale"
 )
 
 
-type local struct {
+type Local struct {
 	parentAddr string
 	addr       string
 	ip         string
 	idMap      map[string]string
 }
 
+var srvAddr = "/tmp/addr"
+
 func init() {
-	locale.Add("local", &local{"0.0.0.0:0", "0.0.0.0:0", "0.0.0.0", 0, make(map[string]string)})
+	locale.Add("local", &Local{"0.0.0.0:0", "0.0.0.0:0", "0.0.0.0", 0, make(map[string]string)})
 }
 
-func (l *local) Init(role string) {
+func (l *Local) Init(role string) {
 	switch role {
 	case "master", "slave":
 		cmd, err := ioutil.ReadFile(srvAddr)
 	}
 }
 
-func (l *local) ParentAddr() string {
+func (l *Local) ParentAddr() string {
 	return l.parentAddr
 }
 
-func (l *local) Addr() string {
+func (l *Local) Addr() string {
 	return l.addr
 }
 
-func (l *local) Ip() string {
+func (l *Local) Ip() string {
 	return l.ip
 }
 
-func (s *local) SlaveIdFromVitalData(vd *vitalData) string {
-	/* grab the server address from vital data and index into our map */
-	addrs := strings.Split(vd.ServerAddr, ":", 2)
-	id, ok := s.idMap[addrs[0]]
-	if !ok {
-		s.maxid++
-		s.idMap[addrs[0]] = strconv.Itoa(s.maxid), ok
-	}
-	return id
+func (s *Local) DefaultFam() string {
+	return "tcp"
 }
 
-func (loc *local) RegisterServer(l Listener) (err os.Error) {
+// func (s *Local) SlaveIdFromVitalData(vd *vitalData) string {
+// 	/* grab the server address from vital data and index into our map */
+// 	addrs := strings.Split(vd.ServerAddr, ":", 2)
+// 	id, ok := s.idMap[addrs[0]]
+// 	if !ok {
+// 		s.maxid++
+// 		s.idMap[addrs[0]] = strconv.Itoa(s.maxid), ok
+// 	}
+// 	return id
+// }
+
+func (loc *Local) RegisterServer(l net.Listener) (err os.Error) {
 	/* take the port only -- the address shows as 0.0.0.0 */
 	addr := strings.Split(l.Addr().String(), ":", 2)
 	return ioutil.WriteFile(srvAddr, []byte(addr[1]), 0644)
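The local locale coordinates master and slave on one machine through the srvAddr file: RegisterServer writes just the listener's port to /tmp/addr, and Init reads it back when building the parent address. A minimal sketch of that handshake, with error handling dropped and the address reconstruction assumed, since Init's body is truncated above:

// Hypothetical illustration of the /tmp/addr handshake.
func localHandshakeSketch() {
	// master side: listen on an ephemeral port, then publish it
	l, _ := net.Listen("tcp", "127.0.0.1:0")
	loc := &Local{}
	loc.RegisterServer(l) // writes e.g. "41234" to /tmp/addr

	// slave side: read the port back and rebuild the parent address
	port, _ := ioutil.ReadFile(srvAddr)
	parent := "127.0.0.1:" + string(port)
	_ = parent
}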

locale/local/local_test.go

+package localcfg
+
+import (
+	"testing"
+)
+
+func TestLocalCfg(t *testing.T) {
+}

locale/locale/locale_test.go

+package locale
+
+import (
+	"testing"
+)
+
+func TestLocale(t *testing.T) {
+}

locale/strongbox/strongbox.go

  * the U.S. Government retains certain rights in this software.
  */
 
-package main
+package strongboxcfg
 
 import (
 	"os"
 	"net"
 	"log"
 	"strconv"
-	"strings"
+	// "strings"
 	"bitbucket.org/npe/cluster/locale"
 )
 
 
-type strongbox struct {
+type StrongBox struct {
 	parentAddr string
 	ip         string
 	addr       string // consider a better name
 	idMap      map[string]string
 }
 
+var cmdPort string
+
 func init() {
-	locale.Add("strongbox", new(strongbox))
+	locale.Add("StrongBox", new(StrongBox))
 }
 
-func (s *strongbox) getIPs() []string {
+func (s *StrongBox) getIPs() []string {
 	hostName, err := os.Hostname()
 	if err != nil {
 		log.Exit(err)
 	return addrs
 }
 
-func (s *strongbox) initHostTable() {
+func (s *StrongBox) initHostTable() {
 	s.hostMap = make(map[string][]string)
 	s.idMap = make(map[string]string)
 	for i := 0; i < 197; i++ {
 	}
 }
 
-func (s *strongbox) Init(role string) {
+func (s *StrongBox) Init(role string) {
 	s.initHostTable()
 	addrs := s.getIPs()
 	switch role {
 		s.parentAddr = ""
 	case "slave", "run":
 		cmdPort = "6666"
-		/* on strongbox there's only ever one.
+		/* on StrongBox there's only ever one.
 		 * pick out the lowest-level octet.
 		 */
 		b := net.ParseIP(addrs[0]).To4()
 	}
 }
 
-func (s *strongbox) ParentAddr() string {
+func (s *StrongBox) ParentAddr() string {
 	return s.parentAddr
 }
 
-func (s *strongbox) Addr() string {
+func (s *StrongBox) Addr() string {
 	return s.addr
 }
 
-func (s *strongbox) Ip() string {
+func (s *StrongBox) Ip() string {
 	return s.ip
 }
 
-func (s *strongbox) SlaveIdFromVitalData(vd *vitalData) (id string) {
-	/* grab the server address from vital data and index into our map */
-	addrs := strings.Split(vd.ServerAddr, ":", 2)
-	id = s.idMap[addrs[0]]
+func (s *StrongBox) DefaultFam() string {
+	return "tcp"
+}
+
+// func (s *StrongBox) SlaveIdFromVitalData(vd *vitalData) (id string) {
+// 	/* grab the server address from vital data and index into our map */
+// 	addrs := strings.Split(vd.ServerAddr, ":", 2)
+// 	id = s.idMap[addrs[0]]
+// 	return
+// }
+
+func (s *StrongBox) RegisterServer(l net.Listener) (err os.Error) {
 	return
 }
-
-func (s *strongbox) RegisterServer(l Listener) (err os.Error) {
-	return
-}

locale/strongbox/strongbox_test.go

+package strongboxcfg
+
+import (
+	"testing"
+)
+
+func TestStrongboxCfg(t *testing.T) {
+}
 	return
 }
 
+func (rl *RangeList) Len() (n int) {
+	for e := rl.l.Front(); e != nil; e = e.Next() {
+		r, ok := e.Value.(*nodeRange)
+		if !ok {
+			panic("can't happen")
+		}
+		n += r.end - r.beg + 1
+	}
+	return
+}
+
 
 func (rl *RangeList) Enum() (enum []string) {
 	for e := rl.l.Front(); e != nil; e = e.Next() {
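The new Len complements Enum: Enum expands the list into individual node names, while Len sums end - beg + 1 over the stored ranges, so the two should always agree. A rough usage sketch, assuming ParseRangeString hands back a *RangeList the way the worker code seems to rely on (the range syntax in the string is made up):

func lenSketch() {
	rl, err := ParseRangeString("n[1-3,7]") // hypothetical node-range syntax
	if err != nil {
		return
	}
	if rl.Len() != len(rl.Enum()) {
		panic("RangeList.Len disagrees with len(Enum())")
	}
}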

nrange/range_test.go

 			c := v
 			m.Add(c)
 		}
-		if l.Contains(&m) != r.s {
-			t.Errorf("simple noderange relationship test failed: wanted %q got %q", r.s, l.Contains(&m))
+		if l.Contains(m) != r.s {
+			t.Errorf("simple noderange relationship test failed: wanted %q got %q", r.s, l.Contains(m))
 		}
 	}
 }
 			c := v
 			m.Add(c)
 		}
-		l.Merge(&m)
+		l.Merge(m)
 		if !reflect.DeepEqual(l.String(), r.s) {
 			t.Errorf("simple noderange relationship test failed: wanted %q got %q", r.s, l.String())
 		}
 			c := v
 			l.Add(c)
 		}
-		ml := &mergeFifoLine{&l, r.rs}
+		ml := &mergeFifoLine{l, r.rs}
 		if ml.String() != r.s {
 			t.Errorf("simple mergelist relationship test failed: wanted %q got %q", r.s, ml.String())
 		}
 	for _, v := range mergerers {
 		m := NewMerger()
 		m.SetTimeout(100000000)
-		mgr := iotest.NewReadLogger("merger ", &m)
+		mgr := iotest.NewReadLogger("merger ", m)
 
 		errChan := make(chan os.Error)
 		for i := 1; i <= v.numWriters; i++ {
 			mw := m.NewWriter(i)
 	//		w := iotest.NewWriteLogger("multiwriter "+strconv.Itoa(i), &mw)
 			go func() {
-				_, err := io.Copy(&mw, s)
+				_, err := io.Copy(mw, s)
 				errChan <- err
 			}()
 		}
 		m := NewMerger()
 		m.SetTimeout(100000000)
 		
-		mgr := iotest.NewReadLogger("merger ", &m)
+		mgr := iotest.NewReadLogger("merger ", m)
 		errChan := make(chan os.Error)
 		totWriters := 1
 		for _, v := range d.m {
 				s := iotest.NewReadLogger("string reader "+strconv.Itoa(i), &r)
 				mw := m.NewWriter(totWriters)
 				totWriters++
-				w := iotest.NewWriteLogger("multiwriter "+strconv.Itoa(i), &mw)
+				w := iotest.NewWriteLogger("multiwriter "+strconv.Itoa(i), mw)
 				go func() {
 					_, err := io.Copy(w, s)
 					errChan <- err
 GOFILES=\
 	worker.go\
 	rpc.go\
-#	api.go\
+	api.go\
 
 include $(GOROOT)/src/Make.pkg
-package workers
+package worker
 
 import (
 	"rpc"
 // it behaves exactly like a local table of workers would but it is local. 
 
 type WorkerClient struct {
-	conn rpc.Client
+	conn *rpc.Client
 }
 
-func NewClient(loc Locale) (w *WorkerClient, err os.Error) {
+func NewClient(loc locale.Locale) (w *WorkerClient, err os.Error) {
 	w = new(WorkerClient)
-	w.conn, err = (loc.Lfam, "", loc.Lserver)
+	w.conn, err = rpc.Dial(loc.DefaultFam(), loc.Addr())
+	return
 }
 
-func (m *WorkerClient) GetId(w *worker.Worker) (id string, err os.Error){
+func (m *WorkerClient) GetId(w *Worker) (id string, err os.Error) {
 	resp := &Resp{}
-	err := m.Call("Workers.GetId", w.Info(), resp)
+	arg := &RunArgs{Worker: w}
+	err = m.conn.Call("Workers.GetId", arg, resp)
 	if err != nil {
 		return
 	}
-	id := resp.id
+	id = resp.Msg
 	return
 }
 
 // eventually we need to make this work with local slaves as well via an interface
 // you can have an error channel to make this work.
 
-func (w *WorkerClient) Add(id string, info *Info) (gotid string, err os.Error) {
-	arg := &WorkerInfo{id: id, info: info}
+func (wc *WorkerClient) Add(id string, w *Worker) (gotid string, err os.Error) {
+	arg := &RunArgs{Worker: w}
 	resp := new(Resp)
-	err := w.conn.Call("Workers.AddRpc", info, resp)
+	err = wc.conn.Call("Workers.AddRpc", arg, resp)
 	if err != nil {
 		return
 	}
-	gotid = resp.id
+	gotid = resp.Msg
 	return
 }
 
-func (w *WorkerClient) Get(n string) (s Info, err os.Error) {
+func (w *WorkerClient) Get(n string) (s *Worker, err os.Error) {
 	err = w.conn.Call("Workers.GetRpc", &n, &s)
 	return
 }
 }
 
 func (w *WorkerClient) ForkRelay() (id string, err os.Error) {
-	resp := &SlaveResp{}
-	err = rpc.Call("Workers.ForkRelayRpc", v, resp)
-	id = resp.id
+	arg := &RunArgs{}
+	resp := &Resp{}
+	err = w.conn.Call("Workers.ForkRelayRpc", arg, resp)
+	id = resp.Msg
 	return
 }
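The client wrapper above (presumably api.go, just re-enabled in the Makefile) turns the Workers RPC service into plain method calls: NewClient dials the locale's default family and address, and each method ships RunArgs and reads the reply out of Resp.Msg. A rough sketch of a round trip, with a made-up worker id:

// Hypothetical usage of the wrapper; error handling kept minimal.
func clientSketch(loc locale.Locale) (err os.Error) {
	wc, err := NewClient(loc) // rpc.Dial(loc.DefaultFam(), loc.Addr())
	if err != nil {
		return
	}
	w := &Worker{Id: "42"} // made-up worker
	id, err := wc.GetId(w) // Workers.GetId; the id comes back in Resp.Msg
	if err != nil {
		return
	}
	_, err = wc.Add(id, w) // Workers.AddRpc
	return
}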
 

worker/api_test.go

+package worker
+
+import (
+	"testing"
+)
+
+func TestApi(t *testing.T) {
+
+}
 	"bitbucket.org/npe/cluster/fileset"
 	"bitbucket.org/npe/cluster/locale"
 	"bitbucket.org/npe/cluster/nrange"
-	
+
 	"os"
 )
 
 )
 
 type RunArgs struct {
-	Ip      *ioproxy.Ioproxy
-	Fs      *fileset.FileSet
-	Workers *Info
+	Ip     *ioproxy.Ioproxy
+	Fs     *fileset.FileSet
+	Worker *Worker
 }
 
+var (
+	ErrNotRegistered = os.NewError("worker not registered")
+	ErrAlreadyExists = os.NewError("attempted to register existing worker")
+)
+
 type Resp struct {
 	Msg string
 }
 // Addr() string
 // Ip() string
 
-func InitRpc(loc locale.Locale) (err os.Error) {
+func (w *Worker) Dial() (c *rpc.Client, err os.Error) {
+	c, err = rpc.Dial("tcp", w.Addr)
+	return
+}
+
+func (ws *Workers) Init(loc locale.Locale) (err os.Error) {
 	l, err := net.Listen(loc.DefaultFam(), loc.Addr())
 	if err != nil {
 		return
 			if err != nil {
 				return
 			}
+			// We don't know the worker's values yet, but it has to exist now so
+			// that its connection can be mapped to an id later. This is an abuse
+			// of the rpc library, but I don't see a better way to do it.
+			ws.AddAddr(c.RemoteAddr().String(), New(c))
+			// Create a new worker for the address: that gives us an addr mapped
+			// to a conn, and the info struct should give us all the information
+			// we need. When the worker is added later, it is looked up by id.
+
 			// I can register the codec here, that's not a problem
 			// but the problem is that I need to figure out the hierarchy.
 			// workers are a general set of workers that are handled by lots of guys.
 			// the rpc server mediates the connections.
-			
+
 			// each worker needs to have its own connection, this is a horrible abuse of the rpc mechanism.
 			// how do you get a worker's id when somebody talks to it.
 			// it might be worth it to make individual rpc servers for all of these connections.
 	return
 }
 
-func (sv *Workers) GetInfoRpc(a *RunArgs, hostInfo *Resp) (err os.Error) {
-	hostInfo.Msg = sv.String()
+func (ws *Workers) GetInfoRpc(arg *RunArgs, hostInfo *Resp) (err os.Error) {
+	wi := arg.Worker
+	w, ok := ws.Get(wi.Id)
+	if !ok {
+		return ErrNotRegistered
+	}
+	hostInfo.Msg = w.String()
 	return
 }
 
 // id string, 
-func (sv *Workers) AddRpc(arg *RunArgs, resp *Resp) (err os.Error) {
-	w := arg.Workers
+func (ws *Workers) AddRpc(arg *RunArgs, resp *Resp) (err os.Error) {
+	wi := arg.Worker
+	w := wi
+	if _, ok := ws.Get(wi.Id); ok {
+		return ErrAlreadyExists
+	}
 	/* quite the hack. At some point, on a really complex system, 
 	 * we'll need to return a set of listen addresses for a daemon, but we've yet to
 	 * see that in actual practice. We can't use LocalAddr here, since it returns our listen
 	// 	w.ServerAddr = strings.Split(sv.conn.RemoteAddr().String(), ":", 2)[0] + w.ServerAddr[7:]
 	// }
 	// id?
-	sv.Add(w)
+	ws.Add(w)
 	resp.Msg = w.Id
 	return
 }
 
 // right now api and rpc are circular dependencies. 
 
-func (w *Workers) ForkRelayRpc(req *RunArgs, resp *Resp) (err os.Error) {
-	ip := req.Ip
-	fs := req.Fs
-	wi := req.Workers
+
+
+func (ws *Workers) ForkRelayRpc(arg *RunArgs, resp *Resp) (err os.Error) {
+	ip := arg.Ip
+	fs := arg.Fs
+	wi := arg.Worker
+	w, ok := ws.Get(wi.Id)
+	if !ok {
+		return ErrNotRegistered
+	}
 	// need to indicate that this is a pipe.
 	p, err := startRelayTarget()
 	if err != nil {
 	runc := rpc.NewClient(p.Stdout)
 	// okay how are you going to handle all of this? 
 	// we are returning hierarchies. 
-/*
-so what does this do? it starts the runrpc receiver. 
+	/*
+		okay, that is correct 
+		and that is how we should do it.
 
-
-
-*/
+	*/
 	nw, err := nrange.ParseRangeString(wi.Nodes)
 	if err != nil {
 		return
 	}
-	go runc.Go("Workers.RunRpc", &RunArgs{ip, fs, &Info{Nodes: nw.Intersect(w.rl).String()}}, nil, nil)	// err?
-	n, err := io.Copyn(p.Stdout, w.codec, fs.Len())
+	w.Nodes = nw.Intersect(ws.rl).String()
+	go runc.Go("Workers.RunRpc", &RunArgs{ip, fs, w}, nil, nil) // err?
+	n, err := io.Copyn(p.Stdout, w.conn, fs.Len())
 	if err != nil {
 		return
 	}
 		"-debug=" + strconv.Itoa(DebugLevel),
 		"-p=" + boolString(DoPrivateMount),
 		// how to handle locales in the context?
-//		"-locale=" + Locale.String(),
-//		"-prefix=" + w.Id,
+		//		"-locale=" + Locale.String(),
+		//		"-prefix=" + w.Id,
 		"R",
 	}
 	nilEnv := []string{""}
 	if err != nil {
 		log.Exit("startRelayTarget: run: ", err)
 	}
-//	go WaitAllChildren()
+	//	go WaitAllChildren()
 	_, err = p.Wait(0)
 	return
 }
 
 
-
 // uses the codec to communicate over the same channel. Never call using rpc.Call, only call using rpc.Go
-func (sv *Workers) RunRpc(args *RunArgs, resp *Resp) (err os.Error) {
-	ip := args.Ip
-	fs := args.Fs
-//	w := args.Workers
+func (ws *Workers) RunRpc(arg *RunArgs, resp *Resp) (err os.Error) {
+	ip := arg.Ip
+	fs := arg.Fs
+	wi := arg.Worker
+	w, ok := ws.Get(wi.Id)
+	if !ok {
+		return ErrNotRegistered
+	}
 
 	fs.DoMount()
-	n, err := io.Copy(fs, sv.codec)
+	n, err := io.Copy(fs, w.conn)
 	if err != nil {
 		return
 	}
 	// 	w.relayToPeers()
 	// }
 	if fs.Len() > 0 {
-		fs.relayToChildren()
+		ws.relayToChildren(ip, fs, w)
 	}
 	ip.Wait()
 	return
 }
 
 
-func (m *Workers) StartExecRpc(args *RunArgs, resp *Resp) (err os.Error) {
+func (ws *Workers) StartExecRpc(args *RunArgs, resp *Resp) (err os.Error) {
 	ip := args.Ip
 	fs := args.Fs
-	w := args.Workers
+	w := args.Worker
 
 	if !w.IsAlive() {
 		return ErrNoWorkers
 	}
-
-	ip, err = ioproxy.Start(ip.Fam(), ip.Addr(), w.NWorkers())
-	if err != nil {
-		return
-	}
 	// handle hierarchy later
 	nw, err := nrange.ParseRangeString(w.Nodes)
 	if err != nil {
 		resp = &Resp{Msg: "startExecution: bad slaveNodeList: " + err.String()}
 		return
 	}
+	nw = nw.Intersect(ws.rl)
+	ip, err = ioproxy.Start(ip.Fam(), ip.Addr(), nw.Len())
+	if err != nil {
+		return
+	}
 
-	// get credentials later
-	// circular reference.
-	nw.Intersect(m.rl).relayToChildren(fs, ip)
+	ws.relayToChildren(ip, fs, w)
 	return
 }
+
+// numWorkers += numOtherNodes
+
+func (ws *Workers) relayToChildren(ip *ioproxy.Ioproxy, fs *fileset.FileSet, w *Worker) (err os.Error) {
+	nw, err := nrange.ParseRangeString(w.Nodes)
+	if err != nil {
+		return
+	}
+	for _, i := range nw.Enum() {
+		// pass the node name in as an argument so each goroutine gets its
+		// own copy rather than sharing the loop variable
+		go func(node string) {
+			w, ok := ws.Get(node)
+			if !ok {
+				return
+			}
+			c, err := w.Dial()
+			// errchan?
+			if err != nil {
+				return
+			}
+			c.Go("Workers.StartExecRpc", &RunArgs{ip, fs, w}, nil, nil)
+		}(i)
+	}
+	return
+}
+
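The comments in the accept path above describe the plan: stash a Worker keyed by its connection's remote address the moment it connects, then resolve it to an id once the worker identifies itself. That second step is not in this commit; a hypothetical sketch of what it could look like, using the addrMap and workers maps defined in the Worker/Workers code that follows:

// Hypothetical and not part of this commit: promote an address-keyed worker
// into the id-keyed map once its id is known.
func (ws *Workers) promote(remoteAddr, id string) bool {
	w, ok := ws.GetAddr(remoteAddr)
	if !ok {
		return false
	}
	w.Id = id
	ws.Add(w)
	return true
}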
 	"bitbucket.org/npe/cluster/nrange"
 )
 
-
+// make sure that net.Conn is never public, it will blow up gob.
 type Worker struct {
-	alive  bool
-	addr   string
-	conn   net.Conn
-	status chan int
-}
-
-// these are potentially concurrent now.
-// need to figure out how to manage the database correctly.
-
-type Workers struct {
-	workers map[string]*Info
-	addr2id map[string]string
-	conn    net.Conn
-	rl *nrange.RangeList
-}
-
-
-func (w Worker) IsAlive() bool {
-	return w.alive
-}
-
-func New(conn net.Conn, status chan int) *Worker {
-	return &Worker{alive: true, conn: conn, status: status}
-}
-
-// this is a wire info to send over rpc to not trip up gobs.
-type Info struct {
 	Id     string
 	Addr   string
 	Server string
 	Parent string
 	Peers  string
 	Nodes  string
+	Alive  bool
+	conn   net.Conn
 }
 
-func (i *Info) String() string {
-	if i == nil {
+func (w Worker) IsAlive() bool {
+	return w.Alive
+}
+
+func New(conn net.Conn) *Worker {
+	// derive a bunch of information from the conn.
+	return &Worker{
+		Addr:   conn.RemoteAddr().String(),
+		Parent: conn.LocalAddr().String(),
+		Alive:  true,
+		conn:   conn,
+	}
+}
+
+
+func (w *Worker) String() string {
+	if w == nil {
 		return "<nil>"
 	}
-	return i.Id + " " + i.Addr
+	return w.Id + " " + w.Addr
 }
 
-func (sv *Workers) String() string {
+
+// these are potentially concurrent now.
+// need to figure out how to manage the database correctly.
+
+type Workers struct {
+	workers map[string]*Worker
+	addrMap map[string]*Worker
+	conn    net.Conn
+	rl      *nrange.RangeList
+}
+
+
+func NewWorkers() (s Workers) {
+	s.workers = make(map[string]*Worker)
+	s.addrMap = make(map[string]*Worker)
+	return
+}
+
+func (ws *Workers) Get(n string) (wi *Worker, ok bool) {
+	wi, ok = ws.workers[n]
+	return
+}
+
+func (ws *Workers) AddAddr(addr string, w *Worker) {
+	ws.addrMap[addr] = w
+	return
+}
+
+func (ws *Workers) GetAddr(addr string) (w *Worker, ok bool) {
+	w, ok = ws.addrMap[addr]
+	return
+}
+
+// need to add an actual worker here derived from the info.
+func (ws *Workers) Add(w *Worker) {
+	ws.workers[w.Id] = w
+	return
+}
+
+func (ws *Workers) String() string {
 	b := bytes.NewBuffer([]byte{})
-	for _, i := range sv.addr2id {
-		b.WriteString(i)
+	for _, w := range ws.addrMap {
+		b.WriteString(w.String())
 		b.WriteByte('\n')
 	}
 	return b.String()
 }
-
-
-func NewWorkers() (s Workers) {
-	s.workers = make(map[string]*Info)
-	return
-}
-
-func (sv *Workers) Get(n string) (s *Info, ok bool) {
-	s, ok = sv.workers[n]
-	return
-}
-func (sv *Workers) Add(i *Info) {
-	sv.workers[i.Id] = i
-	return
-}
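On the note above that net.Conn must never be public: gob only encodes exported fields, so with conn unexported a Worker can cross the rpc/gob boundary while the live connection stays behind; an exported Conn field would make gob try, and fail, to serialize the socket. A small sketch in the same pre-Go1 style as the tree, assuming "bytes" and "gob" are imported (the helper itself is hypothetical):

func gobSketch() os.Error {
	// Only the exported fields (Id, Addr, Server, Parent, Peers, Nodes,
	// Alive) are encoded; the unexported conn is skipped entirely. If conn
	// were an exported net.Conn, Encode would fail as soon as it held a
	// real connection, since a live socket has nothing gob can serialize.
	w := &Worker{Id: "3", Addr: "10.0.0.3:6666", Alive: true}
	var buf bytes.Buffer
	return gob.NewEncoder(&buf).Encode(w)
}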