compiles and lists hypervisors

Signed-off-by: Jeff Carr <jcarr@wit.com>
Jeff Carr 2024-10-26 08:54:28 -05:00
parent 9020957ee7
commit b4518e8b82
8 changed files with 165 additions and 144 deletions
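For orientation, here is a minimal, hypothetical condensation of the droplet-registration path the first file below exercises. The helper name registerDomain is made up; me, pb (go.wit.com/lib/protobuf/virtbuf), AddDroplet, uuid (github.com/google/uuid) and the DropletState constants are taken from the hunks themselves.

// registerDomain is a hypothetical wrapper around the path shown below:
// look the libvirt domain up in the cluster protobuf and, if it is new,
// add it with conservative defaults and a generated UUID.
func registerDomain(domcfg *libvirtxml.Domain) (*pb.Droplet, error) {
    d, err := findDomain(domcfg) // matches by hostname or UUID, as in the diff
    if err != nil {
        return nil, err
    }
    if d == nil {
        // defaults as used in the diff: 2 cpus, 2*1024*1024 memory units
        d = me.cluster.AddDroplet(domcfg.UUID, domcfg.Name, 2, 2*1024*1024)
        d.StartState = pb.DropletState_OFF
        d.CurrentState = pb.DropletState_UNKNOWN
        if d.Uuid == "" {
            d.Uuid = uuid.New().String() // libvirt XML had no UUID
        }
        me.changed = true
    }
    // the real path then calls updateDroplet(d, domcfg) to sync fields and emit events
    return d, nil
}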


@@ -15,7 +15,7 @@ import (
)
// import a libvirt xml file
func addDomainDroplet(domcfg *libvirtxml.Domain) (*DropletT, []*pb.Event, error) {
func addDomainDroplet(domcfg *libvirtxml.Domain) (*pb.Droplet, []*pb.Event, error) {
var alle []*pb.Event
if domcfg == nil {
return nil, alle, errors.New("domcfg == nil")
@@ -26,26 +26,20 @@ func addDomainDroplet(domcfg *libvirtxml.Domain) (*DropletT, []*pb.Event, error)
return nil, alle, err
}
if d == nil {
// this is a new unknown droplet (not in the config file)
d = new(DropletT)
d.pb = me.cluster.AddDroplet(domcfg.UUID, domcfg.Name, 2, 2*1024*1024)
d.pb.StartState = pb.DropletState_OFF
d = me.cluster.AddDroplet(domcfg.UUID, domcfg.Name, 2, 2*1024*1024)
d.StartState = pb.DropletState_OFF
d.CurrentState = pb.DropletState_UNKNOWN
// if the domcfg doesn't have a uuid, make a new one here
if d.pb.Uuid == "" {
if d.Uuid == "" {
u := uuid.New()
d.pb.Uuid = u.String()
d.Uuid = u.String()
}
me.droplets = append(me.droplets, d)
me.changed = true
}
alle, err = updateDroplet(d, domcfg)
if err != nil {
log.Info("updateDroplet() failed for", d.pb.Hostname)
log.Info("updateDroplet() failed for", d.Hostname)
return d, alle, errors.New("update failed for " + domcfg.Name)
}
log.Info("added new droplet", domcfg.Name, domcfg.UUID)
@@ -53,36 +47,36 @@ func addDomainDroplet(domcfg *libvirtxml.Domain) (*DropletT, []*pb.Event, error)
return d, alle, nil
}
func findDomain(domcfg *libvirtxml.Domain) (*DropletT, error) {
var found *DropletT
func findDomain(domcfg *libvirtxml.Domain) (*pb.Droplet, error) {
var found *pb.Droplet
if domcfg == nil {
return nil, errors.New("domcfg == nil")
}
for _, d := range me.droplets {
if d.pb.Hostname == domcfg.Name {
if d.pb.Uuid != domcfg.UUID {
for _, d := range me.cluster.Droplets {
if d.Hostname == domcfg.Name {
if d.Uuid != domcfg.UUID {
if domcfg.UUID == "" {
// ignore blank or nonexistent UUIDs
// todo: check to see if the uuid already exists ?
domcfg.UUID = d.pb.Uuid
domcfg.UUID = d.Uuid
} else {
fmt.Println("Will Change UUID from", d.pb.Uuid, "to", domcfg.UUID, "for hostname", d.pb.Hostname)
d.pb.Uuid = domcfg.UUID
fmt.Println("Will Change UUID from", d.Uuid, "to", domcfg.UUID, "for hostname", d.Hostname)
d.Uuid = domcfg.UUID
me.changed = true
}
}
if found == nil {
found = d
} else {
fmt.Println("FOUND TWICE", d.pb.Uuid, domcfg.Name, domcfg.UUID)
fmt.Println("FOUND TWICE", d.Uuid, domcfg.Name, domcfg.UUID)
return d, errors.New("Found Twice")
}
}
if d.pb.Uuid == domcfg.UUID {
if d.pb.Hostname != domcfg.Name {
fmt.Println("protobuf has: UUID and Name:", d.pb.Uuid, d.pb.Hostname)
if d.Uuid == domcfg.UUID {
if d.Hostname != domcfg.Name {
fmt.Println("protobuf has: UUID and Name:", d.Uuid, d.Hostname)
fmt.Println("libvirt has: UUID and Name:", domcfg.UUID, domcfg.Name)
fmt.Println("FOUND UUID WITH MIS-MATCHED NAME", domcfg.Name, domcfg.UUID)
return d, errors.New("UUID with mis-matched names")
@@ -93,7 +87,7 @@ func findDomain(domcfg *libvirtxml.Domain) (*DropletT, error) {
return found, nil
}
func updateDroplet(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error) {
func updateDroplet(d *pb.Droplet, domcfg *libvirtxml.Domain) ([]*pb.Event, error) {
var alle []*pb.Event
if d == nil {
@@ -116,24 +110,24 @@ func updateDroplet(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error)
if (domcfg.OS != nil) && (domcfg.OS.Type != nil) {
// OS Type: &{Arch:x86_64 Machine:pc-i440fx-5.2 Type:hvm}
t := domcfg.OS.Type
if d.pb.QemuArch != t.Arch {
e := NewChangeEvent(d.pb, "Droplet.QemuArch", d.pb.QemuArch, t.Arch)
if d.QemuArch != t.Arch {
e := NewChangeEvent(d, "Droplet.QemuArch", d.QemuArch, t.Arch)
alle = append(alle, e)
d.pb.QemuArch = t.Arch
d.QemuArch = t.Arch
}
if d.pb.QemuMachine != t.Machine {
e := NewChangeEvent(d.pb, "Droplet.QemuMachine", d.pb.QemuMachine, t.Machine)
if d.QemuMachine != t.Machine {
e := NewChangeEvent(d, "Droplet.QemuMachine", d.QemuMachine, t.Machine)
alle = append(alle, e)
d.pb.QemuMachine = t.Machine
d.QemuMachine = t.Machine
}
}
// check cpus
if d.pb.Cpus != int64(domcfg.VCPU.Value) {
if d.Cpus != int64(domcfg.VCPU.Value) {
// fmt.Printf("cpus changed. VCPU = %+v\n", domcfg.VCPU)
fmt.Printf("cpus changed. from %d to %d\n", d.pb.Cpus, domcfg.VCPU.Value)
alle = append(alle, NewChangeEvent(d.pb, "Droplet.Cpus", d.pb.Cpus, domcfg.VCPU.Value))
d.pb.Cpus = int64(domcfg.VCPU.Value)
fmt.Printf("cpus changed. from %d to %d\n", d.Cpus, domcfg.VCPU.Value)
alle = append(alle, NewChangeEvent(d, "Droplet.Cpus", d.Cpus, domcfg.VCPU.Value))
d.Cpus = int64(domcfg.VCPU.Value)
}
// update spice port
@@ -148,11 +142,11 @@ func updateDroplet(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error)
if s.AutoPort == "yes" {
// should ignore either way
} else {
if d.pb.SpicePort != int64(s.Port) {
if d.SpicePort != int64(s.Port) {
// print out, but ignore the port number
d.pb.SpicePort = int64(s.Port)
d.SpicePort = int64(s.Port)
fmt.Printf("Spice Port set to = %d\n", s.Port)
alle = append(alle, NewChangeEvent(d.pb, "Droplet.SpicePort", d.pb.SpicePort, s.Port))
alle = append(alle, NewChangeEvent(d, "Droplet.SpicePort", d.SpicePort, s.Port))
}
}
}
@@ -197,7 +191,7 @@ func updateDroplet(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error)
}
// returns an error if something went wrong
func updateMemory(d *DropletT, domcfg *libvirtxml.Domain) (*pb.Event, error) {
func updateMemory(d *pb.Droplet, domcfg *libvirtxml.Domain) (*pb.Event, error) {
if (d == nil) || (domcfg == nil) {
return nil, errors.New("domcfg == nil")
}
@@ -221,14 +215,14 @@ func updateMemory(d *DropletT, domcfg *libvirtxml.Domain) (*pb.Event, error) {
}
e := d.SetMemory(m)
if e != nil {
fmt.Printf("Memory changed %s to %d %s\n", pb.HumanFormatBytes(d.pb.Memory), domcfg.Memory.Value, domcfg.Memory.Unit)
d.pb.Memory = m
fmt.Printf("Memory changed %s to %d %s\n", pb.HumanFormatBytes(d.Memory), domcfg.Memory.Value, domcfg.Memory.Unit)
d.Memory = m
// me.changed = true
}
return e, nil
}
func updateNetwork(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error) {
func updateNetwork(d *pb.Droplet, domcfg *libvirtxml.Domain) ([]*pb.Event, error) {
var allEvents []*pb.Event
if (d == nil) || (domcfg == nil) {
return nil, errors.New("domcfg == nil")
@@ -297,7 +291,7 @@ func updateNetwork(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error)
for mac, brname := range macs {
var found bool = false
// log.Info("XML has mac address:", mac, brname)
for _, eth := range d.pb.Networks {
for _, eth := range d.Networks {
if eth.Mac == mac {
// log.Info("OKAY. FOUND ETH:", eth.Mac, eth.Name, brname)
found = true
@@ -319,7 +313,7 @@ func updateNetwork(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error)
if !found {
if checkUniqueMac(mac) {
} else {
log.Info("droplet", d.pb.Hostname, "duplicate mac address", mac)
log.Info("droplet", d.Hostname, "duplicate mac address", mac)
return nil, errors.New("duplicate mac address")
}
var eth *pb.Network
@@ -329,8 +323,8 @@ func updateNetwork(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error)
brname = "worldbr"
}
eth.Name = brname
d.pb.Networks = append(d.pb.Networks, eth)
allEvents = append(allEvents, NewChangeEvent(d.pb, "Droplet NewNetwork", "", mac+" "+brname))
d.Networks = append(d.Networks, eth)
allEvents = append(allEvents, NewChangeEvent(d, "Droplet NewNetwork", "", mac+" "+brname))
}
}
@@ -350,7 +344,7 @@ func updateNetwork(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error)
*/
// returns an error if something went wrong
func updateDisk(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error) {
func updateDisk(d *pb.Droplet, domcfg *libvirtxml.Domain) ([]*pb.Event, error) {
var alle []*pb.Event
if (d == nil) || (domcfg == nil) {
@@ -369,7 +363,7 @@ func updateDisk(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error) {
continue
}
e, err := insertFilename(d.pb, filename)
e, err := insertFilename(d, filename)
if err != nil {
return alle, err
}
@@ -380,7 +374,7 @@ func updateDisk(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error) {
/*
var found bool = false
for _, disk := range d.pb.Disks {
for _, disk := range d.Disks {
if disk.Filename == filename {
log.Verbose("OKAY. FOUND filename", filename)
found = true
@@ -390,7 +384,7 @@ func updateDisk(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error) {
var disk *pb.Disk
disk = new(pb.Disk)
disk.Filename = filename
d.pb.Disks = append(d.pb.Disks, disk)
d.Disks = append(d.Disks, disk)
log.Info("New filename", filename)
me.changed = true
}
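Every field update in updateDroplet above follows the same compare / emit-event / assign shape. A hedged sketch of that pattern, using NewChangeEvent exactly as it is called in the diff; the helper name syncCpus is hypothetical:

// syncCpus isolates the pattern used above for QemuArch, QemuMachine,
// Cpus and SpicePort: only touch the protobuf field when libvirt
// disagrees, and record the change as an event.
func syncCpus(d *pb.Droplet, domcfg *libvirtxml.Domain) []*pb.Event {
    var alle []*pb.Event
    if domcfg.VCPU == nil {
        return alle
    }
    if d.Cpus != int64(domcfg.VCPU.Value) {
        alle = append(alle, NewChangeEvent(d, "Droplet.Cpus", d.Cpus, domcfg.VCPU.Value))
        d.Cpus = int64(domcfg.VCPU.Value)
    }
    return alle
}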


@@ -113,8 +113,9 @@ func NewAddEvent(a any, fname string, newval any) *pb.Event {
return e
}
/*
// update the droplet memory
func (d *DropletT) SetMemory(b int64) *pb.Event {
func (d *pb.Droplet) SetMemory(b int64) *pb.Event {
oldm := pb.HumanFormatBytes(d.pb.Memory)
newm := pb.HumanFormatBytes(b)
if d.pb.Memory == b {
@@ -125,8 +126,11 @@ func (d *DropletT) SetMemory(b int64) *pb.Event {
return NewChangeEvent(d.pb, "Droplet.Memory", d.pb.Memory, b)
}
*/
/*
// update the droplet cpu count
func (d *DropletT) SetCpus(b int64) {
func (d *pb.Droplet) SetCpus(b int64) {
log.Info("Set the number of cpus for the droplet", b)
}
*/


@@ -10,9 +10,11 @@ import (
"go.wit.com/log"
)
func (d *DropletT) Start() {
/*
func (d *pb.Droplet) Start() {
log.Info("a new virtual machine is running")
}
*/
func (h *HyperT) RestartDaemon() {
url := "http://" + h.pb.Hostname + ":2520/kill"
@@ -43,34 +45,34 @@ func clusterReady() (bool, string) {
return false, "clusterReady() is unstable for " + shell.FormatDuration(last)
}
func (d *DropletT) dropletReady() (bool, string) {
func dropletReady(d *pb.Droplet) (bool, string) {
if d.CurrentState == pb.DropletState_ON {
return false, "EVENT start droplet is already ON"
}
if d.starts > 2 {
if d.Starts > 2 {
// reason := "EVENT start droplet has already been started " + d.starts + " times"
return false, fmt.Sprintln("EVENT start droplet has already been started ", d.starts, " times")
return false, fmt.Sprintln("EVENT start droplet has already been started ", d.Starts, " times")
}
return true, ""
}
func (h *HyperT) Start(d *DropletT) (bool, string) {
func (h *HyperT) Start(d *pb.Droplet) (bool, string) {
ready, result := clusterReady()
if !ready {
return false, result
}
ready, result = d.dropletReady()
ready, result = dropletReady(d)
if !ready {
return false, result
}
url := "http://" + h.pb.Hostname + ":2520/start?start=" + d.pb.Hostname
url := "http://" + h.pb.Hostname + ":2520/start?start=" + d.Hostname
s := shell.Wget(url)
result = "EVENT start droplet url: " + url + "\n"
result += "EVENT start droplet response: " + s.String()
// increment the counter of start attempts
d.starts += 1
d.Starts += 1
// mark the cluster as unstable so droplet starts can be throttled
me.unstable = time.Now()
@@ -102,7 +104,7 @@ func Start(name string) (bool, string) {
var pool []*HyperT
for _, h := range me.hypers {
result += fmt.Sprintln("could start droplet on", name, "on", h.pb.Hostname, h.pb.Active)
if d.pb.PreferredHypervisor == h.pb.Hostname {
if d.PreferredHypervisor == h.pb.Hostname {
// the config file says this droplet should run on this hypervisor
a, b := h.Start(d)
return a, result + b
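The Start path above gates every attempt twice: the cluster must not have been marked unstable recently, and the droplet must not already be ON or have been retried too often. A sketch of that gate condensed into one hypothetical function; the one-minute stability window is an assumption, since clusterReady()'s actual threshold is not shown in this diff:

// canStart is a hypothetical condensation of clusterReady() + dropletReady().
func canStart(d *pb.Droplet) (bool, string) {
    last := time.Since(me.unstable)
    if last < time.Minute { // assumed window; the real one is not in this diff
        return false, "cluster is unstable for " + shell.FormatDuration(last)
    }
    if d.CurrentState == pb.DropletState_ON {
        return false, "droplet is already ON"
    }
    if d.Starts > 2 {
        return false, fmt.Sprintln("droplet has already been started", d.Starts, "times")
    }
    return true, ""
}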

http.go

@@ -4,9 +4,7 @@ import (
"fmt"
"net/http"
"strings"
"time"
"go.wit.com/lib/gui/shell"
pb "go.wit.com/lib/protobuf/virtbuf"
"go.wit.com/lib/virtigoxml"
"go.wit.com/log"
@@ -24,23 +22,19 @@ func okHandler(w http.ResponseWriter, r *http.Request) {
// is the cluster running what it should?
if tmp == "/droplets" {
for _, d := range me.droplets {
if d.pb.StartState != pb.DropletState_ON {
for _, d := range me.cluster.Droplets {
if d.StartState != pb.DropletState_ON {
continue
}
dur := time.Since(d.lastpoll) // Calculate the elapsed time
var hname string
if d.h == nil {
hname = ""
} else {
hname = d.h.pb.Hostname
}
if d.CurrentState != pb.DropletState_ON {
fmt.Fprintln(w, "BAD STATE ", d.pb.Hostname, hname, "(", d.pb.StartState, "vs", d.CurrentState, ")", shell.FormatDuration(dur))
} else {
dur := time.Since(d.lastpoll) // Calculate the elapsed time
fmt.Fprintln(w, "GOOD STATE ON", d.pb.Hostname, hname, shell.FormatDuration(dur))
}
/*
dur := time.Since(d.Lastpoll) // Calculate the elapsed time
if d.CurrentState != pb.DropletState_ON {
fmt.Fprintln(w, "BAD STATE ", d.Hostname, hname, "(", d.StartState, "vs", d.CurrentState, ")", shell.FormatDuration(dur))
} else {
dur := time.Since(d.lastpoll) // Calculate the elapsed time
fmt.Fprintln(w, "GOOD STATE ON", d.Hostname, hname, shell.FormatDuration(dur))
}
*/
}
return
}
@@ -81,26 +75,28 @@ func okHandler(w http.ResponseWriter, r *http.Request) {
log.Info("Handling URL:", tmp, "cluster is not right yet", s)
fmt.Fprintln(w, s)
}
for _, h := range me.hypers {
url := "http://" + h.pb.Hostname + ":2520/kill"
dur := time.Since(h.lastpoll) // Calculate the elapsed time
if dur > 90*time.Second {
h.RestartDaemon()
continue
/*
for _, h := range me.hypers {
url := "http://" + h.pb.Hostname + ":2520/kill"
dur := time.Since(h.lastpoll) // Calculate the elapsed time
if dur > 90*time.Second {
h.RestartDaemon()
continue
}
if h.killcount != 0 {
log.Info("KILL count =", h.killcount, "FOR", h.pb.Hostname, dur, "curl", url)
}
if h.killcount > 10 {
log.Info("KILL count is greater than 10 for host", h.pb.Hostname, dur, "curl", url)
}
// l := shell.FormatDuration(dur)
// log.Warn("HOST =", h.pb.Hostname, "Last poll =", l)
//if d.pb.StartState != "ON" {
// continue
//}
// dur := time.Since(d.lastpoll) // Calculate the elapsed time
}
if h.killcount != 0 {
log.Info("KILL count =", h.killcount, "FOR", h.pb.Hostname, dur, "curl", url)
}
if h.killcount > 10 {
log.Info("KILL count is greater than 10 for host", h.pb.Hostname, dur, "curl", url)
}
// l := shell.FormatDuration(dur)
// log.Warn("HOST =", h.pb.Hostname, "Last poll =", l)
//if d.pb.StartState != "ON" {
// continue
//}
// dur := time.Since(d.lastpoll) // Calculate the elapsed time
}
*/
return
}

main.go

@@ -41,7 +41,8 @@ func main() {
me.unstable = time.Now() // initialize the grid as unstable
me.delay = 5 * time.Second // how often to poll the hypervisors
me.changed = false
me.dmap = make(map[*pb.Droplet]*DropletT)
// me.dmap = make(map[*pb.Droplet]*DropletT)
me.hmap = make(map[*pb.Hypervisor]*HyperT)
// read in the config file
me.cluster = new(pb.Cluster)
@@ -100,11 +101,10 @@ func main() {
log.Info(i, "Event:", e.Droplet, e.FieldName, "orig:", e.OrigVal, "new:", e.NewVal)
me.changed = true
}
if err := me.cluster.ConfigSave(); err != nil {
log.Info("configsave error", err)
}
// if err := me.cluster.ConfigSave(); err != nil {
// log.Info("configsave error", err)
// }
os.Exit(0)
if me.changed {
if argv.Save {
if err := me.cluster.ConfigSave(); err != nil {
@@ -129,6 +129,24 @@ func main() {
newStart(argv.Start)
os.Exit(0)
}
// initialize each hypervisor
for _, pbh := range me.cluster.Hypervisors {
/*
h := findHypervisor(pbh.Hostname)
if h != nil {
continue
}
*/
// wrap each hypervisor from the config file in a local HyperT
h := new(HyperT)
h.pb = pbh
h.lastpoll = time.Now()
me.hmap[pbh] = h
// me.hypers = append(me.hypers, h)
log.Log(EVENT, "config new hypervisors", h.pb.Hostname)
}
// start the watchdog polling for each hypervisor
for _, h := range me.hypers {

poll.go

@@ -8,6 +8,7 @@ import (
"go.wit.com/lib/gui/shell"
pb "go.wit.com/lib/protobuf/virtbuf"
"go.wit.com/log"
"google.golang.org/protobuf/types/known/timestamppb"
)
func (h *HyperT) pollHypervisor() {
@@ -34,43 +35,40 @@ func (h *HyperT) pollHypervisor() {
log.Log(POLL, h.pb.Hostname, "STATE:", state, "HOST:", name, "rest:", fields[2:])
d := findDroplet(name)
if d == nil {
// this is a new unknown droplet (not in the config file)
d = new(DropletT)
d.pb.Hostname = name
d.h = h
d.lastpoll = time.Now()
d.CurrentState = pb.DropletState_ON
me.droplets = append(me.droplets, d)
log.Log(EVENT, name, "IS NEW. ADDED ON", h.pb.Hostname)
// not sure what now?
}
log.Log(INFO, "ALREADY RECORDED", d.pb.Hostname)
log.Log(INFO, "ALREADY RECORDED", d.Hostname)
// update the status to ON and the last polled value
d.CurrentState = pb.DropletState_ON
d.lastpoll = time.Now()
if d.h == nil {
now := time.Now()
d.LastPoll = timestamppb.New(now)
var cur *HyperT
// cur := find(d.CurrentHypervisor)
if cur == nil {
// this means the droplet was in the config file
// but this is the first time it's shown up as running
// this should mean a droplet is running where the config file says it probably should be running
if d.pb.PreferredHypervisor == h.pb.Hostname {
log.Log(EVENT, "new droplet", d.pb.Hostname, "(matches config hypervisor", h.pb.Hostname+")")
d.h = h
if d.PreferredHypervisor == h.pb.Hostname {
log.Log(EVENT, "new droplet", d.Hostname, "(matches config hypervisor", h.pb.Hostname+")")
cur = h
continue
}
log.Log(EVENT, "new droplet", d.pb.Hostname, "on", h.pb.Hostname, "(in config file without preferred hypervisor)")
d.h = h
log.Log(EVENT, "new droplet", d.Hostname, "on", h.pb.Hostname, "(in config file without preferred hypervisor)")
cur = h
continue
}
// this means the droplet is still where it was before
if d.h.pb.Hostname != h.pb.Hostname {
log.Log(EVENT, "droplet", d.h.pb.Hostname, "moved to", h.pb.Hostname)
if cur.pb.Hostname != h.pb.Hostname {
log.Log(EVENT, "droplet", d.Hostname, "moved to", h.pb.Hostname)
continue
}
d.h = h
cur = h
}
continue
}
@@ -78,16 +76,23 @@ func (h *HyperT) pollHypervisor() {
h.killcount = 0 // poll worked. reset killcount
}
func findDroplet(name string) *DropletT {
for _, d := range me.droplets {
if d.pb.Hostname == name {
return d
func findDroplet(name string) *pb.Droplet {
/*
for _, d := range me.droplets {
if d.Hostname == name {
return d
}
}
}
*/
return nil
}
/*
func findHypervisor(name string) *HyperT {
if h, ok := me.hmap[name]; ok {
return h
}
return nil
for _, h := range me.hypers {
if h.pb.Hostname == name {
return h
@@ -95,6 +100,7 @@ func findHypervisor(name string) *HyperT {
}
return nil
}
*/
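One note on the commented-out findHypervisor above: it indexes me.hmap by name, but the map added in main.go and types.go is keyed by *pb.Hypervisor. If a name lookup over that map is wanted, a minimal sketch (the helper name is hypothetical) could be:

// findHypervisorByName walks the pointer-keyed map and matches on the
// Hostname field of the protobuf key; HyperT.pb is the field from types.go.
func findHypervisorByName(name string) *HyperT {
    for pbh, h := range me.hmap {
        if pbh.Hostname == name {
            return h
        }
    }
    return nil
}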
// check the state of the cluster and return a string
// that is intended to be sent to an uptime monitor like Kuma
@@ -107,30 +113,30 @@ func clusterHealthy() (bool, string) {
var unknown int
var unknownList []string
for _, d := range me.droplets {
for _, d := range me.cluster.Droplets {
total += 1
if d.pb.StartState != pb.DropletState_ON {
if d.StartState != pb.DropletState_ON {
continue
}
dur := time.Since(d.lastpoll) // Calculate the elapsed time
dur := time.Since(d.LastPoll.AsTime()) // Calculate the elapsed time
if d.CurrentState == pb.DropletState_UNKNOWN {
// log.Info("SKIP. hostname has not been polled yet", d.pb.Hostname, d.hname)
// log.Info("SKIP. hostname has not been polled yet", d.Hostname, d.hname)
unknown += 1
unknownList = append(unknownList, d.pb.Hostname)
unknownList = append(unknownList, d.Hostname)
continue
}
var hname string
if d.h != nil {
hname = d.h.pb.Hostname
if d.CurrentHypervisor != "" {
hname = d.CurrentHypervisor
}
if d.CurrentState != pb.DropletState_ON {
log.Info("BAD STATE", d.pb.StartState, d.pb.Hostname, hname, "CurrentState =", d.CurrentState, shell.FormatDuration(dur))
log.Info("BAD STATE", d.StartState, d.Hostname, hname, "CurrentState =", d.CurrentState, shell.FormatDuration(dur))
good = false
failed += 1
} else {
dur := time.Since(d.lastpoll) // Calculate the elapsed time
dur := time.Since(d.LastPoll.AsTime()) // Calculate the elapsed time
if dur > time.Minute {
log.Info("GOOD STATE MISSING", d.pb.Hostname, hname, shell.FormatDuration(dur))
log.Info("GOOD STATE MISSING", d.Hostname, hname, shell.FormatDuration(dur))
good = false
d.CurrentState = pb.DropletState_UNKNOWN
failed += 1
@@ -143,7 +149,7 @@ func clusterHealthy() (bool, string) {
continue
}
working += 1
// log.Info("GOOD STATE ON", d.pb.Hostname, d.hname, "dur =", l)
// log.Info("GOOD STATE ON", d.Hostname, d.hname, "dur =", l)
}
}
var summary string = "("
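The poll loop now keeps the last-seen time in the protobuf via google.golang.org/protobuf/types/known/timestamppb. A small sketch of the round trip and the staleness check used by clusterHealthy; the helper names are hypothetical, the one-minute threshold is the one in the hunk above:

// markPolled stores "now" on the droplet's protobuf timestamp.
func markPolled(d *pb.Droplet) {
    d.LastPoll = timestamppb.New(time.Now())
}

// pollIsStale reads the timestamp back with AsTime() for duration math.
func pollIsStale(d *pb.Droplet) bool {
    if d.LastPoll == nil { // never polled yet
        return true
    }
    return time.Since(d.LastPoll.AsTime()) > time.Minute
}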


@@ -11,8 +11,7 @@ import (
)
func newStart(start string) {
meDrop := findDroplet(start)
d := meDrop.pb
d := findDroplet(start)
if d == nil {
log.Info("droplet is unknown:", start)
os.Exit(0)


@@ -4,7 +4,6 @@ import (
"time"
pb "go.wit.com/lib/protobuf/virtbuf"
"libvirt.org/go/libvirtxml"
)
var me virtigoT
@@ -24,10 +23,11 @@ type virtigoT struct {
cluster *pb.Cluster // basic cluster settings
delay time.Duration // how often to poll the hypervisors
// events *pb.Events // cluster events
dmap map[*pb.Droplet]*DropletT // map to the local struct
names []string
hypers []*HyperT
droplets []*DropletT
// dmap map[*pb.Droplet]*DropletT // map to the local struct
hmap map[*pb.Hypervisor]*HyperT // map to the local struct
names []string
hypers []*HyperT
// droplets []*DropletT
killcount int
unstable time.Time // the last time the cluster was incorrect
changed bool
@@ -42,6 +42,7 @@ type HyperT struct {
killcount int
}
/*
// the stuff that is needed for a hypervisor
type DropletT struct {
pb *pb.Droplet // the Droplet protobuf
@@ -51,3 +52,4 @@ type DropletT struct {
lastpoll time.Time // the last time the droplet was seen running
starts int // how many times a start event has been attempted
}
*/