compiles and lists hypervisors

Signed-off-by: Jeff Carr <jcarr@wit.com>
Jeff Carr 2024-10-26 08:54:28 -05:00
parent 9020957ee7
commit b4518e8b82
8 changed files with 165 additions and 144 deletions

View File

@@ -15,7 +15,7 @@ import (
 )
 // import a libvirt xml file
-func addDomainDroplet(domcfg *libvirtxml.Domain) (*DropletT, []*pb.Event, error) {
+func addDomainDroplet(domcfg *libvirtxml.Domain) (*pb.Droplet, []*pb.Event, error) {
 	var alle []*pb.Event
 	if domcfg == nil {
 		return nil, alle, errors.New("domcfg == nil")
@@ -26,26 +26,20 @@ func addDomainDroplet(domcfg *libvirtxml.Domain) (*DropletT, []*pb.Event, error)
 		return nil, alle, err
 	}
 	if d == nil {
-		// this is a new unknown droplet (not in the config file)
-		d = new(DropletT)
-		d.pb = me.cluster.AddDroplet(domcfg.UUID, domcfg.Name, 2, 2*1024*1024)
-		d.pb.StartState = pb.DropletState_OFF
+		d = me.cluster.AddDroplet(domcfg.UUID, domcfg.Name, 2, 2*1024*1024)
+		d.StartState = pb.DropletState_OFF
 		d.CurrentState = pb.DropletState_UNKNOWN
 		// if the domcfg doesn't have a uuid, make a new one here
-		if d.pb.Uuid == "" {
+		if d.Uuid == "" {
 			u := uuid.New()
-			d.pb.Uuid = u.String()
+			d.Uuid = u.String()
 		}
-		me.droplets = append(me.droplets, d)
-		me.changed = true
 	}
 	alle, err = updateDroplet(d, domcfg)
 	if err != nil {
-		log.Info("updateDroplet() failed for", d.pb.Hostname)
+		log.Info("updateDroplet() failed for", d.Hostname)
 		return d, alle, errors.New("update failed for " + domcfg.Name)
 	}
 	log.Info("added new droplet", domcfg.Name, domcfg.UUID)
@@ -53,36 +47,36 @@ func addDomainDroplet(domcfg *libvirtxml.Domain) (*DropletT, []*pb.Event, error)
 	return d, alle, nil
 }
-func findDomain(domcfg *libvirtxml.Domain) (*DropletT, error) {
-	var found *DropletT
+func findDomain(domcfg *libvirtxml.Domain) (*pb.Droplet, error) {
+	var found *pb.Droplet
 	if domcfg == nil {
 		return nil, errors.New("domcfg == nil")
 	}
-	for _, d := range me.droplets {
-		if d.pb.Hostname == domcfg.Name {
-			if d.pb.Uuid != domcfg.UUID {
+	for _, d := range me.cluster.Droplets {
+		if d.Hostname == domcfg.Name {
+			if d.Uuid != domcfg.UUID {
 				if domcfg.UUID == "" {
 					// ignore blank or nonexistent UUID's
 					// todo: check to see if the uuid already exists ?
-					domcfg.UUID = d.pb.Uuid
+					domcfg.UUID = d.Uuid
 				} else {
-					fmt.Println("Will Change UUID from", d.pb.Uuid, "to", domcfg.UUID, "for hostname", d.pb.Hostname)
-					d.pb.Uuid = domcfg.UUID
+					fmt.Println("Will Change UUID from", d.Uuid, "to", domcfg.UUID, "for hostname", d.Hostname)
+					d.Uuid = domcfg.UUID
 					me.changed = true
 				}
 			}
 			if found == nil {
 				found = d
 			} else {
-				fmt.Println("FOUND TWICE", d.pb.Uuid, domcfg.Name, domcfg.UUID)
+				fmt.Println("FOUND TWICE", d.Uuid, domcfg.Name, domcfg.UUID)
 				return d, errors.New("Found Twice")
 			}
 		}
-		if d.pb.Uuid == domcfg.UUID {
-			if d.pb.Hostname != domcfg.Name {
-				fmt.Println("protobuf has: UUID and Name:", d.pb.Uuid, d.pb.Hostname)
+		if d.Uuid == domcfg.UUID {
+			if d.Hostname != domcfg.Name {
+				fmt.Println("protobuf has: UUID and Name:", d.Uuid, d.Hostname)
 				fmt.Println("libvirt has: UUID and Name:", domcfg.UUID, domcfg.Name)
 				fmt.Println("FOUND UUID WITH MIS-MATCHED NAME", domcfg.Name, domcfg.UUID)
 				return d, errors.New("UUID with mis-matched names")
@@ -93,7 +87,7 @@ func findDomain(domcfg *libvirtxml.Domain) (*DropletT, error) {
 	return found, nil
 }
-func updateDroplet(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error) {
+func updateDroplet(d *pb.Droplet, domcfg *libvirtxml.Domain) ([]*pb.Event, error) {
 	var alle []*pb.Event
 	if d == nil {
@@ -116,24 +110,24 @@ func updateDroplet(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error)
 	if (domcfg.OS != nil) && (domcfg.OS.Type != nil) {
 		// OS Type: &{Arch:x86_64 Machine:pc-i440fx-5.2 Type:hvm}
 		t := domcfg.OS.Type
-		if d.pb.QemuArch != t.Arch {
-			e := NewChangeEvent(d.pb, "Droplet.QemuArch", d.pb.QemuArch, t.Arch)
+		if d.QemuArch != t.Arch {
+			e := NewChangeEvent(d, "Droplet.QemuArch", d.QemuArch, t.Arch)
 			alle = append(alle, e)
-			d.pb.QemuArch = t.Arch
+			d.QemuArch = t.Arch
 		}
-		if d.pb.QemuMachine != t.Machine {
-			e := NewChangeEvent(d.pb, "Droplet.QemuMachine", d.pb.QemuMachine, t.Machine)
+		if d.QemuMachine != t.Machine {
+			e := NewChangeEvent(d, "Droplet.QemuMachine", d.QemuMachine, t.Machine)
 			alle = append(alle, e)
-			d.pb.QemuMachine = t.Machine
+			d.QemuMachine = t.Machine
 		}
 	}
 	// check cpus
-	if d.pb.Cpus != int64(domcfg.VCPU.Value) {
+	if d.Cpus != int64(domcfg.VCPU.Value) {
 		// fmt.Printf("cpus changed. VCPU = %+v\n", domcfg.VCPU)
-		fmt.Printf("cpus changed. from %d to %d\n", d.pb.Cpus, domcfg.VCPU.Value)
-		alle = append(alle, NewChangeEvent(d.pb, "Droplet.Cpus", d.pb.Cpus, domcfg.VCPU.Value))
-		d.pb.Cpus = int64(domcfg.VCPU.Value)
+		fmt.Printf("cpus changed. from %d to %d\n", d.Cpus, domcfg.VCPU.Value)
+		alle = append(alle, NewChangeEvent(d, "Droplet.Cpus", d.Cpus, domcfg.VCPU.Value))
+		d.Cpus = int64(domcfg.VCPU.Value)
 	}
 	// update spice port
@@ -148,11 +142,11 @@ func updateDroplet(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error)
 		if s.AutoPort == "yes" {
 			// should ignore either way
 		} else {
-			if d.pb.SpicePort != int64(s.Port) {
+			if d.SpicePort != int64(s.Port) {
 				// print out, but ignore the port number
-				d.pb.SpicePort = int64(s.Port)
+				d.SpicePort = int64(s.Port)
 				fmt.Printf("Spice Port set to = %d\n", s.Port)
-				alle = append(alle, NewChangeEvent(d.pb, "Droplet.SpicePort", d.pb.SpicePort, s.Port))
+				alle = append(alle, NewChangeEvent(d, "Droplet.SpicePort", d.SpicePort, s.Port))
 			}
 		}
 	}
@@ -197,7 +191,7 @@ func updateDroplet(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error)
 }
 // returns false if something went wrong
-func updateMemory(d *DropletT, domcfg *libvirtxml.Domain) (*pb.Event, error) {
+func updateMemory(d *pb.Droplet, domcfg *libvirtxml.Domain) (*pb.Event, error) {
 	if (d == nil) || (domcfg == nil) {
 		return nil, errors.New("domcfg == nil")
 	}
@@ -221,14 +215,14 @@ func updateMemory(d *DropletT, domcfg *libvirtxml.Domain) (*pb.Event, error) {
 	}
 	e := d.SetMemory(m)
 	if e != nil {
-		fmt.Printf("Memory changed %s to %d %s\n", pb.HumanFormatBytes(d.pb.Memory), domcfg.Memory.Value, domcfg.Memory.Unit)
-		d.pb.Memory = m
+		fmt.Printf("Memory changed %s to %d %s\n", pb.HumanFormatBytes(d.Memory), domcfg.Memory.Value, domcfg.Memory.Unit)
+		d.Memory = m
 		// me.changed = true
 	}
 	return e, nil
 }
-func updateNetwork(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error) {
+func updateNetwork(d *pb.Droplet, domcfg *libvirtxml.Domain) ([]*pb.Event, error) {
 	var allEvents []*pb.Event
 	if (d == nil) || (domcfg == nil) {
 		return nil, errors.New("domcfg == nil")
@@ -297,7 +291,7 @@ func updateNetwork(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error)
 	for mac, brname := range macs {
 		var found bool = false
 		// log.Info("XML has mac address:", mac, brname)
-		for _, eth := range d.pb.Networks {
+		for _, eth := range d.Networks {
 			if eth.Mac == mac {
 				// log.Info("OKAY. FOUND ETH:", eth.Mac, eth.Name, brname)
 				found = true
@@ -319,7 +313,7 @@ func updateNetwork(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error)
 		if !found {
 			if checkUniqueMac(mac) {
 			} else {
-				log.Info("droplet", d.pb.Hostname, "duplicate mac address", mac)
+				log.Info("droplet", d.Hostname, "duplicate mac address", mac)
 				return nil, errors.New("duplicate mac address")
 			}
 			var eth *pb.Network
@@ -329,8 +323,8 @@ func updateNetwork(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error)
 				brname = "worldbr"
 			}
 			eth.Name = brname
-			d.pb.Networks = append(d.pb.Networks, eth)
-			allEvents = append(allEvents, NewChangeEvent(d.pb, "Droplet NewNetwork", "", mac+" "+brname))
+			d.Networks = append(d.Networks, eth)
+			allEvents = append(allEvents, NewChangeEvent(d, "Droplet NewNetwork", "", mac+" "+brname))
 		}
 	}
@@ -350,7 +344,7 @@ func updateNetwork(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error)
 */
 // returns false if something went wrong
-func updateDisk(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error) {
+func updateDisk(d *pb.Droplet, domcfg *libvirtxml.Domain) ([]*pb.Event, error) {
 	var alle []*pb.Event
 	if (d == nil) || (domcfg == nil) {
@@ -369,7 +363,7 @@ func updateDisk(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error) {
 			continue
 		}
-		e, err := insertFilename(d.pb, filename)
+		e, err := insertFilename(d, filename)
 		if err != nil {
 			return alle, err
 		}
@@ -380,7 +374,7 @@ func updateDisk(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error) {
 	/*
 		var found bool = false
-		for _, disk := range d.pb.Disks {
+		for _, disk := range d.Disks {
 			if disk.Filename == filename {
 				log.Verbose("OKAY. FOUND filename", filename)
 				found = true
@@ -390,7 +384,7 @@ func updateDisk(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error) {
 		var disk *pb.Disk
 		disk = new(pb.Disk)
 		disk.Filename = filename
-		d.pb.Disks = append(d.pb.Disks, disk)
+		d.Disks = append(d.Disks, disk)
 		log.Info("New filename", filename)
 		me.changed = true
 	}
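Note (illustrative, not part of the commit): the hunks above all apply the same sync pattern: compare a value parsed from the libvirt XML against the field stored on the pb.Droplet protobuf, append a change event, then overwrite the protobuf field. Below is a minimal, self-contained Go sketch of that pattern, using hypothetical stand-in types instead of the real virtbuf messages and the repo's NewChangeEvent helper.

// Illustrative sketch only, not part of this commit: the compare/record/update
// pattern used by updateDroplet() above, with stand-in types instead of the
// real go.wit.com/lib/protobuf/virtbuf messages.
package main

import "fmt"

// Droplet stands in for pb.Droplet; only the fields the sketch needs.
type Droplet struct {
	Hostname string
	Cpus     int64
}

// Event stands in for pb.Event.
type Event struct {
	Droplet   string
	FieldName string
	OrigVal   string
	NewVal    string
}

// newChangeEvent is a hypothetical stand-in for the NewChangeEvent() helper in the diff.
func newChangeEvent(d *Droplet, field string, origVal, newVal any) *Event {
	return &Event{
		Droplet:   d.Hostname,
		FieldName: field,
		OrigVal:   fmt.Sprint(origVal),
		NewVal:    fmt.Sprint(newVal),
	}
}

// syncCpus mirrors the "check cpus" hunk: if the libvirt XML disagrees with
// the protobuf, record an event and then overwrite the protobuf field.
func syncCpus(d *Droplet, xmlCpus int64) []*Event {
	var alle []*Event
	if d.Cpus != xmlCpus {
		alle = append(alle, newChangeEvent(d, "Droplet.Cpus", d.Cpus, xmlCpus))
		d.Cpus = xmlCpus
	}
	return alle
}

func main() {
	d := &Droplet{Hostname: "vm1", Cpus: 2}
	for _, e := range syncCpus(d, 4) {
		fmt.Println("Event:", e.Droplet, e.FieldName, "orig:", e.OrigVal, "new:", e.NewVal)
	}
}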

View File

@@ -113,8 +113,9 @@ func NewAddEvent(a any, fname string, newval any) *pb.Event {
 	return e
 }
+/*
 // update the droplet memory
-func (d *DropletT) SetMemory(b int64) *pb.Event {
+func (d *pb.Droplet) SetMemory(b int64) *pb.Event {
 	oldm := pb.HumanFormatBytes(d.pb.Memory)
 	newm := pb.HumanFormatBytes(b)
 	if d.pb.Memory == b {
@@ -125,8 +126,11 @@ func (d *DropletT) SetMemory(b int64) *pb.Event {
 	return NewChangeEvent(d.pb, "Droplet.Memory", d.pb.Memory, b)
 }
+*/
+/*
 // update the droplet memory
-func (d *DropletT) SetCpus(b int64) {
+func (d *pb.Droplet) SetCpus(b int64) {
 	log.Info("Set the number of cpus for the droplet", b)
 }
+*/

View File

@@ -10,9 +10,11 @@ import (
 	"go.wit.com/log"
 )
-func (d *DropletT) Start() {
+/*
+func (d *pb.Droplet) Start() {
 	log.Info("a new virtual machine is running")
 }
+*/
 func (h *HyperT) RestartDaemon() {
 	url := "http://" + h.pb.Hostname + ":2520/kill"
@@ -43,34 +45,34 @@ func clusterReady() (bool, string) {
 	return false, "clusterReady() is unstable for " + shell.FormatDuration(last)
 }
-func (d *DropletT) dropletReady() (bool, string) {
+func dropletReady(d *pb.Droplet) (bool, string) {
 	if d.CurrentState == pb.DropletState_ON {
 		return false, "EVENT start droplet is already ON"
 	}
-	if d.starts > 2 {
+	if d.Starts > 2 {
 		// reason := "EVENT start droplet has already been started " + d.starts + " times"
-		return false, fmt.Sprintln("EVENT start droplet has already been started ", d.starts, " times")
+		return false, fmt.Sprintln("EVENT start droplet has already been started ", d.Starts, " times")
 	}
 	return true, ""
 }
-func (h *HyperT) Start(d *DropletT) (bool, string) {
+func (h *HyperT) Start(d *pb.Droplet) (bool, string) {
 	ready, result := clusterReady()
 	if !ready {
 		return false, result
 	}
-	ready, result = d.dropletReady()
+	ready, result = dropletReady(d)
 	if !ready {
 		return false, result
 	}
-	url := "http://" + h.pb.Hostname + ":2520/start?start=" + d.pb.Hostname
+	url := "http://" + h.pb.Hostname + ":2520/start?start=" + d.Hostname
 	s := shell.Wget(url)
 	result = "EVENT start droplet url: " + url + "\n"
 	result += "EVENT start droplet response: " + s.String()
 	// increment the counter for a start attempt working
-	d.starts += 1
+	d.Starts += 1
 	// mark the cluster as unstable so droplet starts can be throttled
 	me.unstable = time.Now()
@@ -102,7 +104,7 @@ func Start(name string) (bool, string) {
 	var pool []*HyperT
 	for _, h := range me.hypers {
 		result += fmt.Sprintln("could start droplet on", name, "on", h.pb.Hostname, h.pb.Active)
-		if d.pb.PreferredHypervisor == h.pb.Hostname {
+		if d.PreferredHypervisor == h.pb.Hostname {
 			// the config file says this droplet should run on this hypervisor
 			a, b := h.Start(d)
 			return a, result + b

http.go (68 changed lines)

@@ -4,9 +4,7 @@ import (
 	"fmt"
 	"net/http"
 	"strings"
-	"time"
-	"go.wit.com/lib/gui/shell"
 	pb "go.wit.com/lib/protobuf/virtbuf"
 	"go.wit.com/lib/virtigoxml"
 	"go.wit.com/log"
@@ -24,23 +22,19 @@ func okHandler(w http.ResponseWriter, r *http.Request) {
 	// is the cluster running what it should?
 	if tmp == "/droplets" {
-		for _, d := range me.droplets {
-			if d.pb.StartState != pb.DropletState_ON {
+		for _, d := range me.cluster.Droplets {
+			if d.StartState != pb.DropletState_ON {
 				continue
 			}
-			dur := time.Since(d.lastpoll) // Calculate the elapsed time
-			var hname string
-			if d.h == nil {
-				hname = ""
+			/*
+				dur := time.Since(d.Lastpoll) // Calculate the elapsed time
+				if d.CurrentState != pb.DropletState_ON {
+					fmt.Fprintln(w, "BAD STATE ", d.Hostname, hname, "(", d.StartState, "vs", d.CurrentState, ")", shell.FormatDuration(dur))
 			} else {
-				hname = d.h.pb.Hostname
-			}
-			if d.CurrentState != pb.DropletState_ON {
-				fmt.Fprintln(w, "BAD STATE ", d.pb.Hostname, hname, "(", d.pb.StartState, "vs", d.CurrentState, ")", shell.FormatDuration(dur))
-			} else {
-				dur := time.Since(d.lastpoll) // Calculate the elapsed time
-				fmt.Fprintln(w, "GOOD STATE ON", d.pb.Hostname, hname, shell.FormatDuration(dur))
-			}
+				dur := time.Since(d.lastpoll) // Calculate the elapsed time
+				fmt.Fprintln(w, "GOOD STATE ON", d.Hostname, hname, shell.FormatDuration(dur))
+				}
+			*/
 		}
 		return
 	}
@@ -81,26 +75,28 @@ func okHandler(w http.ResponseWriter, r *http.Request) {
 		log.Info("Handling URL:", tmp, "cluster is not right yet", s)
 		fmt.Fprintln(w, s)
 	}
-	for _, h := range me.hypers {
-		url := "http://" + h.pb.Hostname + ":2520/kill"
-		dur := time.Since(h.lastpoll) // Calculate the elapsed time
-		if dur > 90*time.Second {
-			h.RestartDaemon()
-			continue
+	/*
+		for _, h := range me.hypers {
+			url := "http://" + h.pb.Hostname + ":2520/kill"
+			dur := time.Since(h.lastpoll) // Calculate the elapsed time
+			if dur > 90*time.Second {
+				h.RestartDaemon()
+				continue
+			}
+			if h.killcount != 0 {
+				log.Info("KILL count =", h.killcount, "FOR", h.pb.Hostname, dur, "curl", url)
+			}
+			if h.killcount > 10 {
+				log.Info("KILL count is greater than 10 for host", h.pb.Hostname, dur, "curl", url)
+			}
+			// l := shell.FormatDuration(dur)
+			// log.Warn("HOST =", h.pb.Hostname, "Last poll =", l)
+			//if d.pb.StartState != "ON" {
+			//	continue
+			//}
+			// dur := time.Since(d.lastpoll) // Calculate the elapsed time
 		}
-		if h.killcount != 0 {
-			log.Info("KILL count =", h.killcount, "FOR", h.pb.Hostname, dur, "curl", url)
-		}
-		if h.killcount > 10 {
-			log.Info("KILL count is greater than 10 for host", h.pb.Hostname, dur, "curl", url)
-		}
-		// l := shell.FormatDuration(dur)
-		// log.Warn("HOST =", h.pb.Hostname, "Last poll =", l)
-		//if d.pb.StartState != "ON" {
-		//	continue
-		//}
-		// dur := time.Since(d.lastpoll) // Calculate the elapsed time
-	}
+	*/
 	return
 }

main.go (28 changed lines)

@@ -41,7 +41,8 @@ func main() {
 	me.unstable = time.Now() // initialize the grid as unstable
 	me.delay = 5 * time.Second // how often to poll the hypervisors
 	me.changed = false
-	me.dmap = make(map[*pb.Droplet]*DropletT)
+	// me.dmap = make(map[*pb.Droplet]*DropletT)
+	me.hmap = make(map[*pb.Hypervisor]*HyperT)
 	// read in the config file
 	me.cluster = new(pb.Cluster)
@@ -100,11 +101,10 @@ func main() {
 		log.Info(i, "Event:", e.Droplet, e.FieldName, "orig:", e.OrigVal, "new:", e.NewVal)
 		me.changed = true
 	}
-	if err := me.cluster.ConfigSave(); err != nil {
-		log.Info("configsave error", err)
-	}
-	os.Exit(0)
+	// if err := me.cluster.ConfigSave(); err != nil {
+	// log.Info("configsave error", err)
+	// }
 	if me.changed {
 		if argv.Save {
 			if err := me.cluster.ConfigSave(); err != nil {
@@ -129,6 +129,24 @@ func main() {
 		newStart(argv.Start)
 		os.Exit(0)
 	}
+	// initialize each hypervisor
+	for _, pbh := range me.cluster.Hypervisors {
+		/*
+			h := findHypervisor(pbh.Hostname)
+			if h != nil {
+				continue
+			}
+		*/
+		// this is a new unknown droplet (not in the config file)
+		h := new(HyperT)
+		h.pb = pbh
+		h.lastpoll = time.Now()
+		me.hmap[pbh] = h
+		// me.hypers = append(me.hypers, h)
+		log.Log(EVENT, "config new hypervisors", h.pb.Hostname)
+	}
 	// start the watchdog polling for each hypervisor
 	for _, h := range me.hypers {

poll.go (76 changed lines)

@@ -8,6 +8,7 @@ import (
 	"go.wit.com/lib/gui/shell"
 	pb "go.wit.com/lib/protobuf/virtbuf"
 	"go.wit.com/log"
+	"google.golang.org/protobuf/types/known/timestamppb"
 )
 func (h *HyperT) pollHypervisor() {
@@ -34,43 +35,40 @@ func (h *HyperT) pollHypervisor() {
 		log.Log(POLL, h.pb.Hostname, "STATE:", state, "HOST:", name, "rest:", fields[2:])
 		d := findDroplet(name)
 		if d == nil {
-			// this is a new unknown droplet (not in the config file)
-			d = new(DropletT)
-			d.pb.Hostname = name
-			d.h = h
-			d.lastpoll = time.Now()
-			d.CurrentState = pb.DropletState_ON
-			me.droplets = append(me.droplets, d)
-			log.Log(EVENT, name, "IS NEW. ADDED ON", h.pb.Hostname)
+			// not sure whawt now?
 		}
-		log.Log(INFO, "ALREADY RECORDED", d.pb.Hostname)
+		log.Log(INFO, "ALREADY RECORDED", d.Hostname)
 		// update the status to ON and the last polled value
 		d.CurrentState = pb.DropletState_ON
-		d.lastpoll = time.Now()
-		if d.h == nil {
+		now := time.Now()
+		d.LastPoll = timestamppb.New(now)
+		var cur *HyperT
+		// cur := find(d.CurrentHypervisor)
+		if cur == nil {
 			// this means the droplet was in the config file
 			// but this is the first time it's shown up as running
 			// this should mean a droplet is running where the config file says it probably should be running
-			if d.pb.PreferredHypervisor == h.pb.Hostname {
-				log.Log(EVENT, "new droplet", d.pb.Hostname, "(matches config hypervisor", h.pb.Hostname+")")
-				d.h = h
+			if d.PreferredHypervisor == h.pb.Hostname {
+				log.Log(EVENT, "new droplet", d.Hostname, "(matches config hypervisor", h.pb.Hostname+")")
+				cur = h
 				continue
 			}
-			log.Log(EVENT, "new droplet", d.pb.Hostname, "on", h.pb.Hostname, "(in config file without preferred hypervisor)")
-			d.h = h
+			log.Log(EVENT, "new droplet", d.Hostname, "on", h.pb.Hostname, "(in config file without preferred hypervisor)")
+			cur = h
 			continue
 		}
 		// this means the droplet is still where it was before
-		if d.h.pb.Hostname != h.pb.Hostname {
-			log.Log(EVENT, "droplet", d.h.pb.Hostname, "moved to", h.pb.Hostname)
+		if cur.pb.Hostname != h.pb.Hostname {
+			log.Log(EVENT, "droplet", d.Hostname, "moved to", h.pb.Hostname)
 			continue
 		}
-		d.h = h
+		cur = h
 		}
 		continue
 	}
@@ -78,16 +76,23 @@ func (h *HyperT) pollHypervisor() {
 	h.killcount = 0 // poll worked. reset killcount
 }
-func findDroplet(name string) *DropletT {
-	for _, d := range me.droplets {
-		if d.pb.Hostname == name {
-			return d
-		}
-	}
+func findDroplet(name string) *pb.Droplet {
+	/*
+		for _, d := range me.droplets {
+			if d.Hostname == name {
+				return d
+			}
+		}
+	*/
 	return nil
 }
+/*
 func findHypervisor(name string) *HyperT {
+	if h, ok := me.hmap[name]; ok {
+		return h
+	}
+	return nil
 	for _, h := range me.hypers {
 		if h.pb.Hostname == name {
 			return h
@@ -95,6 +100,7 @@ func findHypervisor(name string) *HyperT {
 		}
 	return nil
 }
+*/
 // check the state of the cluster and return a string
 // that is intended to be sent to an uptime monitor like Kuma
@@ -107,30 +113,30 @@ func clusterHealthy() (bool, string) {
 	var unknown int
 	var unknownList []string
-	for _, d := range me.droplets {
+	for _, d := range me.cluster.Droplets {
 		total += 1
-		if d.pb.StartState != pb.DropletState_ON {
+		if d.StartState != pb.DropletState_ON {
 			continue
 		}
-		dur := time.Since(d.lastpoll) // Calculate the elapsed time
+		dur := time.Since(d.LastPoll.AsTime()) // Calculate the elapsed time
 		if d.CurrentState == pb.DropletState_UNKNOWN {
-			// log.Info("SKIP. hostname has not been polled yet", d.pb.Hostname, d.hname)
+			// log.Info("SKIP. hostname has not been polled yet", d.Hostname, d.hname)
 			unknown += 1
-			unknownList = append(unknownList, d.pb.Hostname)
+			unknownList = append(unknownList, d.Hostname)
 			continue
 		}
 		var hname string
-		if d.h != nil {
-			hname = d.h.pb.Hostname
+		if d.CurrentHypervisor != "" {
+			hname = d.CurrentHypervisor
 		}
 		if d.CurrentState != pb.DropletState_ON {
-			log.Info("BAD STATE", d.pb.StartState, d.pb.Hostname, hname, "CurrentState =", d.CurrentState, shell.FormatDuration(dur))
+			log.Info("BAD STATE", d.StartState, d.Hostname, hname, "CurrentState =", d.CurrentState, shell.FormatDuration(dur))
 			good = false
 			failed += 1
 		} else {
-			dur := time.Since(d.lastpoll) // Calculate the elapsed time
+			dur := time.Since(d.LastPoll.AsTime()) // Calculate the elapsed time
 			if dur > time.Minute {
-				log.Info("GOOD STATE MISSING", d.pb.Hostname, hname, shell.FormatDuration(dur))
+				log.Info("GOOD STATE MISSING", d.Hostname, hname, shell.FormatDuration(dur))
 				good = false
 				d.CurrentState = pb.DropletState_UNKNOWN
 				failed += 1
@@ -143,7 +149,7 @@ func clusterHealthy() (bool, string) {
 			continue
 		}
 		working += 1
-		// log.Info("GOOD STATE ON", d.pb.Hostname, d.hname, "dur =", l)
+		// log.Info("GOOD STATE ON", d.Hostname, d.hname, "dur =", l)
 	}
 	}
 	var summary string = "("

View File

@@ -11,8 +11,7 @@ import (
 )
 func newStart(start string) {
-	meDrop := findDroplet(start)
-	d := meDrop.pb
+	d := findDroplet(start)
 	if d == nil {
 		log.Info("droplet is unknown:", start)
 		os.Exit(0)

View File

@@ -4,7 +4,6 @@ import (
 	"time"
 	pb "go.wit.com/lib/protobuf/virtbuf"
-	"libvirt.org/go/libvirtxml"
 )
 var me virtigoT
@@ -24,10 +23,11 @@ type virtigoT struct {
 	cluster *pb.Cluster // basic cluster settings
 	delay time.Duration // how often to poll the hypervisors
 	// events *pb.Events // cluster events
-	dmap map[*pb.Droplet]*DropletT // map to the local struct
-	names []string
-	hypers []*HyperT
-	droplets []*DropletT
+	// dmap map[*pb.Droplet]*DropletT // map to the local struct
+	hmap map[*pb.Hypervisor]*HyperT // map to the local struct
+	names []string
+	hypers []*HyperT
+	// droplets []*DropletT
 	killcount int
 	unstable time.Time // the last time the cluster was incorrect
 	changed bool
@@ -42,6 +42,7 @@ type HyperT struct {
 	killcount int
 }
+/*
 // the stuff that is needed for a hypervisor
 type DropletT struct {
 	pb *pb.Droplet // the Droplet protobuf
@@ -51,3 +52,4 @@ type DropletT struct {
 	lastpoll time.Time // the last time the droplet was seen running
 	starts int // how many times a start event has been attempted
 }
+*/
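
Note (illustrative, not part of the commit): main.go now fills me.hmap, which structs.go declares as keyed by the *pb.Hypervisor pointer, while the commented-out findHypervisor() in poll.go indexes it with a hostname string; that direct index only works against a string-keyed map, so with the pointer key a lookup has to scan. Below is a minimal, self-contained Go sketch of the initialize-then-look-up flow under the pointer-keyed layout, with hypothetical stand-in types for pb.Hypervisor and HyperT.

// Illustrative sketch only, not part of this commit: the hypervisor map that
// main.go now builds, with stand-in types instead of the real pb.Hypervisor
// and HyperT from this repository.
package main

import (
	"fmt"
	"time"
)

// Hypervisor stands in for pb.Hypervisor (the protobuf config entry).
type Hypervisor struct {
	Hostname string
}

// HyperT stands in for the local wrapper holding runtime-only state.
type HyperT struct {
	pb       *Hypervisor
	lastpoll time.Time
}

func main() {
	// In the real code this list comes from me.cluster.Hypervisors.
	hypervisors := []*Hypervisor{{Hostname: "hyper1"}, {Hostname: "hyper2"}}

	// Mirror of the new main.go loop: wrap each config entry and key the
	// map by the protobuf pointer, as me.hmap does.
	hmap := make(map[*Hypervisor]*HyperT)
	for _, pbh := range hypervisors {
		hmap[pbh] = &HyperT{pb: pbh, lastpoll: time.Now()}
	}

	// With a pointer-keyed map, a lookup by hostname has to scan the map;
	// a map[string]*HyperT would allow a direct index instead.
	findHypervisor := func(name string) *HyperT {
		for pbh, h := range hmap {
			if pbh.Hostname == name {
				return h
			}
		}
		return nil
	}

	if h := findHypervisor("hyper2"); h != nil {
		fmt.Println("found hypervisor", h.pb.Hostname, "last polled at", h.lastpoll.Format(time.RFC3339))
	}
}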