last version restarted virtigod due to libvirt hang
it would appear at this point there is some sort of bug that hangs libvirtd. Stracing it isn't obvious to me due to my lack of experience really working with strace much. When virtigod hangs, 'virsh list' also hangs from the command line. If I kill virtigod, everything starts working again and I get lots of output from libvirtd --listen, but I haven't investigated any of the errors yet or looked further. I will probably just work around this problem rather than solving it in this case, since my need here is to get to working on riscv uboot development, where my usefulness is better. Hopefully. Or maybe it's what I'm more interested in. Maybe this bug doesn't exist on riscv. haha to you reading this, virtualization with riscv doesn't really even exist much yet. The chips are still too new! Signed-off-by: Jeff Carr <jcarr@wit.com>
This commit is contained in:
parent
ca1a78394f
commit
e149b3218d
6
Makefile
6
Makefile
|
@ -12,6 +12,12 @@ all:
|
|||
start:
|
||||
./virtigo --start jcarr
|
||||
|
||||
curl-uptime:
|
||||
curl --silent http://localhost:8080/uptime
|
||||
|
||||
curl-droplets:
|
||||
curl --silent http://localhost:8080/droplets
|
||||
|
||||
# this is for release builds using the go.mod files
|
||||
release-build:
|
||||
@echo ${REDOMOD}
|
||||
|
|
|
@ -4,11 +4,12 @@ import (
|
|||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.wit.com/log"
|
||||
)
|
||||
|
||||
func readConfigFile(filename string) {
|
||||
func readDropletFile(filename string) {
|
||||
// fmt.Fprintln(w, "GOT TEST?")
|
||||
homeDir, _ := os.UserHomeDir()
|
||||
fullname := filepath.Join(homeDir, ".config/virtigo/", filename)
|
||||
|
@ -39,8 +40,55 @@ func readConfigFile(filename string) {
|
|||
me.droplets = append(me.droplets, d)
|
||||
log.Log(EVENT, "NEW CONFIG DROPLET", d.Hostname, d.State)
|
||||
} else {
|
||||
log.Info("not sure what to do here. duplicate", name, "in config file")
|
||||
log.Info("not sure what to do here. duplicate droplet", name, "in config file")
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func readHypervisorFile(filename string) {
|
||||
// fmt.Fprintln(w, "GOT TEST?")
|
||||
homeDir, _ := os.UserHomeDir()
|
||||
fullname := filepath.Join(homeDir, ".config/virtigo/", filename)
|
||||
pfile, err := os.ReadFile(fullname)
|
||||
if err != nil {
|
||||
log.Info("No config file :", err)
|
||||
// w.Write(pfile)
|
||||
return
|
||||
}
|
||||
|
||||
f := string(pfile)
|
||||
for _, line := range strings.Split(f, "\n") {
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) < 1 {
|
||||
continue
|
||||
}
|
||||
name := fields[0]
|
||||
h := addHypervisor(name)
|
||||
if len(fields) < 2 || fields[1] != "active" {
|
||||
h.Active = true
|
||||
} else {
|
||||
h.Active = false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func addHypervisor(name string) *HyperT {
|
||||
var h *HyperT
|
||||
h = findHypervisor(name)
|
||||
if h != nil {
|
||||
log.Info("not sure what to do here. duplicate hypervisor", name, "in config file")
|
||||
return h
|
||||
}
|
||||
log.Log(EVENT, "Adding new hypervisor", name)
|
||||
h = new(HyperT)
|
||||
h.Hostname = name
|
||||
h.Autoscan = true
|
||||
h.Delay = 5 * time.Second
|
||||
h.lastpoll = time.Now()
|
||||
h.Scan = func() {
|
||||
h.pollHypervisor()
|
||||
}
|
||||
me.hypers = append(me.hypers, h)
|
||||
return h
|
||||
}
|
||||
|
|
2
http.go
2
http.go
|
@ -31,7 +31,7 @@ func okHandler(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
|
||||
// is the cluster running what it should?
|
||||
if tmp == "/good" {
|
||||
if tmp == "/droplets" {
|
||||
var good = true
|
||||
for _, d := range me.droplets {
|
||||
if d.State != "ON" {
|
||||
|
|
20
main.go
20
main.go
|
@ -5,7 +5,6 @@ package main
|
|||
import (
|
||||
"embed"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"go.wit.com/dev/alexflint/arg"
|
||||
"go.wit.com/log"
|
||||
|
@ -28,22 +27,13 @@ func main() {
|
|||
log.DaemonMode(true)
|
||||
}
|
||||
|
||||
readConfigFile("droplets")
|
||||
readDropletFile("droplets")
|
||||
readHypervisorFile("hypervisor")
|
||||
|
||||
log.Info("create cluser for", argv.Hosts)
|
||||
for _, s := range argv.Hosts {
|
||||
me.names = append(me.names, s)
|
||||
|
||||
log.Info("Making a hypervisor struct for", s)
|
||||
h := new(HyperT)
|
||||
h.Hostname = s
|
||||
h.Autoscan = true
|
||||
h.Delay = 5 * time.Second
|
||||
h.lastpoll = time.Now()
|
||||
h.Scan = func() {
|
||||
h.pollHypervisor()
|
||||
}
|
||||
me.hypers = append(me.hypers, h)
|
||||
for _, name := range argv.Hosts {
|
||||
h := addHypervisor(name)
|
||||
h.Active = true
|
||||
}
|
||||
|
||||
if argv.Start != "" {
|
||||
|
|
9
poll.go
9
poll.go
|
@ -69,6 +69,15 @@ func findDroplet(name string) *DropletT {
|
|||
return nil
|
||||
}
|
||||
|
||||
func findHypervisor(name string) *HyperT {
|
||||
for _, h := range me.hypers {
|
||||
if h.Hostname == name {
|
||||
return h
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// check the state of the cluster and return a string
|
||||
// that is intended to be sent to an uptime monitor like Kuma
|
||||
func clusterHealthy() (bool, string) {
|
||||
|
|
|
@ -24,6 +24,7 @@ type virtigoT struct {
|
|||
// the stuff that is needed for a hypervisor
|
||||
type HyperT struct {
|
||||
Hostname string // the hypervisor hostname
|
||||
Active bool // is allowed to start new droplets
|
||||
Scan func() // the function to run to scan the hypervisor
|
||||
Autoscan bool // to scan or not to scan
|
||||
Delay time.Duration // how often to poll the hypervisor
|
||||
|
|
Loading…
Reference in New Issue