okay then

parent 8eda4cf2da
commit d3f809b25d

Makefile (3 changed lines)
@@ -119,7 +119,8 @@ protogen:
 	cd ~/go/src/google.golang.org/protobuf/cmd/protoc-gen-go && go install
 
 gocui: install
-	virtigo --gui gocui --gui-verbose --gui-file ../../toolkits/gocui/gocui.so >/tmp/forge.log 2>&1
+	virtigo --gui gocui --gui-verbose --gui-file ../../toolkits/gocui/gocui.so --admin
+	# virtigo --gui gocui --gui-verbose --gui-file ../../toolkits/gocui/gocui.so >/tmp/forge.log 2>&1
 
 log:
 	journalctl -f -xeu virtigod.service
argv.go (1 changed line)
@@ -15,6 +15,7 @@ type args struct {
 	Droplet   *DropletCmd `arg:"subcommand:droplet" help:"send events to a droplet"`
 	Config    string      `arg:"env:VIRTIGO_HOME" help:"defaults to ~/.config/virtigo/"`
 	Server    string      `arg:"env:VIRTIGO_SERVER" help:"what virtigo cluster to connect to"`
+	Localhost bool        `arg:"--localhost" help:"use the local libvirt"`
 	Daemon    bool        `arg:"--daemon" help:"run as a daemon"`
 	Verbose   bool        `arg:"--verbose" help:"talk more"`
 	Port      int         `arg:"--port" default:"8080" help:"allow droplet events via http"`
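The new --localhost flag pairs with the doLocalhostAdminGui() helper added below. How main() consumes the flag is not part of this diff; a minimal sketch of the assumed wiring, after go-arg has parsed the command line into argv:

	// Hypothetical wiring, not shown in this commit: branch on the new field
	// and build the admin GUI against the local libvirt instead of dialing a
	// remote virtigo cluster.
	if argv.Localhost {
		admin := doLocalhostAdminGui() // helper added later in this commit
		admin.refresh()                // assumed: populate the labels once at startup
	}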
@@ -84,6 +84,70 @@ func (admin *adminT) refresh() {
 
 var client *http.Client
 
+func doLocalhostAdminGui() *adminT {
+	admin := new(adminT)
+
+	admin.uptime = me.gwin.Group.NewLabel("uptime")
+
+	grid := me.gwin.Group.RawGrid()
+
+	grid.NewButton("show hypervisors", func() {
+		if admin.hypervisors == nil {
+			log.Info("hypervisors not initialized")
+			return
+		}
+		log.Info("Hypervisors len=", admin.hypervisors.Len())
+		admin.hwin = newHypervisorsWindow()
+		admin.hwin.doStdHypervisors(admin.hypervisors)
+		admin.hwin.win.Custom = func() {
+			log.Info("hiding table window")
+		}
+	})
+
+	grid.NewButton("droplets", func() {
+		if admin.droplets == nil {
+			log.Info("droplets not initialized")
+			return
+		}
+		admin.dwin = newDropletsWindow()
+		admin.dwin.win.Custom = func() {
+			log.Info("hiding droplet table window")
+		}
+		var found *virtpb.Droplets
+		found = virtpb.NewDroplets()
+		all := admin.droplets.All()
+		for all.Scan() {
+			vm := all.Next()
+			if vm.Current.State != virtpb.DropletState_ON {
+				continue
+			}
+			found.Append(vm)
+		}
+		admin.dwin.doActiveDroplets(found)
+	})
+
+	grid.NewButton("events", func() {
+		if admin.events == nil {
+			log.Info("events are not initialized")
+			return
+		}
+		log.Info("Events len=", admin.events.Len())
+		admin.ewin = newEventsWindow()
+		admin.ewin.doStdEvents(admin.events)
+		admin.ewin.win.Custom = func() {
+			log.Info("hiding table window")
+		}
+	})
+
+	grid.NextRow()
+
+	grid.NewButton("refresh", func() {
+		admin.refresh()
+	})
+
+	return admin
+}
+
 func (admin *adminT) doAdminGui() {
 	me.myGui = gui.New()
 	me.myGui.InitEmbed(resources)

@@ -189,7 +253,12 @@ func (admin *adminT) makeClusterGroup(c *virtpb.Cluster) {
 		badExit(err)
 	}
 
-	group := me.gwin.Bottom.NewGroup(admin.url.Hostname())
+	name := c.GetName()
+	if name == "" {
+		name = admin.url.Hostname()
+	}
+
+	group := me.gwin.Bottom.NewGroup(name)
 	admin.uptime = group.NewLabel("uptime")
 
 	grid := group.RawGrid()
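doLocalhostAdminGui() relies on several adminT fields whose declarations sit outside this diff. A rough sketch of the shape the code above assumes; every name not visible in the hunks is a guess inferred from usage, not taken from the repository:

	// Inferred only from usage in this commit; apart from virtpb.Droplets,
	// the field types below are guesses and may not match the real struct.
	type adminT struct {
		url         *url.URL            // cluster URL; Hostname() is the fallback group title
		uptime      *gui.Node           // guess: label widget returned by NewLabel()
		hypervisors *virtpb.Hypervisors // guess: has Len(), feeds the hypervisor table
		droplets    *virtpb.Droplets    // has All()/Scan()/Next(); matches virtpb.NewDroplets()
		events      *virtpb.Events      // guess: has Len(), feeds the events table
		hwin        *hypervisorsWindow  // guess: returned by newHypervisorsWindow()
		dwin        *dropletsWindow     // guess: returned by newDropletsWindow()
		ewin        *eventsWindow       // guess: returned by newEventsWindow()
	}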
main.go (139 changed lines)
@@ -99,145 +99,6 @@ func main() {
 		okExit("admin close")
 	}
 
-	/*
-		// set defaults
-		me.unstable = time.Now() // initialize the grid as unstable
-		me.changed = false
-		me.hmap = make(map[*virtpb.Hypervisor]*HyperT)
-
-		// how long a droplet can be missing until it's declared dead
-		me.unstableTimeout = 17 * time.Second
-		me.missingDropletTimeout = time.Minute // not sure the difference between these values
-
-		// how often to poll the hypervisors
-		me.hyperPollDelay = 5 * time.Second
-
-		// how long the cluster must be stable before new droplets can be started
-		me.clusterStableDuration = 37 * time.Second
-
-		me.cluster = virtpb.InitCluster()
-		if err := me.cluster.ConfigLoad(); err != nil {
-			log.Info("config load error", err)
-			log.Info("")
-			log.Info("You have never run this before")
-			log.Info("init example cloud here")
-			log.Sleep(2)
-			os.Exit(-1)
-		}
-
-		loop := me.cluster.DropletsAll() // get the list of droplets
-		for loop.Scan() {
-			d := loop.Next()
-			if d == nil {
-				fmt.Println("d == nil")
-				os.Exit(-1)
-			}
-			fmt.Println("Droplet UUID:", d.Uuid)
-			if d.Current == nil {
-				d.Current = new(virtpb.Current)
-			}
-			d.SetState(virtpb.DropletState_OFF)
-			log.Info("droplet", d.Hostname)
-		}
-		hmm := "pihole.wit.com"
-		d := me.cluster.FindDropletByName(hmm)
-		if d == nil {
-			log.Info("did not find found droplet", hmm)
-		} else {
-			log.Info("found droplet", d.Hostname, d)
-		}
-
-		var newEvents []*virtpb.Event
-
-		// sanity check the cluster & droplets
-		if _, _, err := ValidateDroplets(); err != nil {
-			log.Info("todo: add flag to ignore. for now, fix problems in the config file.")
-			os.Exit(0)
-		}
-		newe, err := ValidateDiskFilenames()
-		if err != nil {
-			log.Info(err)
-			os.Exit(-1)
-		}
-		// this is a new droplet. add it to the cluster
-		for _, e := range newe {
-			newEvents = append(newEvents, e)
-		}
-		ValidateUniqueFilenames()
-
-		for _, filename := range argv.Xml {
-			domcfg, err := virtigolib.ReadXml(filename)
-			if err != nil {
-				// parsing the libvirt xml file failed
-				log.Info("error:", filename, err)
-				log.Info("readXml() error", filename)
-				log.Info("readXml() error", err)
-				log.Info("libvirt XML will have to be fixed by hand")
-				os.Exit(-1)
-			}
-			// this is a new droplet. add it to the cluster
-			log.Info("Add XML Droplet here", domcfg.Name)
-			_, newe, err := virtigolib.AddDomainDroplet(me.cluster, domcfg)
-			if err != nil {
-				log.Info("addDomainDroplet() error", filename)
-				log.Info("addDomainDroplet() error", err)
-				log.Info("libvirt XML will have to be fixed by hand")
-				os.Exit(-1)
-			}
-			for _, e := range newe {
-				newEvents = append(newEvents, e)
-			}
-		}
-		for i, e := range newEvents {
-			log.Info(i, "Event:", e.Droplet, e.FieldName, "orig:", e.OrigVal, "new:", e.NewVal)
-			me.changed = true
-		}
-
-		if me.changed {
-			if err := me.cluster.ConfigSave(); err != nil {
-				log.Info("configsave error", err)
-				os.Exit(-1)
-			}
-			log.Info("XML changes saved in protobuf config")
-			os.Exit(0)
-		}
-		if len(argv.Xml) != 0 {
-			log.Info("No XML changes found")
-			os.Exit(0)
-		}
-
-		// initialize each hypervisor
-		for _, pbh := range me.cluster.H.Hypervisors {
-			// this is a new unknown droplet (not in the config file)
-			var h *HyperT
-			h = new(HyperT)
-			h.pb = pbh
-			h.lastDroplets = make(map[string]time.Time)
-			h.lastpoll = time.Now()
-
-			me.hmap[pbh] = h
-			me.hypers = append(me.hypers, h)
-			log.Log(EVENT, "config new hypervisors", h.pb.Hostname)
-		}
-
-		// start the watchdog polling for each hypervisor
-		for _, h := range me.hypers {
-			log.Info("starting polling on", h.pb.Hostname)
-
-			// start a watchdog on each hypervisor
-			go h.NewWatchdog()
-		}
-
-		var cloud *virtigolib.CloudManager
-		cloud = virtigolib.NewCloud()
-		found, _ := cloud.FindDropletByName("www.wit.com")
-		if found == nil {
-			log.Info("d == nil")
-		} else {
-			log.Info("d == ", found)
-		}
-	*/
-
 	if argv.Daemon {
 		if err := doDaemon(); err != nil {
 			badExit(err)
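Everything removed in this hunk was already wrapped in a /* ... */ block, so the deletion drops dead commented-out bootstrap code without changing behavior. Per the hunk's context lines, what remains at this point in main() is the daemon branch (closing braces assumed; the rest of main() is outside this diff):

	// Surviving control flow at this point in main(), per the context lines above.
	if argv.Daemon {
		if err := doDaemon(); err != nil {
			badExit(err)
		}
	}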