Compare commits

master..v0.2.2

No commits in common. "master" and "v0.2.2" have entirely different histories.

30 changed files with 496 additions and 2542 deletions

.gitignore (2 lines changed)

@ -2,7 +2,5 @@
go.mod go.mod
go.sum go.sum
files/
virtigo virtigo
virtigod virtigod

Makefile

@ -1,32 +1,17 @@
.PHONY: build .PHONY: build
VERSION = $(shell git describe --tags) VERSION = $(shell git describe --tags)
BUILDTIME = $(shell date +%Y.%m.%d)
# create the go.mod and go.sum if this is a brand new repo # create the go.mod and go.sum if this is a brand new repo
# REDOMOD = $(shell if [ -e go.mod ]; then echo go.mod; else echo no go mod; fi)
REDOMOD = $(shell if [ -e go.sum ]; then echo go.sum exists; else GO111MODULE= go mod init; GO111MODULE= go mod tidy; fi) REDOMOD = $(shell if [ -e go.sum ]; then echo go.sum exists; else GO111MODULE= go mod init; GO111MODULE= go mod tidy; fi)
all: install all: build
@echo build worked ./virtigo --version
virtigo list droplets ./virtigo --help
virtigo list droplets --on
virtigo droplet show --name check.lab.wit.org
virtigo droplet start --name check.lab.wit.org
build: goimports vet build:
GO111MODULE=off go build \ GO111MODULE=off go build -v -ldflags "-X main.Version=${VERSION} -X gui.GUIVERSION=${VERSION}"
-ldflags "-X main.VERSION=${VERSION} -X main.BUILDTIME=${BUILDTIME} -X gui.GUIVERSION=${VERSION}"
verbose: goimports vet
GO111MODULE=off go build -v -x \
-ldflags "-X main.VERSION=${VERSION} -X main.BUILDTIME=${BUILDTIME} -X gui.GUIVERSION=${VERSION}"
install: goimports vet
GO111MODULE=off go install -v -x \
-ldflags "-X main.VERSION=${VERSION} -X main.BUILDTIME=${BUILDTIME} -X gui.GUIVERSION=${VERSION}"
andlabs: verbose
./virtigo --gui andlabs
# makes a .deb package # makes a .deb package
debian: debian:
@ -79,10 +64,6 @@ release-build:
goimports: goimports:
goimports -w *.go goimports -w *.go
vet:
@GO111MODULE=off go vet
@echo this go binary package builds okay
# remake the go.mod and go.sum files # remake the go.mod and go.sum files
redomod: redomod:
rm -f go.* rm -f go.*
@ -92,7 +73,6 @@ redomod:
clean: clean:
rm -f go.* rm -f go.*
rm -f virtigo* rm -f virtigo*
go-mod-clean purge
# git clone the sources and all the golang dependancies into ~/go/src # git clone the sources and all the golang dependancies into ~/go/src
# if you don't have go-clone, you can get it from http://go.wit.com/ # if you don't have go-clone, you can get it from http://go.wit.com/
@ -113,14 +93,3 @@ http-missing:
http-dumplibvirtxml: http-dumplibvirtxml:
curl --silent http://localhost:8080//dumplibvirtxml curl --silent http://localhost:8080//dumplibvirtxml
protogen:
go-clone google.golang.org/protobuf
cd ~/go/src/google.golang.org/protobuf/cmd/protoc-gen-go && go install
gocui: install
virtigo --gui gocui --gui-verbose --gui-file ../../toolkits/gocui/gocui.so --admin
# virtigo --gui gocui --gui-verbose --gui-file ../../toolkits/gocui/gocui.so --admin >/tmp/forge.log 2>&1
log:
journalctl -f -xeu virtigod.service
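
Note: the -ldflags "-X ..." lines in the Makefile above set string variables in the Go binary at link time. As a minimal sketch (the variable names match the flags in this Makefile; the rest is illustrative), the package-level declarations those flags target look like:

package main

import "fmt"

// Populated at link time via the Makefile, e.g.
//   go build -ldflags "-X main.VERSION=${VERSION} -X main.BUILDTIME=${BUILDTIME}"
// If the flags are omitted, both stay empty strings.
var VERSION string
var BUILDTIME string

func main() {
	fmt.Println("virtigo", VERSION, "built", BUILDTIME)
}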

README.md

@ -1,8 +1,6 @@
# virtigo: a control panel for your virtual machine cluster virtigo: a control panel for your virtual machine cluster
There is no greater thrill for a linux sys admin than running your own cloud. This is an attempt to make something that should:
# This is an attempt to make something that should:
* Maintain the master list of virtual machines that should be running at all times * Maintain the master list of virtual machines that should be running at all times
* Work with a cluster of dom0 hypervisiors via libvirt and/or qemu * Work with a cluster of dom0 hypervisiors via libvirt and/or qemu
@ -14,7 +12,7 @@ There is no greater thrill for a linux sys admin than running your own cloud.
* Work in GUI mode (GTK/QT/etc) but ALSO the console (ncurses) * Work in GUI mode (GTK/QT/etc) but ALSO the console (ncurses)
* GPL'd with the intent for use with homelab and personal hobbyists * GPL'd with the intent for use with homelab and personal hobbyists
# Notes & Goals: Notes & Goals:
* Be super easy to use. * Be super easy to use.
* Automatically map access to serial and graphical consoles * Automatically map access to serial and graphical consoles
@ -28,7 +26,7 @@ There is no greater thrill for a linux sys admin than running your own cloud.
* Automatic live migration to decommission nodes * Automatic live migration to decommission nodes
* Implement iptable rules via the virtigo daemon * Implement iptable rules via the virtigo daemon
# Inspired by: Inspired by:
* kvm * kvm
* virt-manager * virt-manager

argv.go (89 lines changed)

@ -1,11 +1,6 @@
package main package main
import ( import "go.wit.com/log"
"fmt"
"os"
"go.wit.com/log"
)
/* /*
this parses the command line arguements this parses the command line arguements
@ -16,58 +11,48 @@ import (
var argv args var argv args
type args struct { type args struct {
List *ListCmd `arg:"subcommand:list" help:"list things"` Xml []string `arg:"--libvirt" help:"import qemu xml files: --libvirt /etc/libvirt/qemu/*.xml"`
Droplet *DropletCmd `arg:"subcommand:droplet" help:"send events to a droplet"` Config string `arg:"env:VIRTIGO_HOME" help:"defaults to ~/.config/virtigo/"`
Config string `arg:"env:VIRTIGO_HOME" help:"defaults to ~/.config/virtigo/"` Port int `arg:"--port" default:"8080" help:"allow droplet events via http"`
Server string `arg:"env:VIRTIGO_SERVER" help:"what virtigo cluster to connect to"`
Localhost bool `arg:"--localhost" help:"use the local libvirt"`
Daemon bool `arg:"--daemon" help:"run as a daemon"`
Verbose bool `arg:"--verbose" help:"talk more"`
Port int `arg:"--port" default:"8080" help:"allow droplet events via http"`
Xml []string `arg:"--libvirt" help:"import qemu xml files: --libvirt /etc/libvirt/qemu/*.xml"`
Admin bool `arg:"--admin" help:"enter admin mode"`
Bash bool `arg:"--bash" help:"generate bash completion"`
BashAuto []string `arg:"--auto-complete" help:"todo: move this to go-arg"`
} }
type EmptyCmd struct { // Daemon bool `arg:"--daemon" help:"run in daemon mode"`
} // IgnoreCpu bool `arg:"--xml-ignore-cpu" default:"true" help:"ignore non-standard libvirt xml cpus"`
// IgnoreBr bool `arg:"--xml-ignore-net" default:"true" help:"ignore network bridge name changes"`
// IgnDisk bool `arg:"--xml-ignore-disk" default:"false" help:"ignore duplicate disk names"`
type testCmd string // Save bool `arg:"--save" default:"false" help:"save protobuf config after import"`
// Start string `arg:"--start" help:"start a droplet"`
type ListCmd struct { // Uptime bool `arg:"--uptime" default:"true" help:"allow uptime checks for things like Kuma"`
Droplets *EmptyCmd `arg:"subcommand:droplets" help:"list droplets"` // Hosts []string `arg:"--hosts" help:"hosts to connect to"`
Hypervisors *EmptyCmd `arg:"subcommand:hypervisors" help:"list hypervisors"`
On bool `arg:"--on" help:"only show things that are on"`
}
type DropletCmd struct {
Start *EmptyCmd `arg:"subcommand:start" help:"start droplet"`
Stop *EmptyCmd `arg:"subcommand:stop" help:"stop droplet"`
Show *EmptyCmd `arg:"subcommand:show" help:"show droplet"`
Console *EmptyCmd `arg:"subcommand:console" help:"open serial console"`
VNC *EmptyCmd `arg:"subcommand:vnc" help:"open VNC console"`
Spice *EmptyCmd `arg:"subcommand:spice" help:"open spiceconsole"`
Name string `arg:"--name" help:"what droplet to start"`
}
func (a args) Description() string { func (a args) Description() string {
return ` return `
virtigo: control your cluster virtigo will help control your cluster
This maintains a master list of all your vm's (aka 'droplets') This maintains a master list of all your vm's (aka 'droplets')
in your homelab cloud. You can import libvirt xml files. in your homelab cloud. You can import libvirt xml files.
This app talks to your hypervisors via the virtigod daemon. This app talks to your hypervisors via the virtigod daemon.
Import your existing libvirt xml files with:
virtigo --libvirt /etc/libvirt/qemu/*.xml
This runs a http server so you can control your virtual machines.
For example to start a vm called 'www.wit.com' your cluster 'foo.bar.com':
curl http://foo.bar.com/start?www.wit.com
` `
} }
func (args) Version() string { func (args) Version() string {
return ARGNAME + " " + VERSION + " Built on " + BUILDTIME return "virtigo " + Version
} }
var INFO *log.LogFlag var INFO *log.LogFlag
var POLL *log.LogFlag var POLL *log.LogFlag
var WARN *log.LogFlag var WARN *log.LogFlag
var SPEW *log.LogFlag
var EVENT *log.LogFlag var EVENT *log.LogFlag
func init() { func init() {
@ -77,30 +62,6 @@ func init() {
INFO = log.NewFlag("INFO", false, full, short, "general virtigo") INFO = log.NewFlag("INFO", false, full, short, "general virtigo")
POLL = log.NewFlag("POLL", false, full, short, "virtigo polling") POLL = log.NewFlag("POLL", false, full, short, "virtigo polling")
WARN = log.NewFlag("WARN", true, full, short, "bad things") WARN = log.NewFlag("WARN", true, full, short, "bad things")
SPEW = log.NewFlag("SPEW", true, full, short, "dump everything")
EVENT = log.NewFlag("EVENT", true, full, short, "hypeprvisor/droplet events") EVENT = log.NewFlag("EVENT", true, full, short, "hypeprvisor/droplet events")
} }
/*
handles shell autocomplete
*/
func (a args) DoAutoComplete(argv []string) {
switch argv[0] {
case "list":
fmt.Println("droplets hypervisors")
case "droplet":
fmt.Println("start stop")
case "devel":
fmt.Println("--force")
case "master":
fmt.Println("")
case "verify":
fmt.Println("user devel master")
default:
if argv[0] == ARGNAME {
// list the subcommands here
fmt.Println("--bash list droplet")
}
}
os.Exit(0)
}
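
The usage text above describes a plain-HTTP control interface (8080 is the documented default for --port). A minimal client-side sketch, assuming a virtigo daemon is reachable on localhost:8080 and using the /uptime route served by http.go (the hostname and the use of GET here are assumptions; the admin GUI in this repo POSTs via postData()):

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// /uptime answers with a short status string describing the cluster.
	resp, err := http.Get("http://localhost:8080/uptime")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}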

build (2 lines changed)

@ -2,4 +2,4 @@
# this is the systemd control file # this is the systemd control file
mkdir -p files/usr/bin/ mkdir -p files/usr/bin/
cp ../virtigoctl/virtigoctl files/usr/bin/ cp ../../lib/daemons/virtigod/virtigod files/usr/bin/


@ -4,7 +4,7 @@ Package: virtigo
Maintainer: Jeff Carr <jcarr@wit.com> Maintainer: Jeff Carr <jcarr@wit.com>
Architecture: amd64 Architecture: amd64
Recommends: virtigod Recommends: virtigod
Depends: gus, remmina, remmina-plugin-spice Depends:
URL: https://go.wit.com/apps/virtigo URL: https://go.wit.com/apps/virtigo
Description: control your virtual machines in your cluster Description: control your virtual machines in your cluster
lets you start,stop, etc virtual machines lets you start,stop, etc virtual machines

create.go (new file, 163 lines)

@ -0,0 +1,163 @@
package main
import (
"errors"
"fmt"
"math/rand"
"time"
pb "go.wit.com/lib/protobuf/virtbuf"
)
// attempts to create a new virtual machine
/*
func oldcreate(w http.ResponseWriter, r *http.Request) (string, error) {
msg, err := ioutil.ReadAll(r.Body) // Read the body as []byte
if err != nil {
result := fmt.Sprintf("ReadAll() error =", err)
log.Info(result)
fmt.Fprintln(w, result)
return result, err
}
var d *pb.Droplet
d = new(pb.Droplet)
if err := d.UnmarshalJSON(msg); err != nil {
log.Info("UnmarshalJSON() failed", err)
if err := d.Unmarshal(msg); err != nil {
log.Info("droplet protobuf.Unmarshal() failed", err)
return "", err
}
}
d.StartState = pb.DropletState_OFF
d.SetState(pb.DropletState_OFF)
d.Memory = 2048 * 1024 * 1024
d.Cpus = 2
log.Info("Got msg:", string(msg))
log.Info("hostname =", d.Hostname)
name := d.Hostname
// don't continue past here if the grid is unstable anyway
// because this will add the droplet to cluster.Droplets
if s, err := isClusterStable(); err != nil {
log.Info(s)
fmt.Fprintln(w, s)
return s, err
}
tmpd := me.cluster.FindDropletByName(name)
if tmpd != nil {
result := "create error: Droplet " + name + " is already defined"
log.Info(result)
fmt.Fprintln(w, result)
return result, errors.New(result)
}
if d.Uuid == "" {
u := uuid.New()
d.Uuid = u.String()
}
if len(d.Networks) == 0 {
var newNet *pb.Network
newNet = new(pb.Network)
newNet.Mac = getNewMac()
d.Networks = append(d.Networks, newNet)
// d.AddDefaultNetwork(mac)
}
me.cluster.AddDroplet(d)
result, err := startDroplet(d)
if err != nil {
log.Info(result)
log.Info("startDroplet(d) failed:", err)
fmt.Fprintln(w, result)
fmt.Fprintln(w, "startDroplet(d) failed:", err)
return result, err
}
fmt.Fprintln(w, result)
fmt.Fprintln(w, "START=OK")
return result, nil
}
*/
// for now, because sometimes this should write to stdout and
// sometimes to http socket, it returns a string
func startDroplet(d *pb.Droplet) (string, error) {
var result string
name := d.Hostname
// validate the droplet
if err := ValidateDroplet(d); err != nil {
result = "ValidateDroplet() failed droplet " + d.Hostname
return result, err
}
// is the droplet already on?
if d.Current.State == pb.DropletState_ON {
result = "EVENT start droplet " + d.Hostname + " is already ON"
return result, errors.New(result)
}
// how long has the cluster been stable?
// wait until it is stable. use this to throttle droplet starts
dur := time.Since(me.unstable)
result = fmt.Sprintln("should start droplet", name, "here. grid stable for:", pb.FormatDuration(dur))
if dur < me.unstableTimeout {
tmp := pb.FormatDuration(me.unstableTimeout)
result += "grid is still too unstable (unstable timeout = " + tmp + ")"
return result, errors.New("grid is still unstable")
}
// make the list of hypervisors that are active and can start new droplets
var pool []*HyperT
for _, h := range me.hypers {
// this droplet is set to use this and only this hypervisor
if d.ForceHypervisor == h.pb.Hostname {
ok, b := h.start(d)
if ok {
return result + b, nil
}
return result + b, errors.New("start " + name + " on hypervisor " + h.pb.Hostname)
}
// skip hypervisors marked inactive
if h.pb.Active != true {
result += fmt.Sprintln("hypervisor is inactive:", name, "for", h.pb.Hostname, h.pb.Active)
continue
}
// the config file says this droplet should run on this hypervisor
// attempt to start the droplet here. use this even if the hypervisor is inactive?
if d.PreferredHypervisor == h.pb.Hostname {
ok, b := h.start(d)
if ok {
return result + b, nil
}
return result + b, errors.New("start " + name + " on hypervisor " + h.pb.Hostname)
}
result += fmt.Sprintln("hypervisor ready:", name, "for", h.pb.Hostname, h.pb.Active)
pool = append(pool, h)
}
// left here as an example of how to actually do random numbers
// it's complete mathematical chaos. Randomness is simple when
// human interaction occurs -- which is exactly what happens most
// of the time. most random shit is bullshit. all you really need
// is exactly this to make sure the random functions work as they
// should. Probably, just use this everywhere in all cases. --jcarr
rand.Seed(time.Now().UnixNano())
a := 0
b := len(pool)
n := a + rand.Intn(b-a)
result += fmt.Sprintln("pool has", len(pool), "members", "rand =", n)
h := pool[n]
ok, output := h.start(d)
if ok {
return result + output, nil
}
return result + output, errors.New("start " + name + " on hypervisor " + h.pb.Hostname)
}
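
To summarize the placement logic in startDroplet() above: a droplet pinned with ForceHypervisor always goes to that hypervisor, an active PreferredHypervisor is tried next, and otherwise an active hypervisor is chosen at random. A simplified sketch of that policy (the Hyper type and pickHypervisor are illustrative stand-ins, not code from this repo):

package main

import (
	"fmt"
	"math/rand"
)

// Hyper is a pared-down stand-in for HyperT: just a hostname and an active flag.
type Hyper struct {
	Hostname string
	Active   bool
}

// pickHypervisor mirrors the selection order used by startDroplet().
func pickHypervisor(force, preferred string, hypers []*Hyper) *Hyper {
	var pool []*Hyper
	for _, h := range hypers {
		if force != "" && h.Hostname == force {
			return h // pinned droplets use this hypervisor, active or not
		}
		if !h.Active {
			continue // never schedule new droplets on inactive hypervisors
		}
		if preferred != "" && h.Hostname == preferred {
			return h // the config prefers this hypervisor
		}
		pool = append(pool, h)
	}
	if len(pool) == 0 {
		return nil
	}
	return pool[rand.Intn(len(pool))] // spread everything else randomly
}

func main() {
	hypers := []*Hyper{{"farm01", true}, {"farm02", false}, {"farm03", true}}
	h := pickHypervisor("", "farm03", hypers)
	fmt.Println("chosen hypervisor:", h.Hostname)
}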


@ -1,416 +0,0 @@
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
// Use of this source code is governed by the GPL 3.0
package main
// An app to submit patches for the 30 GO GUI repos
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"os/user"
"time"
"go.wit.com/gui"
"go.wit.com/lib/gadgets"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/log"
)
// refresh the windows & tables the user has open
func (admin *adminT) refresh() error {
if argv.Verbose {
log.Info("virtigo scan here")
}
if admin.url == nil {
log.Info("admin url == nil")
return fmt.Errorf("admin url == nil")
}
msg := []byte(`{"message": "Hello"}`)
// display the uptime
if data, err := postData(admin.url.String()+"/uptime", msg); err != nil {
log.Info("/uptime Error:", err)
} else {
log.Info("Response:", string(data))
admin.uptime.SetText(string(data))
}
// update the droplet list
if data, err := postData(admin.url.String()+"/DropletsPB", msg); err != nil {
log.Info("/DropletsPB Error:", err)
} else {
fmt.Println("DropletsPB Response len:", len(data))
admin.cluster.Droplets = new(virtpb.Droplets)
if err := admin.cluster.Droplets.Unmarshal(data); err != nil {
fmt.Println("droplets marshal failed", err)
return err
}
fmt.Println("Droplet len=", admin.cluster.Droplets.Len())
}
// update the hypervisor list
if data, err := postData(admin.url.String()+"/HypervisorsPB", msg); err != nil {
log.Info("Error:", err)
} else {
fmt.Println("HypervisorsPB Response len:", len(data))
admin.cluster.Hypervisors = new(virtpb.Hypervisors)
if err := admin.cluster.Hypervisors.Unmarshal(data); err != nil {
fmt.Println("hypervisors marshal failed", err)
return err
}
fmt.Println("Hypervisors len=", admin.cluster.Hypervisors.Len())
}
// update the events list
if data, err := postData(admin.url.String()+"/EventsPB", msg); err != nil {
log.Info("Error:", err)
} else {
fmt.Println("EventsPB Response len:", len(data))
admin.cluster.Events = new(virtpb.Events)
if err := admin.cluster.Events.Unmarshal(data); err != nil {
fmt.Println("events marshal failed", err)
return err
}
fmt.Println("Events len=", admin.cluster.Events.Len())
}
return nil
}
var client *http.Client
func doLocalhostAdminGui() *adminT {
admin := new(adminT)
admin.uptime = me.gwin.Group.NewLabel("uptime")
grid := me.gwin.Group.RawGrid()
grid.NewButton("show hypervisors", func() {
if admin.cluster.Hypervisors == nil {
log.Info("hypervisors not initialized")
return
}
log.Info("Hypervisors len=", admin.cluster.Hypervisors.Len())
admin.hwin = newHypervisorsWindow()
admin.hwin.doStdHypervisors(admin.cluster.Hypervisors)
admin.hwin.win.Custom = func() {
log.Info("hiding table window")
}
})
grid.NewButton("droplets", func() {
if admin.cluster.Droplets == nil {
log.Info("droplets not initialized")
return
}
admin.dwin = newDropletsWindow(admin)
admin.dwin.win.Custom = func() {
log.Info("hiding droplet table window")
}
var found *virtpb.Droplets
found = virtpb.NewDroplets()
all := admin.cluster.Droplets.All()
for all.Scan() {
vm := all.Next()
if vm.Current.State != virtpb.DropletState_ON {
continue
}
found.Append(vm)
}
admin.dwin.doActiveDroplets(found)
})
grid.NewButton("events", func() {
if admin.cluster.Events == nil {
log.Info("events are not initialized")
return
}
log.Info("Events len=", admin.cluster.Events.Len())
admin.ewin = newEventsWindow()
admin.ewin.doStdEvents(admin.cluster.Events)
admin.ewin.win.Custom = func() {
log.Info("hiding table window")
}
})
grid.NextRow()
grid.NewButton("refresh", func() {
admin.refresh()
})
return admin
}
func doAdminGui() {
// Initialize a persistent client with a custom Transport
client = &http.Client{
Transport: &http.Transport{
DisableKeepAlives: false, // Ensure Keep-Alive is enabled
},
Timeout: 10 * time.Second, // Set a reasonable timeout
}
me.gwin = gadgets.NewGenericWindow("Virtigo: (run your cluster)", "")
me.gwin.Custom = func() {
log.Warn("Main window close")
os.Exit(0)
}
me.cmap = make(map[*virtpb.Cluster]*adminT)
for c := range me.clusters.IterAll() {
a := new(adminT)
me.cmap[c] = a
log.Info("found in the config file", c.URL[0])
a.makeClusterGroup(c)
}
// sit here forever refreshing the GUI
for {
// admin.refresh()
log.Info("todo: refresh() protobufs here")
time.Sleep(90 * time.Second)
}
}
func (admin *adminT) doAdminGui() {
// Initialize a persistent client with a custom Transport
client = &http.Client{
Transport: &http.Transport{
DisableKeepAlives: false, // Ensure Keep-Alive is enabled
},
Timeout: 10 * time.Second, // Set a reasonable timeout
}
win := gadgets.NewGenericWindow("Virtigo: (run your cluster)", "localhost")
win.Custom = func() {
log.Warn("Main window close")
os.Exit(0)
}
me.gwin = win
admin.uptime = win.Group.NewLabel("uptime")
grid := win.Group.RawGrid()
grid.NewButton("show hypervisors", func() {
if admin.cluster.Hypervisors == nil {
log.Info("hypervisors not initialized")
return
}
log.Info("Hypervisors len=", admin.cluster.Hypervisors.Len())
admin.hwin = newHypervisorsWindow()
admin.hwin.doStdHypervisors(admin.cluster.Hypervisors)
admin.hwin.win.Custom = func() {
log.Info("hiding table window")
}
})
grid.NewButton("droplets", func() {
if admin.cluster.Droplets == nil {
log.Info("droplets not initialized")
return
}
admin.dwin = newDropletsWindow(admin)
admin.dwin.win.Custom = func() {
log.Info("hiding droplet table window")
}
var found *virtpb.Droplets
found = virtpb.NewDroplets()
all := admin.cluster.Droplets.All()
for all.Scan() {
vm := all.Next()
if vm.Current.State != virtpb.DropletState_ON {
continue
}
found.Append(vm)
}
admin.dwin.doActiveDroplets(found)
})
grid.NewButton("events", func() {
if admin.cluster.Events == nil {
log.Info("events are not initialized")
return
}
log.Info("Events len=", admin.cluster.Events.Len())
admin.ewin = newEventsWindow()
admin.ewin.doStdEvents(admin.cluster.Events)
admin.ewin.win.Custom = func() {
log.Info("hiding table window")
}
})
grid.NextRow()
grid.NewButton("refresh", func() {
admin.refresh()
})
grid.NewButton("test gui close", func() {
gui.StandardExit()
// okExit("admin close")
})
me.cmap = make(map[*virtpb.Cluster]*adminT)
for c := range me.clusters.IterAll() {
a := new(adminT)
me.cmap[c] = a
log.Info("found in the config file", c.URL[0])
a.makeClusterGroup(c)
}
// sit here forever refreshing the GUI
for {
admin.refresh()
time.Sleep(90 * time.Second)
}
}
func (admin *adminT) makeClusterGroup(c *virtpb.Cluster) {
var err error
admin.url, err = url.Parse(c.URL[0])
if err != nil {
badExit(err)
}
if admin.cluster == nil {
admin.cluster = new(virtpb.Cluster)
admin.cluster.Name = c.Name
admin.cluster.Uuid = c.Uuid
}
name := c.GetName()
if name == "" {
name = admin.url.Hostname()
}
group := me.gwin.Bottom.NewGroup(name)
admin.uptime = group.NewLabel("uptime")
grid := group.RawGrid()
grid.NewButton("show hypervisors", func() {
if admin.cluster.Hypervisors == nil {
log.Info("hypervisors not initialized")
return
}
log.Info("Hypervisors len=", admin.cluster.Hypervisors.Len())
admin.hwin = newHypervisorsWindow()
admin.hwin.doStdHypervisors(admin.cluster.Hypervisors)
admin.hwin.win.Custom = func() {
log.Info("hiding table window")
}
})
grid.NewButton("droplets", func() {
if admin.cluster.Droplets == nil {
log.Info("droplets not initialized")
return
}
admin.dwin = newDropletsWindow(admin)
admin.dwin.win.Custom = func() {
log.Info("hiding droplet table window")
}
var found *virtpb.Droplets
found = virtpb.NewDroplets()
all := admin.cluster.Droplets.All()
for all.Scan() {
vm := all.Next()
if vm.Current.State != virtpb.DropletState_ON {
continue
}
found.Append(vm)
}
admin.dwin.doActiveDroplets(found)
})
grid.NewButton("events", func() {
if admin.cluster.Events == nil {
log.Info("events are not initialized")
return
}
log.Info("Events len=", admin.cluster.Events.Len())
admin.ewin = newEventsWindow()
admin.ewin.doStdEvents(admin.cluster.Events)
admin.ewin.win.Custom = func() {
log.Info("hiding table window")
}
})
grid.NewButton("refresh", func() {
admin.refresh()
})
if err := admin.refresh(); err != nil {
return
}
grid.NewButton("save cluster.pb", func() {
admin.cluster.ConfigSave()
})
}
func postData(url string, data []byte) ([]byte, error) {
req, err := http.NewRequest(http.MethodPost, url, bytes.NewBuffer(data))
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
usr, _ := user.Current()
req.Header.Set("author", usr.Username)
req.Header.Set("Connection", "keep-alive") // Ensure keep-alive is used
resp, err := client.Do(req)
if err != nil {
return nil, fmt.Errorf("request failed: %w", err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("failed to read response: %w", err)
}
return body, nil
}
func (admin *adminT) postEvent(e *virtpb.Event) error {
var result *virtpb.Event
result = new(virtpb.Event)
msg, err := e.Marshal()
if err != nil {
log.Info("postEvent() marshal() failed", err, e)
return err
}
url := admin.url.String() + "/event"
// update the droplet list
if data, err := postData(url, msg); err != nil {
log.Info("postEvent() /event Error:", err)
return err
} else {
if err := result.Unmarshal(data); err != nil {
log.Println("postEvent() result marshal failed", err, "len(dat) =", len(data))
log.Println("postEvent() data =", string(data))
return err
} else {
log.Println("postEvent() result marshal worked on len(dat) =", len(data))
log.Println("postEvent() result =", result.FormatTEXT())
}
}
if result.Error != "" {
return fmt.Errorf("%s", result.Error)
}
log.Printf("Event worked to %s uuid=%s\n", url, result.DropletUuid)
return nil
}


@ -1,154 +0,0 @@
// Copyright 2024 WIT.COM Inc Licensed GPL 3.0
package main
import (
"fmt"
"time"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/lib/virtigolib"
"go.wit.com/log"
)
func doDaemon() error {
// set defaults
me.unstable = time.Now() // initialize the grid as unstable
me.changed = false
me.hmap = make(map[*virtpb.Hypervisor]*HyperT)
// how long a droplet can be missing until it's declared dead
me.unstableTimeout = 17 * time.Second
me.missingDropletTimeout = time.Minute // not sure the difference between these values
// how often to poll the hypervisors
me.hyperPollDelay = 5 * time.Second
// how long the cluster must be stable before new droplets can be started
me.clusterStableDuration = 37 * time.Second
me.cluster = virtpb.InitCluster()
if err := me.cluster.ConfigLoad(); err != nil {
log.Info("config load error", err)
log.Info("")
log.Info("You have never run this before")
log.Info("init example cloud here")
log.Sleep(2)
return err
}
loop := me.cluster.DropletsAll() // get the list of droplets
for loop.Scan() {
d := loop.Next()
if d == nil {
fmt.Println("d == nil")
return fmt.Errorf("d == nil")
}
fmt.Println("Droplet UUID:", d.Uuid)
if d.Current == nil {
d.Current = new(virtpb.Current)
}
d.SetState(virtpb.DropletState_OFF)
log.Info("droplet", d.Hostname)
}
hmm := "pihole.wit.com"
d := me.cluster.FindDropletByName(hmm)
if d == nil {
log.Info("did not find found droplet", hmm)
} else {
log.Info("found droplet", d.Hostname, d)
}
var newEvents []*virtpb.Event
// sanity check the cluster & droplets
if _, _, err := ValidateDroplets(); err != nil {
log.Info("todo: add flag to ignore. for now, fix problems in the config file.")
return err
}
newe, err := ValidateDiskFilenames()
if err != nil {
log.Info(err)
return err
}
// this is a new droplet. add it to the cluster
for _, e := range newe {
newEvents = append(newEvents, e)
}
ValidateUniqueFilenames()
for _, filename := range argv.Xml {
domcfg, err := virtigolib.ReadXml(filename)
if err != nil {
// parsing the libvirt xml file failed
log.Info("error:", filename, err)
log.Info("readXml() error", filename)
log.Info("readXml() error", err)
log.Info("libvirt XML will have to be fixed by hand")
return err
}
// this is a new droplet. add it to the cluster
log.Info("Add XML Droplet here", domcfg.Name)
_, newe, err := virtigolib.AddDomainDroplet(me.cluster, domcfg)
if err != nil {
log.Info("addDomainDroplet() error", filename)
log.Info("addDomainDroplet() error", err)
log.Info("libvirt XML will have to be fixed by hand")
return err
}
for _, e := range newe {
newEvents = append(newEvents, e)
}
}
for i, e := range newEvents {
log.Info(i, "Event:", e.Droplet, e.FieldName, "orig:", e.OrigVal, "new:", e.NewVal)
me.changed = true
}
if me.changed {
if err := me.cluster.ConfigSave(); err != nil {
log.Info("configsave error", err)
return err
}
log.Info("XML changes saved in protobuf config")
return nil
}
if len(argv.Xml) != 0 {
log.Info("No XML changes found")
return fmt.Errorf("No XML changes found")
}
// initialize each hypervisor
for _, pbh := range me.cluster.H.Hypervisors {
// this is a new unknown droplet (not in the config file)
var h *HyperT
h = new(HyperT)
h.pb = pbh
h.lastDroplets = make(map[string]time.Time)
h.lastpoll = time.Now()
me.hmap[pbh] = h
me.hypers = append(me.hypers, h)
log.Log(EVENT, "config new hypervisors", h.pb.Hostname)
}
// start the watchdog polling for each hypervisor
for _, h := range me.hypers {
log.Info("starting polling on", h.pb.Hostname)
// start a watchdog on each hypervisor
go h.NewWatchdog()
}
var cloud *virtigolib.CloudManager
cloud = virtigolib.NewCloud()
found, _ := cloud.FindDropletByName("www.wit.com")
if found == nil {
log.Info("d == nil")
} else {
log.Info("d == ", found)
}
startHTTP()
return nil
}


@ -1,361 +0,0 @@
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
// Use of this source code is governed by the GPL 3.0
package main
// An app to submit patches for the 30 GO GUI repos
import (
"fmt"
"math/rand"
"net/http"
"net/url"
"path/filepath"
"strings"
"time"
"github.com/google/uuid"
"go.wit.com/lib/gui/shell"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/log"
)
func doDroplet() (string, error) {
err := me.clusters.ConfigLoad()
if err != nil {
return "", err
}
msg := []byte(`{"message": "Hello"}`)
// Initialize a persistent client with a custom Transport
client = &http.Client{
Transport: &http.Transport{
DisableKeepAlives: false, // Ensure Keep-Alive is enabled
},
Timeout: 10 * time.Second, // Set a reasonable timeout
}
me.cmap = make(map[*virtpb.Cluster]*adminT)
for c := range me.clusters.IterAll() {
var err error
admin := new(adminT)
if admin.cluster == nil {
admin.cluster = new(virtpb.Cluster)
}
me.cmap[c] = admin
log.Info("found in the config file", c.URL[0])
// a.makeClusterGroup(c)
admin.url, err = url.Parse(c.URL[0])
if err != nil {
return "", err
}
// update the droplet list
if data, err := postData(admin.url.String()+"/DropletsPB", msg); err != nil {
log.Info("/DropletsPB Error:", err)
continue
} else {
admin.cluster.Droplets = new(virtpb.Droplets)
if err := admin.cluster.Droplets.Unmarshal(data); err != nil {
log.Printf("DropletsPB Response len:%d\n", len(data))
log.Println("droplets marshal failed", err)
continue
}
}
log.Printf("Cluster Name: %s\n", c.Name)
log.Printf("Number of Droplets: %d\n", admin.cluster.Droplets.Len())
if argv.Droplet.Name == "" {
return "", fmt.Errorf("--name droplet name was empty")
}
var found *virtpb.Droplets
found = virtpb.NewDroplets()
all := admin.cluster.Droplets.All()
for all.Scan() {
vm := all.Next()
if argv.Droplet.Name == vm.Hostname {
if argv.Droplet.Show != nil {
log.Info(vm.SprintHeader())
txt := vm.FormatTEXT()
log.Info(txt)
return "droplet status", nil
}
if argv.Droplet.Start != nil {
log.Info("should start droplet here")
log.Info(vm.SprintHeader())
e := new(virtpb.Event)
e.Etype = virtpb.EventType_POWERON
e.DropletUuid = vm.Uuid
if err := admin.postEvent(e); err != nil {
return "droplet start err", err
}
return "droplet start", nil
}
return "droplet found", fmt.Errorf("do what to the droplet?")
}
found.Append(vm)
}
log.Println("On Droplet count=", found.Len())
}
return "", fmt.Errorf("droplet %s not found", argv.Droplet.Name)
}
func doEvent(e *virtpb.Event) *virtpb.Event {
result := new(virtpb.Event)
if e.Etype == virtpb.EventType_POWERON {
log.Println("power on droplet on local cluster here", e.DropletUuid)
result.State = virtpb.Event_DONE
rs, err := Start(e.DropletUuid)
log.Println("Start() returned", rs)
log.Println("Start() returned err", err)
if err != nil {
result.Error = fmt.Sprintf("%v", err)
}
return result
}
if e.Etype == virtpb.EventType_EDIT {
log.Println("edit event", e.DropletUuid)
result.State = virtpb.Event_DONE
if e.Droplet != nil {
return updateDroplet(e.Droplet)
}
log.Println("unknown edit event")
result.State = virtpb.Event_FAIL
return result
}
if e.Etype == virtpb.EventType_ADD {
log.Println("START ADD droplet event", e.Droplet.FormatTEXT())
if e.Droplet == nil {
result.State = virtpb.Event_FAIL
return result
}
result.DropletName = e.Droplet.Hostname
result.Error = e.Droplet.FormatTEXT() // feedback to the other side for debugging
// attempt to create the new droplet
if err := createDroplet(e.Droplet, result); err != nil {
result.Error += fmt.Sprintf("createDroplet() err: %v", err)
result.State = virtpb.Event_FAIL
return result
}
log.Println("create droplet worked", e.Droplet.FormatTEXT())
result.State = virtpb.Event_DONE
return result
}
log.Println("unknown event", e)
result.Etype = e.Etype
result.State = virtpb.Event_FAIL
return result
}
func updateDroplet(newd *virtpb.Droplet) *virtpb.Event {
var changed bool = false
result := new(virtpb.Event)
if newd == nil {
result.Error = "updateDroplet() d == nil"
result.State = virtpb.Event_FAIL
return result
}
d := me.cluster.FindDropletByUuid(newd.Uuid)
if d == nil {
result.Error = "updateDroplet() could not find uuid"
result.State = virtpb.Event_FAIL
return result
}
log.Println("found droplet to update:", newd.Uuid, newd.Hostname, newd.Cpus, newd.Memory)
if d.Hostname != newd.Hostname && newd.Hostname != "" {
d.Hostname = newd.Hostname
changed = true
}
if d.Cpus != newd.Cpus && newd.Cpus > 0 {
d.Cpus = newd.Cpus
changed = true
}
// arbitrary check. don't make vm's with less than 64 MB of RAM
// big enough most things will load with some stdout
if d.Memory != newd.Memory && newd.Memory > (64*1024*1024) {
d.Memory = newd.Memory
changed = true
}
if changed {
if err := me.cluster.ConfigSave(); err != nil {
log.Info("configsave error", err)
result.Error = fmt.Sprintf("%v", err)
result.State = virtpb.Event_FAIL
return result
}
} else {
log.Println("nothing changed in", newd.Uuid, newd.Hostname)
}
result.State = virtpb.Event_DONE
return result
}
func createDroplet(newd *virtpb.Droplet, result *virtpb.Event) error {
if newd == nil {
return fmt.Errorf("droplet protobuf == nil")
}
if newd.Uuid == "" {
newd.Uuid = uuid.New().String()
}
d := me.cluster.FindDropletByUuid(newd.Uuid)
if d != nil {
return fmt.Errorf("droplet uuid already used")
}
log.Println("found droplet to update:", newd.Uuid, newd.Hostname, newd.Cpus, newd.Memory)
if newd.Hostname == "" {
return fmt.Errorf("Hostname can not be blank")
}
d = me.cluster.FindDropletByName(newd.Hostname)
if d != nil {
return fmt.Errorf("hostname already defined")
}
// by default, on locally imported domains, set the preferred hypervisor!
newd.LocalOnly = "yes on: " + "farm03"
newd.PreferredHypervisor = "farm03"
newd.StartState = virtpb.DropletState_OFF
newd.Current = new(virtpb.Current)
newd.Current.State = virtpb.DropletState_OFF
// create the network
if err := createNetwork(newd); err != nil {
return err
}
// create the disks
if err := createDisks(newd); err != nil {
return err
}
// append the protobuf and save it
me.cluster.AddDroplet(newd)
if err := me.cluster.ConfigSave(); err != nil {
log.Info("configsave error", err)
return fmt.Errorf("ConfigSave() error: %v", err)
}
return nil
}
func findDisks(d *virtpb.Droplet) error {
log.Info("need to do this")
return nil
}
func createDisks(d *virtpb.Droplet) error {
if d.Disks != nil {
return findDisks(d)
}
newdisk := new(virtpb.Disk)
newdisk.Filename = d.Hostname + ".qcow2"
newdisk.Filepath = "/home/nfs2"
d.Disks = append(d.Disks, newdisk)
basefile := "/home/nfs2/base2025.wit-5.qcow2"
newfile := filepath.Join(newdisk.Filepath, newdisk.Filename)
if !shell.Exists(newdisk.Filepath) {
return fmt.Errorf("disk image path missing: %s", newdisk.Filepath)
}
if !shell.Exists(basefile) {
return fmt.Errorf("basefile %s missing", basefile)
}
if shell.Exists(newfile) {
return fmt.Errorf("disk image already exists: %s", newfile)
}
cmd := []string{"dd", "bs=100M", "status=progress", "oflag=dsync", "if=" + basefile, "of=" + newfile}
result := shell.RunRealtime(cmd)
if result.Exit != 0 {
return fmt.Errorf("dd to %s failed %d\n%s\n%s", newfile, result.Exit, strings.Join(result.Stdout, "\n"), strings.Join(result.Stderr, "\n"))
}
return nil
}
func createNetwork(d *virtpb.Droplet) error {
if d.Networks != nil {
// network already done
return nil
}
if len(d.Networks) > 0 {
// network already done
return nil
}
n := new(virtpb.Network)
n.Mac = getNewMac()
n.Name = "worldbr"
d.Networks = append(d.Networks, n)
return nil
}
func getNewMac() string {
// mac address map to check for duplicates
var macs map[string]string
macs = make(map[string]string)
loop := me.cluster.DropletsAll() // get the list of droplets
for loop.Scan() {
d := loop.Next()
for _, n := range d.Networks {
// log.Println("network:", n.Mac, d.Uuid, d.Hostname)
if _, ok := macs[n.Mac]; ok {
// UUID already exists
log.Info("duplicate MAC", n.Mac, macs[n.Mac])
log.Info("duplicate MAC", n.Mac, d.Hostname)
return ""
}
macs[n.Mac] = d.Hostname
}
}
return generateMAC(macs)
}
func generateMAC(macs map[string]string) string {
prefix := []byte{0x22, 0x22, 0x22}
for {
// Generate last 3 bytes randomly
suffix := make([]byte, 3)
if _, err := rand.Read(suffix); err != nil {
log.Fatalf("Failed to generate random bytes: %v", err)
}
// Format full MAC address
mac := fmt.Sprintf("%02x:%02x:%02x:%02x:%02x:%02x",
prefix[0], prefix[1], prefix[2],
suffix[0], suffix[1], suffix[2])
// Check if MAC is already used
if _, exists := macs[mac]; !exists {
log.Println("Using new MAC:", mac)
return mac
}
log.Println("MAC already defined:", mac)
}
}
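
A side note on the 22:22:22 prefix used by generateMAC() above: its first octet has the locally-administered bit set and the multicast bit clear, so generated addresses are unicast and cannot collide with vendor-assigned hardware MACs. A tiny check (illustrative, not from this repo):

package main

import "fmt"

func main() {
	first := byte(0x22)
	fmt.Println("locally administered:", first&0x02 != 0)   // true
	fmt.Println("unicast (I/G bit clear):", first&0x01 == 0) // true
}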

doGui.go (deleted, 205 lines)

@ -1,205 +0,0 @@
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
// Use of this source code is governed by the GPL 3.0
package main
// An app to submit patches for the 30 GO GUI repos
import (
"fmt"
"os"
"strings"
"time"
"go.wit.com/lib/gadgets"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/log"
)
func debug() {
for {
time.Sleep(90 * time.Second)
// log.Info("TODO: use this?")
}
}
func doGui() {
mainWindow := gadgets.NewGenericWindow("Virtigo: (inventory your cluster)", "Local Cluster Settings")
mainWindow.Custom = func() {
log.Warn("Main window close")
os.Exit(0)
}
drawWindow(mainWindow)
}
func drawWindow(win *gadgets.GenericWindow) {
grid := win.Group.RawGrid()
var newHyperWin *stdHypervisorTableWin
grid.NewButton("show hypervisors", func() {
if newHyperWin != nil {
log.Info("redraw hypervisors")
newHyperWin.doNewStdHypervisors(me.cluster.H)
return
}
log.Info("Hypervisors len=", me.cluster.H.Len())
newHyperWin = newHypervisorsWindow()
newHyperWin.doNewStdHypervisors(me.cluster.H)
newHyperWin.win.Custom = func() {
log.Info("hiding table window")
}
})
var dropWin *gadgets.GenericWindow
grid.NewButton("droplets", func() {
if dropWin != nil {
dropWin.Toggle()
return
}
d := me.cluster.GetDropletsPB()
var found *virtpb.Droplets
found = virtpb.NewDroplets()
all := d.All()
for all.Scan() {
vm := all.Next()
if vm.Current.State != virtpb.DropletState_ON {
continue
}
found.Append(vm)
}
dropWin, _ = makeDropletsWindow(found)
dropWin.Win.Custom = func() {
log.Info("hiding droplet table window")
}
})
var ewin *stdEventTableWin
grid.NewButton("events", func() {
if ewin != nil {
log.Info("update events here")
e := me.cluster.GetEventsPB()
log.Info("Events len=", e.Len())
ewin.doStdEvents(e)
return
}
ewin = newEventsWindow()
ewin.win.Custom = func() {
log.Info("hiding table window")
}
e := me.cluster.GetEventsPB()
log.Info("Events len=", e.Len())
ewin.doStdEvents(e)
})
grid.NextRow()
grid.NewButton("ConfigSave()", func() {
log.Info("todo: make code for this")
})
var testWin *gadgets.GenericWindow
grid.NewButton("create droplet", func() {
if testWin != nil {
testWin.Toggle()
return
}
d := me.cluster.GetDropletsPB()
testWin, _ = makeDropletsWindow(d)
})
grid.NewButton("uptime", func() {
updateUptimeGui("kuma uptime should update this")
})
grid.NextRow()
grid = win.Middle.RawGrid()
me.status = grid.NewLabel("cur status")
grid.NextRow()
me.lastuptime = grid.NewLabel("last uptime")
grid.NextRow()
}
func updateUptimeGui(uptime string) {
if me.status == nil {
// gui is not initialized
return
}
me.status.SetLabel(uptime)
datestamp := time.Now().Format("2006-01-02 15:04:03")
me.lastuptime.SetLabel("last uptime at " + datestamp)
}
func makeDropletsWindow(pb *virtpb.Droplets) (*gadgets.GenericWindow, *virtpb.DropletsTable) {
win := gadgets.NewGenericWindow("Droplets registered with Virtigo", "Buttons of things")
t := pb.NewTable("testDroptable")
t.NewUuid()
grid := win.Group.RawGrid()
grid.NewButton("Create", func() {
log.Info("todo: open create window here")
})
grid.NewButton("Show All", func() {
log.Info("todo")
})
/*
grid.NewButton("Update", func() {
t.Update()
})
*/
tbox := win.Bottom.Box()
t.SetParent(tbox)
t.AddHostname()
t.AddStringFunc("location", func(d *virtpb.Droplet) string {
return d.Current.Hypervisor
})
t.AddMemory()
t.AddCpus()
t.AddSpicePort()
t.AddTimeFunc("age", func(d *virtpb.Droplet) time.Time {
age := d.Current.OnSince.AsTime()
log.Info("age", d.Hostname, virtpb.FormatDuration(time.Since(age)))
return age
})
t.AddStringFunc("State", func(d *virtpb.Droplet) string {
if d.Current.State == virtpb.DropletState_ON {
return "ON"
}
if d.Current.State == virtpb.DropletState_OFF {
return "OFF"
}
return "UNKNOWN"
})
t.AddStringFunc("mac addr", func(d *virtpb.Droplet) string {
var macs []string
for _, n := range d.Networks {
macs = append(macs, n.Mac)
}
tmp := strings.Join(macs, "\n")
return strings.TrimSpace(tmp)
})
t.ShowTable()
return win, t
}
func makeEventsWindow(pb *virtpb.Events) *gadgets.GenericWindow {
win := gadgets.NewGenericWindow("Cluster Events", "Buttons of things")
grid := win.Group.RawGrid()
grid.NewButton("List", func() {
log.Info("list...")
})
tmp := fmt.Sprintf("num of events = %d", pb.Len())
grid.NewLabel(tmp)
tbox := win.Bottom.Box() // a vertical box (like a stack of books)
t := pb.NewTable("test 2")
t.NewUuid()
t.SetParent(tbox)
t.AddDropletName()
t.AddHypervisor()
t.ShowTable()
return win
}


@ -1,72 +0,0 @@
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
// Use of this source code is governed by the GPL 3.0
package main
// An app to submit patches for the 30 GO GUI repos
import (
"net/http"
"net/url"
"time"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/log"
)
func doList() {
msg := []byte(`{"message": "Hello"}`)
// Initialize a persistent client with a custom Transport
client = &http.Client{
Transport: &http.Transport{
DisableKeepAlives: false, // Ensure Keep-Alive is enabled
},
Timeout: 10 * time.Second, // Set a reasonable timeout
}
me.cmap = make(map[*virtpb.Cluster]*adminT)
for c := range me.clusters.IterAll() {
var err error
admin := new(adminT)
admin.cluster = new(virtpb.Cluster)
me.cmap[c] = admin
log.Info("found in the config file", c.URL[0])
// a.makeClusterGroup(c)
admin.url, err = url.Parse(c.URL[0])
if err != nil {
badExit(err)
}
// update the droplet list
if data, err := postData(admin.url.String()+"/DropletsPB", msg); err != nil {
log.Info("/DropletsPB Error:", err)
continue
} else {
admin.cluster.Droplets = new(virtpb.Droplets)
if err := admin.cluster.Droplets.Unmarshal(data); err != nil {
log.Printf("DropletsPB Response len:%d\n", len(data))
log.Println("droplets marshal failed", err)
continue
}
}
log.Printf("Cluster Name: %s\n", c.Name)
log.Printf("Number of Droplets: %d\n", admin.cluster.Droplets.Len())
var found *virtpb.Droplets
found = virtpb.NewDroplets()
all := admin.cluster.Droplets.All()
for all.Scan() {
vm := all.Next()
if vm.Current == nil {
continue
}
if argv.List.On && (vm.Current.State == virtpb.DropletState_OFF) {
continue
}
found.Append(vm)
log.Info(vm.SprintHeader())
}
log.Println("On Droplet count=", found.Len())
}
}

dump.go (24 lines changed)

@ -5,8 +5,7 @@ import (
"net/http" "net/http"
"time" "time"
"go.wit.com/lib/protobuf/virtpb" pb "go.wit.com/lib/protobuf/virtbuf"
"go.wit.com/log"
) )
/* /*
@ -32,7 +31,8 @@ func dumpCluster(w http.ResponseWriter) {
func dumpDroplets(w http.ResponseWriter, full bool) { func dumpDroplets(w http.ResponseWriter, full bool) {
loop := me.cluster.DropletsAll() // get the list of droplets loop := me.cluster.DropletsAll() // get the list of droplets
for loop.Scan() { for loop.Scan() {
d := loop.Next() d := loop.Droplet()
fmt.Println(w, "Droplet UUID:", d.Uuid)
// this line in golang could replace 80 lines of COBOL // this line in golang could replace 80 lines of COBOL
header := d.SprintDumpHeader() + " " header := d.SprintDumpHeader() + " "
@ -43,12 +43,12 @@ func dumpDroplets(w http.ResponseWriter, full bool) {
} }
header += d.Hostname header += d.Hostname
if d.Current.State == virtpb.DropletState_ON { if d.Current.State == pb.DropletState_ON {
// everything is as it should be with this vm // everything is as it should be with this vm
fmt.Fprintln(w, header) fmt.Fprintln(w, header)
continue continue
} }
if d.StartState == virtpb.DropletState_ON { if d.StartState == pb.DropletState_ON {
// this is supposed to be ON and needs to be turned on // this is supposed to be ON and needs to be turned on
fmt.Fprintln(w, header, "(should be on). todo: start() here") fmt.Fprintln(w, header, "(should be on). todo: start() here")
continue continue
@ -63,7 +63,7 @@ func dumpDroplets(w http.ResponseWriter, full bool) {
if full { if full {
var filenames string var filenames string
for _, disk := range d.Disks { for _, disk := range d.Disks {
filenames += disk.Filename + " " filenames += disk.Filename
} }
// this needs to be turned on // this needs to be turned on
@ -77,17 +77,21 @@ func dumpHypervisors(w http.ResponseWriter) {
var totalDroplets int var totalDroplets int
var totalUnknownDroplets int var totalUnknownDroplets int
for _, h := range me.hypers { for _, h := range me.hypers {
// lastpoll time.Time // the last time the hypervisor polled
dur := time.Since(h.lastpoll) dur := time.Since(h.lastpoll)
tmp := virtpb.FormatDuration(dur) tmp := pb.FormatDuration(dur)
fmt.Fprintln(w, h.pb.Hostname, "killcount =", h.killcount, "lastpoll:", tmp) fmt.Fprintln(w, h.pb.Hostname, "killcount =", h.killcount, "lastpoll:", tmp)
for name, _ := range h.lastDroplets { for name, t := range h.lastDroplets {
dur := time.Since(t)
tmp := pb.FormatDuration(dur)
totalDroplets += 1 totalDroplets += 1
d := me.cluster.FindDropletByName(name) d := me.cluster.FindDropletByName(name)
header := d.SprintDumpHeader() + " "
if d == nil { if d == nil {
totalUnknownDroplets += 1 totalUnknownDroplets += 1
fmt.Fprintln(w, "\t", h.pb.Hostname, "name =", name, "lastpoll:", tmp)
} else {
fmt.Fprintln(w, "\t", h.pb.Hostname, "name =", name, "lastpoll:", tmp, d.Current.State)
} }
log.Info("\t", header, d.Hostname)
} }
} }
if totalUnknownDroplets == 0 { if totalUnknownDroplets == 0 {


@ -5,7 +5,7 @@ import (
"time" "time"
"go.wit.com/lib/gui/shell" "go.wit.com/lib/gui/shell"
"go.wit.com/lib/protobuf/virtpb" pb "go.wit.com/lib/protobuf/virtbuf"
"go.wit.com/log" "go.wit.com/log"
) )
@ -27,7 +27,7 @@ func (h *HyperT) RestartVirtigod() {
// this must be bool in string because accumulated output is sometimes // this must be bool in string because accumulated output is sometimes
// written to STDOUT, sometimes to http // written to STDOUT, sometimes to http
func (h *HyperT) start(d *virtpb.Droplet) (bool, string) { func (h *HyperT) start(d *pb.Droplet) (bool, string) {
ready, result := me.cluster.DropletReady(d) ready, result := me.cluster.DropletReady(d)
if !ready { if !ready {
return false, result return false, result

exit.go (deleted, 37 lines)

@ -1,37 +0,0 @@
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
// Use of this source code is governed by the GPL 3.0
package main
import (
"os"
"go.wit.com/gui"
"go.wit.com/log"
)
func okExit(note string) {
if note != "" {
log.Info(ARGNAME, "exit:", note, "ok")
}
gui.StandardExit()
os.Exit(0)
}
func badExit(err error) {
log.Info(ARGNAME, "failed: ", err)
gui.StandardExit()
os.Exit(-1)
}
func exit(note string, err error) {
if note != "" {
log.Info(ARGNAME, "exit:", note, "ok")
}
gui.StandardExit()
if err == nil {
os.Exit(0)
}
log.Info(ARGNAME, "failed: ", err)
os.Exit(-1)
}

http.go (129 lines changed)

@ -2,12 +2,10 @@ package main
import ( import (
"fmt" "fmt"
"io/ioutil"
"net/http" "net/http"
"os" "os"
"strings" "strings"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/lib/virtigolib" "go.wit.com/lib/virtigolib"
"go.wit.com/log" "go.wit.com/log"
) )
@ -21,95 +19,67 @@ func cleanURL(url string) string {
func okHandler(w http.ResponseWriter, r *http.Request) { func okHandler(w http.ResponseWriter, r *http.Request) {
var route string var route string
route = cleanURL(r.URL.Path) route = cleanURL(r.URL.Path)
// log.HttpMode(w)
// defer log.HttpMode(nil)
msg, err := ioutil.ReadAll(r.Body) // Read the body as []byte
if err != nil {
log.Info("ReadAll() error =", err)
return
}
if route == "/uptime" { if route == "/uptime" {
ok, s := uptimeCheck() ok, s := uptimeCheck()
fmt.Fprintln(w, s)
// log.Info(s)
updateUptimeGui(s)
if ok { if ok {
// log.Info("Handling URL:", route, "cluster is ok") log.Info("Handling URL:", route, "cluster is ok", s)
fmt.Fprintln(w, s)
} else { } else {
log.Info("Handling URL:", route, "cluster is not right yet") log.Info("Handling URL:", route, "cluster is not right yet", s)
fmt.Fprintln(w, s)
} }
return return
} }
if route == "/create" { if route == "/start" {
var d *virtpb.Droplet hostname := r.URL.Query().Get("hostname")
d = new(virtpb.Droplet) if hostname == "" {
if err := d.Unmarshal(msg); err != nil { log.Warn("start failed. hostname is blank", cleanURL(r.URL.Path))
log.Info("proto.Unmarshal() failed on wire message len", len(msg)) fmt.Fprintln(w, "start failed. hostname is blank", cleanURL(r.URL.Path))
log.Info("error =", err)
return return
} }
log.Info("proto.Unmarshal() worked on msg len", len(msg), "hostname =", d.Hostname) log.Warn("hostname is", hostname)
found := me.cluster.FindDropletByName(d.Hostname) fmt.Fprintln(w, "hostname is", hostname)
if found != nil {
log.Info("already have hostname ", d.Hostname)
return
}
log.Info("new hostname ", d.Hostname)
if !me.cluster.AddDroplet(d) {
log.Info("new hostname added ok ", d.Hostname)
} else {
log.Info("hostname add failed for ", d.Hostname)
}
if err := me.cluster.ConfigSave(); err != nil {
log.Info("configsave error", err)
os.Exit(-1)
}
log.Info("config file saved")
return
}
if route == "/event" { // log.Warn("Handling URL:", tmp, "start droplet", start)
var e *virtpb.Event result, err := Start(hostname)
e = new(virtpb.Event) if err == nil {
if err := e.Unmarshal(msg); err != nil { fmt.Fprintln(w, result)
log.Info("proto.Unmarshal() failed on wire message len", len(msg)) fmt.Fprintln(w, hostname, "started output ok")
log.Info("error =", err) fmt.Fprintln(w, hostname, "need to parse the output here")
return fmt.Fprintln(w, hostname, "todo: switch to protobuf here")
} else {
fmt.Fprintln(w, result)
fmt.Fprintln(w, err)
fmt.Fprintln(w, hostname, "start failed")
} }
log.Info("/event proto.Unmarshal() worked on msg len", len(msg), "hostname =", e.DropletUuid)
result := doEvent(e)
data, err := result.Marshal()
if err != nil {
log.Info("/event marshal failed", err, "len(data) =", len(data))
fmt.Fprintln(w, "/event failed", err)
return
}
w.Write(data)
// fmt.Fprintln("droplet marshal failed", err)
return return
} }
if route == "/import" { if route == "/import" {
log.Info("virtigo import starts here") log.Info("virtigo import starts here")
fmt.Fprintln(w, "virtigo import starts here")
result, err := importDomain(w, r) result, err := importDomain(w, r)
if err != nil { if err != nil {
log.Info("virtigo import failed") log.Info("virtigo import failed")
log.Info(result) log.Info(result)
fmt.Fprintln(w, "virtigo import failed")
fmt.Fprintln(w, result)
return return
} }
log.Info("virtigo import worked") log.Info("virtigo import worked")
fmt.Fprintln(w, "virtigo import worked")
return return
} }
// toggle poll logging // toggle poll logging
if route == "/poll" { if route == "/poll" {
if POLL.Enabled() { if POLL.Get() {
log.Info("POLL is true") fmt.Fprintln(w, "POLL is true")
POLL.SetBool(false) POLL.SetBool(false)
} else { } else {
log.Info("POLL is false") fmt.Fprintln(w, "POLL is false")
POLL.SetBool(true) POLL.SetBool(true)
} }
return return
@ -130,45 +100,6 @@ func okHandler(w http.ResponseWriter, r *http.Request) {
return return
} }
if route == "/DropletsPB" {
pb := me.cluster.GetDropletsPB()
data, err := pb.Marshal()
if err != nil {
log.Info("droplet marshal failed", err)
fmt.Fprintln(w, "droplet marshal failed", err)
return
}
w.Write(data)
// fmt.Fprintln("droplet marshal failed", err)
return
}
if route == "/HypervisorsPB" {
pb := me.cluster.GetHypervisorsPB()
data, err := pb.Marshal()
if err != nil {
log.Info("hypervisors marshal failed", err)
fmt.Fprintln(w, "hypervisors marshal failed", err)
return
}
w.Write(data)
// fmt.Fprintln("droplet marshal failed", err)
return
}
if route == "/EventsPB" {
pb := me.cluster.GetEventsPB()
data, err := pb.Marshal()
if err != nil {
log.Info("events marshal failed", err)
fmt.Fprintln(w, "events marshal failed", err)
return
}
w.Write(data)
// fmt.Fprintln("droplet marshal failed", err)
return
}
if route == "/dumpdropletsfull" { if route == "/dumpdropletsfull" {
dumpDroplets(w, true) dumpDroplets(w, true)
return return
@ -186,6 +117,7 @@ func okHandler(w http.ResponseWriter, r *http.Request) {
if route == "/quit" { if route == "/quit" {
log.Warn("writing out config file and exiting virtigo") log.Warn("writing out config file and exiting virtigo")
fmt.Fprintln(w, "writing out config file and exiting virtigo")
if err := me.cluster.ConfigSave(); err != nil { if err := me.cluster.ConfigSave(); err != nil {
log.Info("configsave error", err) log.Info("configsave error", err)
} else { } else {
@ -208,6 +140,7 @@ func okHandler(w http.ResponseWriter, r *http.Request) {
} }
log.Warn("BAD URL =", route) log.Warn("BAD URL =", route)
fmt.Fprintln(w, "BAD URL tmp =", route)
} }
// write a file out to the http socket // write a file out to the http socket


@ -7,7 +7,7 @@ import (
"os" "os"
"time" "time"
"go.wit.com/lib/protobuf/virtpb" pb "go.wit.com/lib/protobuf/virtbuf"
"go.wit.com/lib/virtigolib" "go.wit.com/lib/virtigolib"
"go.wit.com/log" "go.wit.com/log"
@ -73,7 +73,7 @@ func importDomain(w http.ResponseWriter, r *http.Request) (string, error) {
// exports and builds a libvirt.Domain from the hypervisor // exports and builds a libvirt.Domain from the hypervisor
domcfg, err := ExportLibvirtDomain(h.pb, domainName) domcfg, err := ExportLibvirtDomain(h.pb, domainName)
if err != nil { if err != nil {
result = fmt.Sprint("ExportLibvirtDomain() failed", err) result = fmt.Sprintf("ExportLibvirtDomain() failed", err)
log.Warn(result) log.Warn(result)
fmt.Fprintln(w, result) fmt.Fprintln(w, result)
return "", err return "", err
@ -82,7 +82,7 @@ func importDomain(w http.ResponseWriter, r *http.Request) (string, error) {
// merges and updates the droplet protobuf based on the libvirt XML // merges and updates the droplet protobuf based on the libvirt XML
events, err := virtigolib.MergelibvirtDomain(d, domcfg) events, err := virtigolib.MergelibvirtDomain(d, domcfg)
if err != nil { if err != nil {
result = fmt.Sprint("MerglibvirtDomain() failed for", d.Hostname, err) result = fmt.Sprintf("MerglibvirtDomain() failed for", d.Hostname, err)
log.Warn(result) log.Warn(result)
fmt.Fprintln(w, result) fmt.Fprintln(w, result)
return "", errors.New(result) return "", errors.New(result)
@ -123,7 +123,7 @@ func importDomain(w http.ResponseWriter, r *http.Request) (string, error) {
// this must be bool in string because accumulated output is sometimes // this must be bool in string because accumulated output is sometimes
// written to STDOUT, sometimes to http // written to STDOUT, sometimes to http
func (h *HyperT) importDomain(d *virtpb.Droplet) (bool, string) { func (h *HyperT) importDomain(d *pb.Droplet) (bool, string) {
ready, result := me.cluster.DropletReady(d) ready, result := me.cluster.DropletReady(d)
if !ready { if !ready {
return false, result return false, result
@ -153,7 +153,7 @@ func (h *HyperT) importDomain(d *virtpb.Droplet) (bool, string) {
return true, result return true, result
} }
func ExportLibvirtDomain(h *virtpb.Hypervisor, domainName string) (*libvirtxml.Domain, error) { func ExportLibvirtDomain(h *pb.Hypervisor, domainName string) (*libvirtxml.Domain, error) {
// attempt to get the domain record from virtigo // attempt to get the domain record from virtigo
xml, err := postImportDomain(h.Hostname, domainName) xml, err := postImportDomain(h.Hostname, domainName)
if err != nil { if err != nil {

main.go (197 lines changed)

@ -4,101 +4,164 @@ package main
import ( import (
"embed" "embed"
"net/url" "fmt"
"os" "os"
"path/filepath" "path/filepath"
"time"
"github.com/google/uuid"
"go.wit.com/dev/alexflint/arg" "go.wit.com/dev/alexflint/arg"
"go.wit.com/lib/gui/prep" pb "go.wit.com/lib/protobuf/virtbuf"
"go.wit.com/lib/protobuf/virtpb" "go.wit.com/lib/virtigolib"
"go.wit.com/log" "go.wit.com/log"
) )
// sent via -ldflags var Version string
var VERSION string
var BUILDTIME string
var ARGNAME string = "virtigo"
//go:embed resources/* //go:embed resources/*
var resources embed.FS var resources embed.FS
func main() { func main() {
me = new(virtigoT)
prep.Bash(ARGNAME, argv.DoAutoComplete) // this line should be: prep.Bash(argv)
me.myGui = prep.Gui() // prepares the GUI package for go-args
me.pp = arg.MustParse(&argv)
if me.pp == nil {
me.pp.WriteHelp(os.Stdout)
os.Exit(0)
}
if os.Getenv("VIRTIGO_HOME") == "" { if os.Getenv("VIRTIGO_HOME") == "" {
homeDir, _ := os.UserHomeDir() homeDir, _ := os.UserHomeDir()
fullpath := filepath.Join(homeDir, ".config/virtigo") fullpath := filepath.Join(homeDir, ".config/virtigo")
os.Setenv("VIRTIGO_HOME", fullpath) os.Setenv("VIRTIGO_HOME", fullpath)
} }
var pp *arg.Parser
pp = arg.MustParse(&argv)
me.clusters = virtpb.NewClusters() if pp == nil {
pp.WriteHelp(os.Stdout)
os.Exit(0)
}
if argv.List != nil { // if argv.Daemon {
err := me.clusters.ConfigLoad() // log.DaemonMode(true)
// }
// set defaults
me.unstable = time.Now() // initialize the grid as unstable
me.changed = false
me.hmap = make(map[*pb.Hypervisor]*HyperT)
// how long a droplet can be missing until it's declared dead
me.unstableTimeout = 17 * time.Second
me.missingDropletTimeout = time.Minute // not sure the difference between these values
// how often to poll the hypervisors
me.hyperPollDelay = 5 * time.Second
// how long the cluster must be stable before new droplets can be started
me.clusterStableDuration = 37 * time.Second
me.cluster = pb.InitCluster()
if err := me.cluster.ConfigLoad(); err != nil {
log.Info("config load error", err)
os.Exit(-1)
}
loop := me.cluster.DropletsAll() // get the list of droplets
for loop.Scan() {
d := loop.Droplet()
if d == nil {
fmt.Println("d == nil")
os.Exit(-1)
}
fmt.Println("Droplet UUID:", d.Uuid)
if d.Current == nil {
d.Current = new(pb.Current)
}
d.SetState(pb.DropletState_OFF)
log.Info("droplet", d.Hostname)
}
hmm := "pihole.wit.com"
d := me.cluster.FindDropletByName(hmm)
if d == nil {
log.Info("did not find found droplet", hmm)
} else {
log.Info("found droplet", d.Hostname, d)
}
var newEvents []*pb.Event
// sanity check the cluster & droplets
if _, _, err := ValidateDroplets(); err != nil {
log.Info("todo: add flag to ignore. for now, fix problems in the config file.")
os.Exit(0)
}
newe, err := ValidateDiskFilenames()
if err != nil {
log.Info(err)
os.Exit(-1)
}
// this is a new droplet. add it to the cluster
for _, e := range newe {
newEvents = append(newEvents, e)
}
ValidateUniqueFilenames()
for _, filename := range argv.Xml {
domcfg, err := virtigolib.ReadXml(filename)
if err != nil { if err != nil {
badExit(err) // parsing the libvirt xml file failed
log.Info("error:", filename, err)
log.Info("readXml() error", filename)
log.Info("readXml() error", err)
log.Info("libvirt XML will have to be fixed by hand")
os.Exit(-1)
} }
doList() // this is a new droplet. add it to the cluster
okExit("virtigo list") log.Info("Add XML Droplet here", domcfg.Name)
} _, newe, err := virtigolib.AddDomainDroplet(me.cluster, domcfg)
if argv.Droplet != nil {
exit(doDroplet())
}
me.myGui.Start() // loads the GUI toolkit
if argv.Admin {
err := me.clusters.ConfigLoad()
if err != nil { if err != nil {
badExit(err) log.Info("addDomainDroplet() error", filename)
log.Info("addDomainDroplet() error", err)
log.Info("libvirt XML will have to be fixed by hand")
os.Exit(-1)
} }
for _, e := range newe {
doAdminGui() newEvents = append(newEvents, e)
okExit("admin close") }
}
for i, e := range newEvents {
log.Info(i, "Event:", e.Droplet, e.FieldName, "orig:", e.OrigVal, "new:", e.NewVal)
me.changed = true
} }
if argv.Server != "" { if me.changed {
log.Info("start admin interface") if err := me.cluster.ConfigSave(); err != nil {
admin := new(adminT) log.Info("configsave error", err)
var err error os.Exit(-1)
admin.url, err = url.Parse(argv.Server)
if err != nil {
badExit(err)
} }
err = me.clusters.ConfigLoad() log.Info("XML changes saved in protobuf config")
if err != nil { os.Exit(0)
clusters := virtpb.NewClusters() }
c := new(virtpb.Cluster) if len(argv.Xml) != 0 {
c.Uuid = uuid.New().String() log.Info("No XML changes found")
c.URL = append(c.URL, argv.Server) os.Exit(0)
clusters.Append(c)
virtpb.ConfigWriteTEXT(clusters, "cluster.text")
badExit(err)
}
admin.doAdminGui()
okExit("admin close")
} }
if argv.Daemon { // initialize each hypervisor
if err := doDaemon(); err != nil { for _, pbh := range me.cluster.H.Hypervisors {
badExit(err) // this is a new unknown droplet (not in the config file)
} var h *HyperT
okExit("") h = new(HyperT)
h.pb = pbh
h.lastDroplets = make(map[string]time.Time)
h.lastpoll = time.Now()
me.hmap[pbh] = h
me.hypers = append(me.hypers, h)
log.Log(EVENT, "config new hypervisors", h.pb.Hostname)
} }
doGui() // start making our forge GUI // start the watchdog polling for each hypervisor
startHTTP() // sit here forever for _, h := range me.hypers {
log.Info("starting polling on", h.pb.Hostname)
// start a watchdog on each hypervisor
go h.NewWatchdog()
}
// sit here
startHTTP()
} }
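NewWatchdog() itself is not part of this compare; the sketch below shows the kind of ticker-driven loop the go h.NewWatchdog() calls above presumably run, using only stand-in types plus the 5-second poll delay set in main(). The real implementation may differ.

package main

import (
    "log"
    "time"
)

// Hyper is a stand-in for HyperT; only the fields this sketch needs.
type Hyper struct {
    hostname string
    dog      *time.Ticker
    lastpoll time.Time
}

// poll is a stand-in for pollHypervisor(); here it only records the poll time.
func (h *Hyper) poll() {
    h.lastpoll = time.Now()
    log.Println("polled", h.hostname)
}

// watchdog runs a ticker-driven poll loop, one goroutine per hypervisor.
func (h *Hyper) watchdog(delay time.Duration, stop <-chan struct{}) {
    h.dog = time.NewTicker(delay)
    defer h.dog.Stop()
    for {
        select {
        case <-h.dog.C:
            h.poll()
        case <-stop:
            return
        }
    }
}

func main() {
    stop := make(chan struct{})
    hypers := []*Hyper{{hostname: "farm03"}, {hostname: "farm04"}}
    for _, h := range hypers {
        go h.watchdog(5*time.Second, stop) // me.hyperPollDelay in main.go
    }
    time.Sleep(12 * time.Second)
    close(stop)
}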
84
me.go
@ -1,84 +0,0 @@
package main
// RFC implementation
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"net"
"net/http"
"os/user"
"time"
)
// Function to create a persistent TCP connection
func createPersistentConnection(host string) (net.Conn, error) {
dialer := &net.Dialer{
Timeout: 10 * time.Second,
KeepAlive: 30 * time.Second,
}
conn, err := dialer.Dial("tcp", host)
if err != nil {
return nil, fmt.Errorf("failed to establish connection: %w", err)
}
return conn, nil
}
func mesocket() {
host := "example.com:80"
// Establish a persistent TCP connection
conn, err := createPersistentConnection(host)
if err != nil {
fmt.Println("Error creating connection:", err)
return
}
defer conn.Close()
// Custom transport that forces HTTP requests to use our existing connection
transport := &http.Transport{
DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
fmt.Println("Reusing existing TCP connection")
return conn, nil
},
DisableKeepAlives: false, // Ensure Keep-Alive is enabled
}
client := &http.Client{
Transport: transport,
Timeout: 10 * time.Second,
}
url := "http://example.com/endpoint"
data := []byte(`{"message": "Hello"}`)
// Create an HTTP request
req, err := http.NewRequest(http.MethodPost, url, bytes.NewBuffer(data))
if err != nil {
fmt.Println("Error creating request:", err)
return
}
usr, _ := user.Current()
req.Header.Set("author", usr.Username)
req.Header.Set("Connection", "keep-alive") // Keep connection alive
// Perform the HTTP request
resp, err := client.Do(req)
if err != nil {
fmt.Println("Error performing request:", err)
return
}
defer resp.Body.Close()
// Read and print the response
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
fmt.Println("Error reading response:", err)
return
}
fmt.Println("Response:", string(body))
}
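The deleted me.go above pins every request to one pre-dialed TCP connection. For comparison, a minimal sketch of the standard approach: net/http's default Transport already keeps idle connections open and reuses them, provided each response body is drained and closed (the URL is illustrative):

package main

import (
    "fmt"
    "io"
    "net/http"
    "time"
)

func main() {
    // One shared client: the default Transport pools idle connections and
    // reuses them as long as each response body is fully read and closed.
    client := &http.Client{Timeout: 10 * time.Second}

    for i := 0; i < 3; i++ {
        resp, err := client.Get("http://example.com/") // illustrative URL
        if err != nil {
            fmt.Println("request error:", err)
            return
        }
        // Draining and closing the body is what lets the connection go back
        // into the pool for the next iteration.
        io.Copy(io.Discard, resp.Body)
        resp.Body.Close()
        fmt.Println("request", i, "status", resp.Status)
    }
}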
42
poll.go
@ -6,7 +6,7 @@ import (
"time" "time"
"go.wit.com/lib/gui/shell" "go.wit.com/lib/gui/shell"
"go.wit.com/lib/protobuf/virtpb" pb "go.wit.com/lib/protobuf/virtbuf"
"go.wit.com/log" "go.wit.com/log"
"google.golang.org/protobuf/types/known/timestamppb" "google.golang.org/protobuf/types/known/timestamppb"
) )
@ -69,7 +69,7 @@ func (h *HyperT) pollHypervisor() {
log.Log(POLL, start, "STATE:", state, "rest:", fields[2:]) log.Log(POLL, start, "STATE:", state, "rest:", fields[2:])
// update the status to ON // update the status to ON
d.SetState(virtpb.DropletState_ON) d.SetState(pb.DropletState_ON)
// set the LastPoll time to now // set the LastPoll time to now
now := time.Now() now := time.Now()
@ -121,28 +121,28 @@ func (h *HyperT) pollHypervisor() {
// should delete this from h.lastDroplets // should delete this from h.lastDroplets
continue continue
} }
if d.Current.State == virtpb.DropletState_OFF { if d.Current.State == pb.DropletState_OFF {
log.Info(header, "droplet timed out and is off. remove from h.lastDroplets[] slice") log.Info(header, "droplet timed out and is off. remove from h.lastDroplets[] slice")
delete(h.lastDroplets, name) delete(h.lastDroplets, name)
continue continue
} }
// everything below here is dumb and needs to be rethought // everything below here is dumb and needs to be rethought
if d.Current.State != virtpb.DropletState_UNKNOWN { if d.Current.State != pb.DropletState_UNKNOWN {
d.SetState(virtpb.DropletState_UNKNOWN) d.SetState(pb.DropletState_UNKNOWN)
log.Info(header, "set state UNKNOWN here", name) log.Info(header, "set state UNKNOWN here", name)
} }
if d.Current.State == virtpb.DropletState_UNKNOWN { if d.Current.State == pb.DropletState_UNKNOWN {
if dur > time.Minute*2 { if dur > time.Minute*2 {
// what this means is the droplet probably wasn't migrated or the migrate failed // what this means is the droplet probably wasn't migrated or the migrate failed
// where should this be checked? the status needs to be changed to OFF // where should this be checked? the status needs to be changed to OFF
s := virtpb.FormatDuration(dur) s := pb.FormatDuration(dur)
log.Info(header, "UNKNOWN state for more than 2 minutes (clearing out ?)", name, s) log.Info(header, "UNKNOWN state for more than 2 minutes (clearing out ?)", name, s)
// it might be safe to set the status to OFF here. not really. this poll needs // it might be safe to set the status to OFF here. not really. this poll needs
// to be moved somewhere else. there needs to be a new goroutine not tied to the // to be moved somewhere else. there needs to be a new goroutine not tied to the
// hypervisor // hypervisor
d.SetState(virtpb.DropletState_OFF) d.SetState(pb.DropletState_OFF)
} }
} }
} }
@ -158,15 +158,15 @@ func uptimeCheck() (bool, string) {
var total int var total int
var working int var working int
var failed int var failed int
var missing []*virtpb.Droplet var missing []*pb.Droplet
var unknown int var unknown int
var unknownList []string var unknownList []string
loop := me.cluster.DropletsAll() // get the list of droplets loop := me.cluster.DropletsAll() // get the list of droplets
for loop.Scan() { for loop.Scan() {
d := loop.Next() d := loop.Droplet()
total += 1 total += 1
if d.StartState != virtpb.DropletState_ON { if d.StartState != pb.DropletState_ON {
continue continue
} }
dur := time.Since(d.Current.LastPoll.AsTime()) // Calculate the elapsed time dur := time.Since(d.Current.LastPoll.AsTime()) // Calculate the elapsed time
@ -175,19 +175,19 @@ func uptimeCheck() (bool, string) {
hname = d.Current.Hypervisor hname = d.Current.Hypervisor
} }
switch d.Current.State { switch d.Current.State {
case virtpb.DropletState_UNKNOWN: case pb.DropletState_UNKNOWN:
// log.Info("SKIP. hostname has not been polled yet", d.Hostname, d.hname) // log.Info("SKIP. hostname has not been polled yet", d.Hostname, d.hname)
unknown += 1 unknown += 1
unknownList = append(unknownList, d.Hostname) unknownList = append(unknownList, d.Hostname)
case virtpb.DropletState_ON: case pb.DropletState_ON:
if dur > me.missingDropletTimeout { if dur > me.missingDropletTimeout {
log.Info("GOOD STATE MISSING", d.Hostname, hname, virtpb.FormatDuration(dur)) log.Info("GOOD STATE MISSING", d.Hostname, hname, pb.FormatDuration(dur))
good = false good = false
d.SetState(virtpb.DropletState_UNKNOWN) d.SetState(pb.DropletState_UNKNOWN)
failed += 1 failed += 1
continue continue
} }
l := virtpb.FormatDuration(dur) l := pb.FormatDuration(dur)
if l == "" { if l == "" {
log.Info("DUR IS EMPTY", dur) log.Info("DUR IS EMPTY", dur)
missing = append(missing, d) missing = append(missing, d)
@ -195,13 +195,13 @@ func uptimeCheck() (bool, string) {
} }
working += 1 working += 1
// log.Info("GOOD STATE ON", d.Hostname, d.hname, "dur =", l) // log.Info("GOOD STATE ON", d.Hostname, d.hname, "dur =", l)
case virtpb.DropletState_OFF: case pb.DropletState_OFF:
log.Info("OFF STATE", d.StartState, d.Hostname, hname, virtpb.FormatDuration(dur)) log.Info("OFF STATE", d.StartState, d.Hostname, hname, pb.FormatDuration(dur))
good = false good = false
failed += 1 failed += 1
// missing = append(missing, d) // missing = append(missing, d)
default: default:
log.Info("WTF STATE", d.StartState, d.Hostname, hname, "Current.State =", d.Current.State, virtpb.FormatDuration(dur)) log.Info("WTF STATE", d.StartState, d.Hostname, hname, "Current.State =", d.Current.State, pb.FormatDuration(dur))
good = false good = false
failed += 1 failed += 1
missing = append(missing, d) missing = append(missing, d)
@ -214,7 +214,7 @@ func uptimeCheck() (bool, string) {
summary += fmt.Sprintf("missing = %d ", len(missing)) summary += fmt.Sprintf("missing = %d ", len(missing))
} }
if unknown > 0 { if unknown > 0 {
summary += fmt.Sprintf("unknown = %d %+v", unknown, unknownList) summary += fmt.Sprintf("unknown = %d ", unknown, unknownList)
} }
if failed > 0 { if failed > 0 {
summary += fmt.Sprintf("failed = %d ", failed) summary += fmt.Sprintf("failed = %d ", failed)
@ -225,7 +225,7 @@ func uptimeCheck() (bool, string) {
summary += "(killcount=" + fmt.Sprintf("%d", me.killcount) + ")" summary += "(killcount=" + fmt.Sprintf("%d", me.killcount) + ")"
} }
last := time.Since(me.unstable) last := time.Since(me.unstable)
s := strings.TrimSpace(virtpb.FormatDuration(last)) s := strings.TrimSpace(pb.FormatDuration(last))
if last > me.clusterStableDuration { if last > me.clusterStableDuration {
// the cluster has not been stable for 10 seconds // the cluster has not been stable for 10 seconds
summary += "(stable=" + s + ")" summary += "(stable=" + s + ")"
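Every branch of uptimeCheck() above keys off how long ago d.Current.LastPoll was set. A minimal sketch of that staleness test with a stand-in struct, since the virtbuf/virtpb types are not part of this compare:

package main

import (
    "fmt"
    "time"

    "google.golang.org/protobuf/types/known/timestamppb"
)

// current is a stand-in for the droplet's Current protobuf message.
type current struct {
    LastPoll *timestamppb.Timestamp
}

// stale reports whether the droplet has not been seen by any hypervisor poll
// within the allowed window (me.missingDropletTimeout in poll.go).
func stale(c *current, timeout time.Duration) (bool, time.Duration) {
    dur := time.Since(c.LastPoll.AsTime())
    return dur > timeout, dur
}

func main() {
    c := &current{LastPoll: timestamppb.New(time.Now().Add(-90 * time.Second))}
    missing, dur := stale(c, time.Minute)
    fmt.Printf("missing=%v last polled %s ago\n", missing, dur.Round(time.Second))
}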
60
post.go
@ -2,10 +2,8 @@ package main
import ( import (
"bytes" "bytes"
"fmt"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"net/url"
"os" "os"
"os/user" "os/user"
@ -38,61 +36,3 @@ func httpPost(url string, data []byte) ([]byte, error) {
} }
return body, nil return body, nil
} }
func parseURL() (string, string) {
parsedURL, err := url.Parse(argv.Server)
if err != nil {
fmt.Println("Error parsing URL:", err)
return "", ""
}
// Extract Host (includes hostname/IP and port if present)
host := parsedURL.Host
fmt.Println("Host:", host)
// Extract Hostname (without port)
hostname := parsedURL.Hostname()
fmt.Println("Hostname:", hostname)
// Extract Port
port := parsedURL.Port()
fmt.Println("Port:", port)
return parsedURL.Hostname(), parsedURL.Port()
}
func gusPost(port string, dest string) ([]byte, error) {
var err error
var req *http.Request
gus, _ := parseURL()
url := fmt.Sprintf("http://%s:%d/%s?port=%s&dest=%s", gus, 2522, "enable", port, dest)
data := []byte("hello world")
req, err = http.NewRequest(http.MethodPost, url, bytes.NewBuffer(data))
usr, _ := user.Current()
req.Header.Set("author", usr.Username)
hostname, _ := os.Hostname()
req.Header.Set("hostname", hostname)
req.Header.Set("port", port)
req.Header.Set("dest", dest)
log.Printf("gusPust url(%s) port(%s) dest(%s) hostname(%s)\n", url, port, dest, hostname)
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
log.Error(err)
return nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Error(err)
return body, err
}
return body, nil
}
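One caveat in the removed gusPost() above: the error from http.NewRequest is assigned but req is used before the error is checked. A sketch of the same post-with-headers pattern with the check in place; the URL here is illustrative, not a real virtigo endpoint:

package main

import (
    "bytes"
    "fmt"
    "io"
    "net/http"
    "os"
    "os/user"
)

// postWithHeaders builds the request, checks the error before touching req,
// and sets the same author/hostname headers gusPost() used.
func postWithHeaders(url string, data []byte) ([]byte, error) {
    req, err := http.NewRequest(http.MethodPost, url, bytes.NewBuffer(data))
    if err != nil {
        return nil, fmt.Errorf("building request failed: %w", err)
    }
    if usr, err := user.Current(); err == nil {
        req.Header.Set("author", usr.Username)
    }
    if hostname, err := os.Hostname(); err == nil {
        req.Header.Set("hostname", hostname)
    }

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()
    return io.ReadAll(resp.Body)
}

func main() {
    body, err := postWithHeaders("http://localhost:8080/example", []byte("hello"))
    fmt.Println(string(body), err)
}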
@ -10,17 +10,16 @@ import (
"math/rand" "math/rand"
"time" "time"
"go.wit.com/lib/protobuf/virtpb" pb "go.wit.com/lib/protobuf/virtbuf"
"go.wit.com/log"
) )
func isClusterStable() (string, error) { func isClusterStable() (string, error) {
// how long has the cluster been stable? // how long has the cluster been stable?
// wait until it is stable. use this to throttle droplet starts // wait until it is stable. use this to throttle droplet starts
dur := time.Since(me.unstable) dur := time.Since(me.unstable)
good := fmt.Sprintln("trying to start droplet here. grid stable for: ", virtpb.FormatDuration(dur)) good := fmt.Sprintln("trying to start droplet here. grid stable for: ", pb.FormatDuration(dur))
if dur < me.unstableTimeout { if dur < me.unstableTimeout {
tmp := virtpb.FormatDuration(me.unstableTimeout) tmp := pb.FormatDuration(me.unstableTimeout)
err := "grid is still too unstable (unstable timeout = " + tmp + ")\n" err := "grid is still too unstable (unstable timeout = " + tmp + ")\n"
return good + err, errors.New(err) return good + err, errors.New(err)
} }
@ -29,7 +28,7 @@ func isClusterStable() (string, error) {
// for now, because sometimes this should write to stdout and // for now, because sometimes this should write to stdout and
// sometimes to http socket, it returns a string // sometimes to http socket, it returns a string
func Start(id string) (string, error) { func Start(name string) (string, error) {
var result string var result string
if s, err := isClusterStable(); err != nil { if s, err := isClusterStable(); err != nil {
@ -38,25 +37,20 @@ func Start(id string) (string, error) {
} }
// lookup the droplet by name // lookup the droplet by name
d := me.cluster.FindDropletByUuid(id) d := me.cluster.FindDropletByName(name)
if d == nil { if d == nil {
result = "can't start unknown droplet: " + id result = "can't start unknown droplet: " + name
return result, errors.New(result) return result, errors.New(result)
} }
// validate the droplet // validate the droplet
if err := ValidateDroplet(d); err != nil { if err := ValidateDroplet(d); err != nil {
log.Info("ValidateDroplet() failed", err)
result = "ValidateDroplet() failed droplet " + d.Hostname result = "ValidateDroplet() failed droplet " + d.Hostname
return result, err return result, err
} }
if d.Current == nil {
d.Current = new(virtpb.Current)
}
// is the droplet already on? // is the droplet already on?
if d.Current.State == virtpb.DropletState_ON { if d.Current.State == pb.DropletState_ON {
result = "EVENT start droplet " + d.Hostname + " is already ON" result = "EVENT start droplet " + d.Hostname + " is already ON"
return result, errors.New(result) return result, errors.New(result)
} }
@ -70,12 +64,12 @@ func Start(id string) (string, error) {
if ok { if ok {
return result + b, nil return result + b, nil
} }
return result + b, errors.New("start " + d.Hostname + " on hypervisor " + h.pb.Hostname) return result + b, errors.New("start " + name + " on hypervisor " + h.pb.Hostname)
} }
// skip hypervisors marked inactive // skip hypervisors marked inactive
if h.pb.Active != true { if h.pb.Active != true {
result += fmt.Sprintln("hypervisor is inactive:", d.Hostname, "for", h.pb.Hostname, h.pb.Active) result += fmt.Sprintln("hypervisor is inactive:", name, "for", h.pb.Hostname, h.pb.Active)
continue continue
} }
@ -86,10 +80,10 @@ func Start(id string) (string, error) {
if ok { if ok {
return result + b, nil return result + b, nil
} }
return result + b, errors.New("start " + d.Hostname + " on hypervisor " + h.pb.Hostname) return result + b, errors.New("start " + name + " on hypervisor " + h.pb.Hostname)
} }
result += fmt.Sprintln("hypervisor ready:", d.Hostname, "for", h.pb.Hostname, h.pb.Active) result += fmt.Sprintln("hypervisor ready:", name, "for", h.pb.Hostname, h.pb.Active)
pool = append(pool, h) pool = append(pool, h)
} }
@ -110,5 +104,5 @@ func Start(id string) (string, error) {
if ok { if ok {
return result + output, nil return result + output, nil
} }
return result + output, errors.New("start " + d.Hostname + " on hypervisor " + h.pb.Hostname) return result + output, errors.New("start " + name + " on hypervisor " + h.pb.Hostname)
} }
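The hunk that actually picks a hypervisor out of pool is elided in this compare; given the math/rand import, the sketch below assumes a uniform random choice over the ready pool, with a stand-in hyper type:

package main

import (
    "errors"
    "fmt"
    "math/rand"
)

// hyper is a stand-in for HyperT with just a hostname.
type hyper struct{ hostname string }

// pickHypervisor chooses a target from the ready pool built in Start(); the
// elided code may weigh hypervisors differently, this one picks uniformly.
func pickHypervisor(pool []*hyper) (*hyper, error) {
    if len(pool) == 0 {
        return nil, errors.New("no hypervisors are ready for this droplet")
    }
    return pool[rand.Intn(len(pool))], nil
}

func main() {
    pool := []*hyper{{"farm03"}, {"farm04"}, {"farm05"}}
    h, err := pickHypervisor(pool)
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println("starting on", h.hostname)
}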
@ -1,17 +1,12 @@
package main package main
import ( import (
"net/url"
"time" "time"
"go.wit.com/dev/alexflint/arg" pb "go.wit.com/lib/protobuf/virtbuf"
"go.wit.com/gui"
"go.wit.com/lib/gadgets"
"go.wit.com/lib/gui/prep"
"go.wit.com/lib/protobuf/virtpb"
) )
var me *virtigoT var me virtigoT
// disable the GUI // disable the GUI
func (b *virtigoT) Disable() { func (b *virtigoT) Disable() {
@ -25,41 +20,24 @@ func (b *virtigoT) Enable() {
// this app's variables // this app's variables
type virtigoT struct { type virtigoT struct {
pp *arg.Parser // go-arg parser cluster *pb.NewCluster // basic cluster settings
myGui *prep.GuiPrep // the gui toolkit handle // newc *pb.NewCluster // basic cluster settings
e *virtpb.Events // virt protobuf events e *pb.Events // virtbuf events
hmap map[*virtpb.Hypervisor]*HyperT // map to the local struct hmap map[*pb.Hypervisor]*HyperT // map to the local struct
names []string // ? names []string
hypers []*HyperT // notsure hypers []*HyperT
killcount int // how many times virtigo-d has had to been killed killcount int
unstable time.Time // the last time the cluster was incorrect unstable time.Time // the last time the cluster was incorrect
changed bool // have things changed? changed bool
hyperPollDelay time.Duration // how often to poll the hypervisors hyperPollDelay time.Duration // how often to poll the hypervisors
unstableTimeout time.Duration // how long a droplet can be unstable until it's declared dead unstableTimeout time.Duration // how long a droplet can be unstable until it's declared dead
clusterStableDuration time.Duration // how long the cluster must be stable before new droplets can be started clusterStableDuration time.Duration // how long the cluster must be stable before new droplets can be started
missingDropletTimeout time.Duration // how long a droplet can be missing for missingDropletTimeout time.Duration // how long a droplet can be missing for
status *gui.Node // the cluster status
lastuptime *gui.Node // the last time uptime was checked by Kuma
clusters *virtpb.Clusters // clusters protobuf
cmap map[*virtpb.Cluster]*adminT // map to local GUI objects and the protobuf
gwin *gadgets.GenericWindow // main window
cluster *virtpb.OldCluster // basic cluster settings
// admin *adminT // the admin struct
}
// cluster "admin" mode
type adminT struct {
cluster *virtpb.Cluster // the cluster protobuf
uptime *gui.Node // the uptime message
dwin *stdDropletTableWin // the droplet window
hwin *stdHypervisorTableWin // the hypervisor window
ewin *stdEventTableWin // the events window
url *url.URL // URL for the cloud
} }
// the stuff that is needed for a hypervisor // the stuff that is needed for a hypervisor
type HyperT struct { type HyperT struct {
pb *virtpb.Hypervisor // the Hypervisor protobuf pb *pb.Hypervisor // the Hypervisor protobuf
dog *time.Ticker // the watchdog timer itself dog *time.Ticker // the watchdog timer itself
lastpoll time.Time // the last time the hypervisor polled lastpoll time.Time // the last time the hypervisor polled
lastDroplets map[string]time.Time // the vm's in the last poll lastDroplets map[string]time.Time // the vm's in the last poll
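One detail of the struct above: hmap is keyed by a Hypervisor pointer, so lookups match on pointer identity, not on hostname or value, which is presumably why main() stores and later reuses the exact same *Hypervisor values from the cluster config. A tiny sketch with stand-in types:

package main

import "fmt"

type hypervisorPB struct{ Hostname string }
type hyperT struct{ pb *hypervisorPB }

func main() {
    // A map keyed by pointer (like me.hmap) matches on identity, not on value.
    hmap := make(map[*hypervisorPB]*hyperT)

    original := &hypervisorPB{Hostname: "farm03"}
    hmap[original] = &hyperT{pb: original}

    copyPB := &hypervisorPB{Hostname: "farm03"} // same value, different pointer

    _, okOriginal := hmap[original]
    _, okCopy := hmap[copyPB]
    fmt.Println(okOriginal, okCopy) // true false
}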
@ -15,13 +15,14 @@ package main
import ( import (
"errors" "errors"
"fmt"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
"github.com/google/uuid" "github.com/google/uuid"
"go.wit.com/lib/protobuf/virtpb" pb "go.wit.com/lib/protobuf/virtbuf"
"go.wit.com/log" "go.wit.com/log"
) )
@ -29,7 +30,7 @@ import (
func ValidateUniqueMac(mac string) bool { func ValidateUniqueMac(mac string) bool {
loop := me.cluster.DropletsAll() // get the list of droplets loop := me.cluster.DropletsAll() // get the list of droplets
for loop.Scan() { for loop.Scan() {
d := loop.Next() d := loop.Droplet()
for _, n := range d.Networks { for _, n := range d.Networks {
if n.Mac == mac { if n.Mac == mac {
log.Info("duplicate MAC", n.Mac, "in droplet", d.Hostname) log.Info("duplicate MAC", n.Mac, "in droplet", d.Hostname)
@ -41,9 +42,9 @@ func ValidateUniqueMac(mac string) bool {
} }
// records all the known paths. this should go in the protobuf // records all the known paths. this should go in the protobuf
func addClusterFilepath(dir string) *virtpb.Event { func addClusterFilepath(dir string) *pb.Event {
var found bool = false var found bool = false
var e *virtpb.Event var e *pb.Event
for _, d := range me.cluster.Dirs { for _, d := range me.cluster.Dirs {
if d == dir { if d == dir {
// found dir // found dir
@ -54,7 +55,7 @@ func addClusterFilepath(dir string) *virtpb.Event {
if !found { if !found {
if dir != "." { if dir != "." {
// make a new Add Event // make a new Add Event
e = virtpb.NewAddEvent(nil, "Add Cluster Directory", dir) e = pb.NewAddEvent(nil, "Add Cluster Directory", dir)
me.cluster.Dirs = append(me.cluster.Dirs, dir) me.cluster.Dirs = append(me.cluster.Dirs, dir)
} }
} }
@ -62,12 +63,12 @@ func addClusterFilepath(dir string) *virtpb.Event {
} }
// returns the droplet using a filename // returns the droplet using a filename
func lookupFilename(filename string) *virtpb.Droplet { func lookupFilename(filename string) *pb.Droplet {
filebase := filepath.Base(filename) filebase := filepath.Base(filename)
loop := me.cluster.DropletsAll() // get the list of droplets loop := me.cluster.DropletsAll() // get the list of droplets
for loop.Scan() { for loop.Scan() {
d := loop.Next() d := loop.Droplet()
for _, disk := range d.Disks { for _, disk := range d.Disks {
if filebase == disk.Filename { if filebase == disk.Filename {
return d return d
@ -84,7 +85,7 @@ func ValidateUniqueFilenames() bool {
loop := me.cluster.DropletsAll() // get the list of droplets loop := me.cluster.DropletsAll() // get the list of droplets
for loop.Scan() { for loop.Scan() {
d := loop.Next() d := loop.Droplet()
for _, disk := range d.Disks { for _, disk := range d.Disks {
filename := disk.Filename filename := disk.Filename
addClusterFilepath(disk.Filepath) addClusterFilepath(disk.Filepath)
@ -109,12 +110,12 @@ func ValidateUniqueFilenames() bool {
return ok return ok
} }
func ValidateDiskFilenames() ([]*virtpb.Event, error) { func ValidateDiskFilenames() ([]*pb.Event, error) {
var alle []*virtpb.Event var alle []*pb.Event
loop := me.cluster.DropletsAll() // get the list of droplets loop := me.cluster.DropletsAll() // get the list of droplets
for loop.Scan() { for loop.Scan() {
d := loop.Next() d := loop.Droplet()
var found bool = false var found bool = false
for _, disk := range d.Disks { for _, disk := range d.Disks {
filename := disk.Filename filename := disk.Filename
@ -162,10 +163,43 @@ func ValidateDiskFilenames() ([]*virtpb.Event, error) {
return alle, nil return alle, nil
} }
// consistency check. run on a regular basis func getNewMac() string {
// // mac address map to check for duplicates
var macs map[string]string
macs = make(map[string]string)
loop := me.cluster.DropletsAll() // get the list of droplets
for loop.Scan() {
d := loop.Droplet()
for _, n := range d.Networks {
// log.Println("network:", n.Mac, d.Uuid, d.Hostname)
if _, ok := macs[n.Mac]; ok {
// UUID already exists
log.Info("duplicate MAC", n.Mac, macs[n.Mac])
log.Info("duplicate MAC", n.Mac, d.Hostname)
return ""
}
macs[n.Mac] = d.Hostname
}
}
var i int = 9
var mac string
for {
mac = fmt.Sprintf("22:22:22:22:22:%02d", i)
if _, ok := macs[mac]; ok {
log.Info("MAC already defined", mac, macs[mac])
i += 1
continue
}
log.Info("using new MAC:", mac)
return mac
}
return ""
}
// runs on startup. dies if there are duplicates // runs on startup. dies if there are duplicates
// the config file must then be edited by hand for now // the config file must then be edited by hand
func ValidateDroplets() (map[string]string, map[string]string, error) { func ValidateDroplets() (map[string]string, map[string]string, error) {
// uuid map to check for duplicates // uuid map to check for duplicates
var umap map[string]string var umap map[string]string
@ -177,7 +211,7 @@ func ValidateDroplets() (map[string]string, map[string]string, error) {
loop := me.cluster.DropletsAll() // get the list of droplets loop := me.cluster.DropletsAll() // get the list of droplets
for loop.Scan() { for loop.Scan() {
d := loop.Next() d := loop.Droplet()
// Generate a new UUID // Generate a new UUID
if d.Uuid == "" { if d.Uuid == "" {
u := uuid.New() u := uuid.New()
@ -190,11 +224,11 @@ func ValidateDroplets() (map[string]string, map[string]string, error) {
log.Info("duplicate UUID", d.Uuid, umap[d.Uuid]) log.Info("duplicate UUID", d.Uuid, umap[d.Uuid])
log.Info("duplicate UUID", d.Uuid, d.Hostname) log.Info("duplicate UUID", d.Uuid, d.Hostname)
if d.Archive == nil { if d.Archive == nil {
d.Archive = new(virtpb.Archive) d.Archive = new(pb.Archive)
log.Info("d.Archive was nil for", d.Hostname) log.Info("d.Archive was nil for", d.Hostname)
// os.Exit(-1) // os.Exit(-1)
} }
d.Archive.Reason = virtpb.DropletArchive_DUP d.Archive.Reason = pb.DropletArchive_DUP
continue continue
// return umap, macs, errors.New("duplicate UUID: " + d.Uuid) // return umap, macs, errors.New("duplicate UUID: " + d.Uuid)
} }
@ -224,7 +258,7 @@ func searchForDuplicateUUIDs() {
/* /*
// remove from the slice // remove from the slice
func deleteDroplet(bad int) { func deleteDroplet(bad int) {
var all *virtpb.Droplets var all *pb.Droplets
all = me.cluster.DeleteDroplet(b *db.Droplet) all = me.cluster.DeleteDroplet(b *db.Droplet)
fmt.Println("deleting", bad, all.Droplets[bad].Hostname) fmt.Println("deleting", bad, all.Droplets[bad].Hostname)
@ -243,11 +277,11 @@ func deleteDroplet(bad int) {
// check qemu domain id // check qemu domain id
// check spice and vnc ports // check spice and vnc ports
// check filenames // check filenames
func ValidateDroplet(check *virtpb.Droplet) error { func ValidateDroplet(check *pb.Droplet) error {
// check for duplicate uuid's // check for duplicate uuid's
loop := me.cluster.DropletsAll() // get the list of droplets loop := me.cluster.DropletsAll() // get the list of droplets
for loop.Scan() { for loop.Scan() {
d := loop.Next() d := loop.Droplet()
if check == d { if check == d {
continue continue
} }
@ -255,29 +289,21 @@ func ValidateDroplet(check *virtpb.Droplet) error {
// UUID already exists // UUID already exists
log.Info("duplicate UUID", d.Uuid, d.Hostname) log.Info("duplicate UUID", d.Uuid, d.Hostname)
log.Info("duplicate UUID", d.Uuid, check.Hostname) log.Info("duplicate UUID", d.Uuid, check.Hostname)
// d.Archive = new(virtpb.DropletArchive) // d.Archive = new(pb.DropletArchive)
if d.Archive == nil { if d.Archive == nil {
log.Info("d.Archive == nil") log.Info("d.Archive == nil")
os.Exit(-1) os.Exit(-1)
} }
d.Archive.Reason = virtpb.DropletArchive_DUP d.Archive.Reason = pb.DropletArchive_DUP
// return errors.New("duplicate UUID: " + d.Uuid) // return errors.New("duplicate UUID: " + d.Uuid)
} }
} }
// check for duplicate mac addresses // check for duplicate mac addresses
for _, checkn := range check.Networks { for _, checkn := range check.Networks {
log.Info("found mac = ", checkn.Mac, check.Hostname)
if checkn.Mac == "" {
checkn.Mac = getNewMac()
if err := me.cluster.ConfigSave(); err != nil {
log.Info("configsave error", err)
os.Exit(-1)
}
}
loop := me.cluster.DropletsAll() // get the list of droplets loop := me.cluster.DropletsAll() // get the list of droplets
for loop.Scan() { for loop.Scan() {
d := loop.Next() d := loop.Droplet()
if check == d { if check == d {
continue continue
} }
@ -298,28 +324,28 @@ func ValidateDroplet(check *virtpb.Droplet) error {
return nil return nil
} }
func setUniqueSpicePort(check *virtpb.Droplet) error { func setUniqueSpicePort(check *pb.Droplet) error {
var ports map[int64]*virtpb.Droplet var ports map[int64]*pb.Droplet
ports = make(map[int64]*virtpb.Droplet) ports = make(map[int64]*pb.Droplet)
// check spice ports // check spice ports
// checkn.SpicePort = getUniqueSpicePort() // checkn.SpicePort = getUniqueSpicePort()
loop := me.cluster.DropletsAll() // get the list of droplets loop := me.cluster.DropletsAll() // get the list of droplets
for loop.Scan() { for loop.Scan() {
d := loop.Next() d := loop.Droplet()
if d.SpicePort == 0 { if d.SpicePort == 0 {
continue continue
} }
if dup, ok := ports[d.SpicePort]; ok { if dup, ok := ports[d.SpicePort]; ok {
// dup := ports[d.SpicePort] // dup := ports[d.SpicePort]
log.Warn("duplicate ports", d.SpicePort, d.Hostname, d.Current.State) log.Warn("duplicate ports", d.SpicePort, d.Hostname, d.Current.State)
if d.Current.State != virtpb.DropletState_ON { if d.Current.State != pb.DropletState_ON {
// hack for now. should be safe to erase this // hack for now. should be safe to erase this
d.SpicePort = 0 d.SpicePort = 0
log.Warn("erasing port for non-ON droplet", d.SpicePort, d.Hostname, d.Current.State) log.Warn("erasing port for non-ON droplet", d.SpicePort, d.Hostname, d.Current.State)
} }
log.Warn("duplicate ports", dup.SpicePort, dup.Hostname, dup.Current.State) log.Warn("duplicate ports", dup.SpicePort, dup.Hostname, dup.Current.State)
if dup.Current.State != virtpb.DropletState_ON { if dup.Current.State != pb.DropletState_ON {
// hack for now. should be safe to erase this // hack for now. should be safe to erase this
dup.SpicePort = 0 dup.SpicePort = 0
log.Warn("erasing port for non-ON droplet", dup.SpicePort, dup.Hostname, dup.Current.State) log.Warn("erasing port for non-ON droplet", dup.SpicePort, dup.Hostname, dup.Current.State)
@ -369,4 +395,6 @@ func setUniqueSpicePort(check *virtpb.Droplet) error {
return nil return nil
} }
// for loop never gets here
return nil
} }
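getNewMac() above hands out addresses from a fixed 22:22:22:22:22:NN range after scanning for duplicates. As an alternative sketch only (not what the code does), a random locally administered unicast MAC can be generated by setting bit 0x02 and clearing bit 0x01 of the first octet, then checking it against the same in-use map; the helper name is illustrative:

package main

import (
    "crypto/rand"
    "fmt"
)

// randomLocalMac returns a locally administered, unicast MAC that is not
// already present in inUse (the role played by the macs map in getNewMac()).
func randomLocalMac(inUse map[string]string) (string, error) {
    for attempt := 0; attempt < 100; attempt++ {
        var b [6]byte
        if _, err := rand.Read(b[:]); err != nil {
            return "", err
        }
        b[0] = (b[0] | 0x02) &^ 0x01 // locally administered, unicast
        mac := fmt.Sprintf("%02x:%02x:%02x:%02x:%02x:%02x", b[0], b[1], b[2], b[3], b[4], b[5])
        if _, dup := inUse[mac]; dup {
            continue
        }
        return mac, nil
    }
    return "", fmt.Errorf("could not find an unused MAC")
}

func main() {
    inUse := map[string]string{"22:22:22:22:22:09": "pihole.wit.com"}
    mac, err := randomLocalMac(inUse)
    fmt.Println(mac, err)
}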
@ -1,33 +0,0 @@
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
// Use of this source code is governed by the GPL 3.0
package main
// An app to submit patches for the 30 GO GUI repos
import (
"go.wit.com/lib/gadgets"
"go.wit.com/log"
)
func createWindow() *gadgets.GenericWindow {
createWindow := gadgets.NewGenericWindow("Create Droplet", "settings")
createWindow.Custom = func() {
log.Warn("create window close")
}
grid := createWindow.Group.RawGrid()
gadgets.NewBasicEntry(grid, "memory")
grid.NextRow()
grid.NewLabel("name")
grid.NewTextbox("something")
grid.NextRow()
grid.NewButton("Start", func() {
log.Info("make a box")
})
return createWindow
}
@ -1,121 +0,0 @@
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
// Use of this source code is governed by the GPL 3.0
package main
// An app to submit patches for the 30 GO GUI repos
import (
"fmt"
"strconv"
"go.wit.com/gui"
"go.wit.com/lib/gadgets"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/log"
)
func (admin *adminT) createDropletWindow() *gadgets.GenericWindow {
d := new(virtpb.Droplet)
win := gadgets.NewGenericWindow("Create Droplet "+d.Hostname, "settings")
win.Custom = func() {
log.Warn("edit window close")
}
grid := win.Group.RawGrid()
var save *gui.Node
grid.NewLabel("name")
name := grid.NewTextbox("new2.wit.com")
d.Hostname = "new2.wit.com"
name.SetText(d.Hostname)
name.Custom = func() {
if d.Hostname == name.String() {
return
}
d.Hostname = name.String()
log.Info("changed droplet name to", d.Hostname)
save.Enable()
}
grid.NextRow()
mem := gadgets.NewBasicEntry(grid, "memory (GB)")
mem.SetText("16")
d.Memory = int64(16 * 1024 * 2024 * 1024)
grid.NextRow()
mem.Custom = func() {
newmem, err := strconv.Atoi(mem.String())
if err != nil {
log.Info("mem value error", mem.String(), err)
mem.SetText(fmt.Sprintf("%d", d.Memory/(1024*1024*1024)))
return
}
if newmem < 1 {
log.Info("mem can not be < 1")
mem.SetText(fmt.Sprintf("%d", d.Memory/(1024*1024*1024)))
return
}
d.Memory = int64(newmem * (1024 * 2024 * 1024))
log.Info("changed mem value. new val =", d.Memory)
save.Enable()
}
grid.NextRow() // each entry is on it's own row
cpus := gadgets.NewBasicEntry(grid, "cpus")
cpus.SetText("4")
d.Cpus = int64(4)
cpus.Custom = func() {
newcpu, err := strconv.Atoi(cpus.String())
if err != nil {
log.Info("cpus value error", cpus.String(), err)
cpus.SetText(fmt.Sprintf("%d", d.Cpus))
return
}
if newcpu < 1 {
log.Info("cpus can not be < 1")
cpus.SetText(fmt.Sprintf("%d", d.Cpus))
return
}
d.Cpus = int64(newcpu)
log.Info("changed cpus value. new val =", d.Cpus)
save.Enable()
}
grid.NextRow() // each entry is on it's own row
/*
save = grid.NewButton("postEvent() EDIT", func() {
log.Info("save droplet changes here")
e := new(virtpb.Event)
e.Etype = virtpb.EventType_EDIT
e.Droplet = d
if err := admin.postEvent(e); err != nil {
log.Info("event edit err", err)
} else {
log.Info("admin.postEvent() worked (?)")
}
})
*/
save = grid.NewButton("Create", func() {
log.Info("save droplet changes here")
e := new(virtpb.Event)
e.Etype = virtpb.EventType_ADD
e.Droplet = d
if err := admin.postEvent(e); err != nil {
log.Info("event edit err", err)
} else {
log.Info("admin.postEvent() worked (?)")
}
})
// save.Disable()
return win
}
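The memory callbacks above convert between the GB text entry and d.Memory with 1024*2024*1024 in some places and 1024*1024*1024 in others; one GiB is 1024*1024*1024 bytes. A small sketch that keeps the factor in one pair of helpers (the names are illustrative):

package main

import "fmt"

const gib = int64(1024 * 1024 * 1024) // one GiB in bytes

// gbToBytes and bytesToGB keep the GiB factor in one place instead of
// repeating it inline in every Custom callback.
func gbToBytes(gb int) int64  { return int64(gb) * gib }
func bytesToGB(b int64) int64 { return b / gib }

func main() {
    mem := gbToBytes(16)
    fmt.Println(mem, "bytes =", bytesToGB(mem), "GB")
}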
@ -1,125 +0,0 @@
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
// Use of this source code is governed by the GPL 3.0
package main
// An app to submit patches for the 30 GO GUI repos
import (
"fmt"
"strconv"
"go.wit.com/gui"
"go.wit.com/lib/gadgets"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/log"
)
func (admin *adminT) editDropletWindow(d *virtpb.Droplet) *gadgets.GenericWindow {
win := gadgets.NewGenericWindow("Edit Droplet "+d.Hostname, "settings")
win.Custom = func() {
log.Warn("edit window close")
}
grid := win.Group.RawGrid()
var save *gui.Node
grid.NewLabel("name")
name := grid.NewTextbox("something")
name.SetText(d.Hostname)
name.Custom = func() {
if d.Hostname == name.String() {
return
}
d.Hostname = name.String()
log.Info("changed droplet name to", d.Hostname)
save.Enable()
}
grid.NextRow()
mem := gadgets.NewBasicEntry(grid, "memory (GB)")
mem.SetText(fmt.Sprintf("%d", d.Memory/(1024*1024*1024)))
grid.NextRow()
mem.Custom = func() {
newmem, err := strconv.Atoi(mem.String())
if err != nil {
log.Info("mem value error", mem.String(), err)
mem.SetText(fmt.Sprintf("%d", d.Memory/(1024*1024*1024)))
return
}
if newmem < 1 {
log.Info("mem can not be < 1")
mem.SetText(fmt.Sprintf("%d", d.Memory/(1024*1024*1024)))
return
}
d.Memory = int64(newmem * (1024 * 2024 * 1024))
log.Info("changed mem value. new val =", d.Memory)
save.Enable()
}
cpus := gadgets.NewBasicEntry(grid, "cpus")
cpus.SetText(fmt.Sprintf("%d", d.Cpus))
grid.NextRow()
cpus.Custom = func() {
newcpu, err := strconv.Atoi(cpus.String())
if err != nil {
log.Info("cpus value error", cpus.String(), err)
cpus.SetText(fmt.Sprintf("%d", d.Cpus))
return
}
if newcpu < 1 {
log.Info("cpus can not be < 1")
cpus.SetText(fmt.Sprintf("%d", d.Cpus))
return
}
d.Cpus = int64(newcpu)
log.Info("changed cpus value. new val =", d.Cpus)
save.Enable()
}
grid.NewLabel("hypervisor")
hyper := grid.NewDropdown()
hyper.AddText("farm03")
hyper.AddText("farm04")
hyper.AddText("farm05")
if d.Current != nil {
hyper.SetText(d.Current.Hypervisor)
} else {
hyper.SetText("farm03")
}
grid.NextRow()
grid.NewButton("Start", func() {
log.Info("make a box")
})
save = grid.NewButton("save", func() {
log.Info("save droplet changes here")
e := new(virtpb.Event)
e.Etype = virtpb.EventType_EDIT
e.Droplet = d
/*
e.Droplet = new(virtpb.Droplet)
e.Droplet.Uuid = d.Uuid
e.Droplet.Cpus = 4
e.Droplet.Memory = 8 * (1024 * 1024 * 1024)
e.Droplet.Hostname = name.String()
*/
if err := admin.postEvent(e); err != nil {
log.Info("event edit err", err)
}
})
save.Disable()
grid.NewButton("dump", func() {
t := d.FormatTEXT()
log.Info(t)
})
return win
}
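Both droplet windows repeat the same parse, range-check, revert-on-error dance for the memory and cpus textboxes. A sketch of a helper that factors that out; the gui/gadgets widgets are left out since they are not needed to show the logic:

package main

import (
    "fmt"
    "strconv"
)

// parsePositiveInt mirrors the validation in the mem and cpus callbacks:
// parse the textbox string, reject garbage and values below 1, and tell the
// caller whether to revert the widget to the previous value.
func parsePositiveInt(text string, previous int64) (int64, bool) {
    n, err := strconv.Atoi(text)
    if err != nil || n < 1 {
        return previous, false // caller should SetText back to previous
    }
    return int64(n), true
}

func main() {
    for _, input := range []string{"4", "abc", "0", "12"} {
        val, ok := parsePositiveInt(input, 4)
        fmt.Printf("input=%q -> value=%d accepted=%v\n", input, val, ok)
    }
}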
@ -1,227 +0,0 @@
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
// Use of this source code is governed by the GPL 3.0
package main
import (
"fmt"
"strings"
"sync"
"time"
"go.wit.com/gui"
"go.wit.com/lib/gadgets"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/log"
)
type stdDropletTableWin struct {
sync.Mutex
win *gadgets.GenericWindow // the machines gui window
box *gui.Node // the machines gui parent box widget
pb *virtpb.Droplets // the droplets protobuf
TB *virtpb.DropletsTable // the gui table buffer
update bool // if the window should be updated
Close func() // this function is called when the window is closed
admin *adminT
}
func (w *stdDropletTableWin) Toggle() {
if w == nil {
return
}
if w.win == nil {
return
}
w.win.Toggle()
}
func newDropletsWindow(admin *adminT) *stdDropletTableWin {
dwin := new(stdDropletTableWin)
dwin.admin = admin
dwin.win = gadgets.NewGenericWindow("virtigo current droplets", "Options")
dwin.win.Custom = func() {
log.Info("test delete window here")
}
grid := dwin.win.Group.RawGrid()
grid.NewButton("Active", func() {
var found *virtpb.Droplets
found = virtpb.NewDroplets()
all := admin.cluster.Droplets.All()
for all.Scan() {
vm := all.Next()
if vm.Current.State != virtpb.DropletState_ON {
continue
}
found.Append(vm)
}
dwin.doActiveDroplets(found)
})
grid.NewButton("Inactive", func() {
var found *virtpb.Droplets
found = virtpb.NewDroplets()
all := admin.cluster.Droplets.All()
for all.Scan() {
vm := all.Next()
if vm.Current.State == virtpb.DropletState_ON {
continue
}
found.Append(vm)
}
dwin.doInactiveDroplets(found)
})
grid.NewButton("Create", func() {
log.Info("create droplet here")
admin.createDropletWindow()
})
// make a box at the bottom of the window for the protobuf table
dwin.box = dwin.win.Bottom.Box().SetProgName("TBOX")
return dwin
}
// default window for active running droplets
func (dw *stdDropletTableWin) doInactiveDroplets(pb *virtpb.Droplets) {
dw.Lock()
defer dw.Unlock()
// erase the old table
if dw.TB != nil {
dw.TB.Delete()
dw.TB = nil
}
// init the table
dw.pb = pb
t := dw.pb.NewTable("DropletsPB Off")
t.NewUuid()
t.SetParent(dw.box)
dropedit := t.AddButtonFunc("Edit", func(d *virtpb.Droplet) string {
return "edit"
})
dropedit.Custom = func(d *virtpb.Droplet) {
log.Info("edit droplet here", d.Hostname)
dw.admin.editDropletWindow(d)
}
dropon := t.AddButtonFunc("Start", func(d *virtpb.Droplet) string {
return "poweron"
})
dropon.Custom = func(d *virtpb.Droplet) {
log.Info("start droplet here", d.Hostname)
log.Info("should start droplet here")
log.Info(d.SprintHeader())
e := new(virtpb.Event)
e.Etype = virtpb.EventType_POWERON
e.DropletUuid = d.Uuid
if err := dw.admin.postEvent(e); err != nil {
log.Info("droplet start err", err)
}
}
vp := t.AddButtonFunc("Verify Config", func(p *virtpb.Droplet) string {
return p.Hostname
})
vp.Custom = func(d *virtpb.Droplet) {
log.Info("open config window", d.Hostname)
}
t.AddMemory()
t.AddCpus()
// final setup and display the table
dw.TB = t
f := func(e *virtpb.Droplet) {
log.Info("Triggered. do something here", e.Hostname)
// m.Enabled = true
}
dw.TB.Custom(f)
dw.TB.ShowTable()
}
// default window for active running droplets
func (dw *stdDropletTableWin) doActiveDroplets(pb *virtpb.Droplets) {
dw.Lock()
defer dw.Unlock()
if dw.TB != nil {
dw.TB.Delete()
dw.TB = nil
}
dw.pb = pb
t := dw.pb.NewTable("DropletsPB On")
t.NewUuid()
t.SetParent(dw.box)
serial := t.AddButtonFunc("serial", func(p *virtpb.Droplet) string {
return "ttyS0"
})
serial.Custom = func(d *virtpb.Droplet) {
log.Printf("run %s: socat telnet somewhere %s:%d\n", d.Hostname, argv.Server, d.SpicePort)
log.Info("socat TCP-LISTEN:5000,reuseaddr,fork EXEC:\"virsh console myvm\"")
}
fb := t.AddButtonFunc("fb0 console", func(p *virtpb.Droplet) string {
return "remmina"
})
fb.Custom = func(d *virtpb.Droplet) {
log.Printf("connect to %s on %s: remmina spice://%s:%d\n", d.Hostname, d.Current.Hypervisor, argv.Server, 10000+d.SpicePort)
data, err := gusPost(fmt.Sprintf("%d", 10000+d.SpicePort), d.Current.Hypervisor)
log.Info("data", string(data), "err =", err)
}
// t.AddHostname()
vp := t.AddButtonFunc("Hostname", func(p *virtpb.Droplet) string {
return p.Hostname
})
vp.Custom = func(d *virtpb.Droplet) {
log.Info("edit droplet here", d.Hostname)
dw.admin.editDropletWindow(d)
}
t.AddStringFunc("location", func(d *virtpb.Droplet) string {
return d.Current.Hypervisor
})
t.AddMemory()
t.AddCpus()
t.AddSpicePort()
t.AddTimeFunc("age", func(d *virtpb.Droplet) time.Time {
age := d.Current.OnSince.AsTime()
// log.Info("age", d.Hostname, virtpb.FormatDuration(time.Since(age)))
return age
})
t.AddStringFunc("State", func(d *virtpb.Droplet) string {
if d.Current.State == virtpb.DropletState_ON {
return "ON"
}
if d.Current.State == virtpb.DropletState_OFF {
return "OFF"
}
return "UNKNOWN"
})
t.AddStringFunc("mac addr", func(d *virtpb.Droplet) string {
var macs []string
for _, n := range d.Networks {
macs = append(macs, n.Mac)
}
tmp := strings.Join(macs, "\n")
return strings.TrimSpace(tmp)
})
t.ShowTable()
// display the protobuf
dw.TB = t
f := func(e *virtpb.Droplet) {
log.Info("Triggered. do something here", e.Hostname)
// m.Enabled = true
}
dw.TB.Custom(f)
}
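The Active and Inactive buttons above build a fresh droplet list by walking every droplet and filtering on Current.State. The same filter with plain stand-in types, since the virtpb iterator API is outside this compare:

package main

import "fmt"

type state int

const (
    stateUnknown state = iota
    stateOff
    stateOn
)

type droplet struct {
    Hostname string
    State    state
}

// filterByState keeps only the droplets whose current state matches, the same
// shape as the Active/Inactive button callbacks.
func filterByState(all []*droplet, want state) []*droplet {
    var found []*droplet
    for _, d := range all {
        if d.State != want {
            continue
        }
        found = append(found, d)
    }
    return found
}

func main() {
    all := []*droplet{
        {"pihole.wit.com", stateOn},
        {"test.example.com", stateOff},
    }
    for _, d := range filterByState(all, stateOn) {
        fmt.Println("active:", d.Hostname)
    }
}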
@ -1,77 +0,0 @@
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
// Use of this source code is governed by the GPL 3.0
package main
import (
"sync"
"go.wit.com/gui"
"go.wit.com/lib/gadgets"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/log"
)
type stdEventTableWin struct {
sync.Mutex
win *gadgets.GenericWindow // the machines gui window
box *gui.Node // the machines gui parent box widget
pb *virtpb.Events // the protobuf
TB *virtpb.EventsTable // the gui table buffer
update bool // if the window should be updated
}
func (w *stdEventTableWin) Toggle() {
if w == nil {
return
}
if w.win == nil {
return
}
w.win.Toggle()
}
func newEventsWindow() *stdEventTableWin {
dwin := new(stdEventTableWin)
dwin.win = gadgets.NewGenericWindow("virtigo current events", "things to do")
dwin.win.Custom = func() {
log.Info("test delete window here")
}
// make a box at the bottom of the window for the protobuf table
dwin.box = dwin.win.Bottom.Box().SetProgName("TBOX")
return dwin
}
// default table protobuf window
func (dw *stdEventTableWin) doStdEvents(pb *virtpb.Events) {
dw.Lock()
defer dw.Unlock()
// erase the old table
if dw.TB != nil {
dw.TB.Delete()
dw.TB = nil
}
// init the table
dw.pb = pb
t := dw.pb.NewTable("EventsPB Off")
t.NewUuid()
t.SetParent(dw.box)
// pick the columns
t.AddDropletName()
t.AddDropletUuid()
t.AddHypervisor()
// display the protobuf
dw.TB = t
f := func(e *virtpb.Event) {
log.Info("std EventWindow() something here", e.Droplet)
// m.Enabled = true
}
dw.TB.Custom(f)
dw.TB.ShowTable()
}
@ -1,163 +0,0 @@
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
// Use of this source code is governed by the GPL 3.0
package main
import (
"fmt"
"sync"
"time"
"go.wit.com/gui"
"go.wit.com/lib/gadgets"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/log"
)
type stdHypervisorTableWin struct {
sync.Mutex
win *gadgets.GenericWindow // the machines gui window
box *gui.Node // the machines gui parent box widget
pb *virtpb.Hypervisors // the protobuf
TB *virtpb.HypervisorsTable // the gui table buffer
update bool // if the window should be updated
}
func (w *stdHypervisorTableWin) Toggle() {
if w == nil {
return
}
if w.win == nil {
return
}
w.win.Toggle()
}
func newHypervisorsWindow() *stdHypervisorTableWin {
dwin := new(stdHypervisorTableWin)
dwin.win = gadgets.NewGenericWindow("virtigo current hypervisors", "things to do")
dwin.win.Custom = func() {
log.Info("test delete window here")
}
// make a box at the bottom of the window for the protobuf table
dwin.box = dwin.win.Bottom.Box().SetProgName("TBOX")
return dwin
}
// default table protobuf window
func (dw *stdHypervisorTableWin) doStdHypervisors(pb *virtpb.Hypervisors) {
dw.Lock()
defer dw.Unlock()
// erase the old table
if dw.TB != nil {
dw.TB.Delete()
dw.TB = nil
}
// init the table
dw.pb = pb
t := dw.pb.NewTable("HypervisorsPB Off")
t.NewUuid()
t.SetParent(dw.box)
// pick the columns
t.AddHostname()
t.AddMemory()
t.AddCpus()
t.AddKillcount()
t.AddTimeFunc("last poll", func(h *virtpb.Hypervisor) time.Time {
// hm := me.hmap[h]
// tmp := hm.lastpoll
// log.Info("poll age", h.Hostname, virtpb.FormatDuration(time.Since(tmp)))
return time.Now()
})
t.AddStringFunc("droplets", func(h *virtpb.Hypervisor) string {
/*
var totalDroplets int
var totalUnknownDroplets int
// dur := time.Since(h.lastpoll)
// tmp := virtpb.FormatDuration(dur)
// fmt.Fprintln(w, h.pb.Hostname, "killcount =", h.killcount, "lastpoll:", tmp)
hm := me.hmap[h]
for name, _ := range hm.lastDroplets {
totalDroplets += 1
d := me.cluster.FindDropletByName(name)
if d == nil {
totalUnknownDroplets += 1
}
}
log.Printf("Total Droplets %d total libvirt only droplets = %d\n", totalDroplets, totalUnknownDroplets)
return fmt.Sprintf("%d", totalDroplets)
*/
return "todo"
})
// display the protobuf
dw.TB = t
f := func(e *virtpb.Hypervisor) {
log.Info("std HypervisorWindow() something here", e.Hostname)
// m.Enabled = true
}
dw.TB.Custom(f)
dw.TB.ShowTable()
}
// default table protobuf window
func (dw *stdHypervisorTableWin) doNewStdHypervisors(pb *virtpb.Hypervisors) {
dw.Lock()
defer dw.Unlock()
// erase the old table
if dw.TB != nil {
dw.TB.Delete()
dw.TB = nil
}
// init the table
dw.pb = pb
t := dw.pb.NewTable("HypervisorsPB Off")
t.NewUuid()
t.SetParent(dw.box)
// pick the columns
t.AddHostname()
t.AddMemory()
t.AddCpus()
t.AddKillcount()
t.AddTimeFunc("last poll", func(h *virtpb.Hypervisor) time.Time {
// hm := me.hmap[h]
// tmp := hm.lastpoll
// log.Info("poll age", h.Hostname, virtpb.FormatDuration(time.Since(tmp)))
return time.Now()
})
t.AddStringFunc("droplets", func(h *virtpb.Hypervisor) string {
var totalDroplets int
var totalUnknownDroplets int
// dur := time.Since(h.lastpoll)
// tmp := virtpb.FormatDuration(dur)
// fmt.Fprintln(w, h.pb.Hostname, "killcount =", h.killcount, "lastpoll:", tmp)
hm := me.hmap[h]
for name, _ := range hm.lastDroplets {
totalDroplets += 1
d := me.cluster.FindDropletByName(name)
if d == nil {
totalUnknownDroplets += 1
}
}
// log.Printf("Total Droplets %d total libvirt only droplets = %d\n", totalDroplets, totalUnknownDroplets)
return fmt.Sprintf("%d", totalDroplets)
})
// display the protobuf
dw.TB = t
f := func(e *virtpb.Hypervisor) {
log.Info("std HypervisorWindow() something here", e.Hostname)
// m.Enabled = true
}
dw.TB.Custom(f)
dw.TB.ShowTable()
}
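The commented-out droplets column above counts every domain a hypervisor reported in its last poll and, separately, the ones the cluster config does not know about. A stand-alone sketch of that count, with a lookup callback standing in for me.cluster.FindDropletByName():

package main

import (
    "fmt"
    "time"
)

// countDroplets walks the domains seen in the hypervisor's last poll and
// counts how many are unknown to the cluster config.
func countDroplets(lastDroplets map[string]time.Time, lookup func(string) bool) (total, unknown int) {
    for name := range lastDroplets {
        total++
        if !lookup(name) {
            unknown++
        }
    }
    return total, unknown
}

func main() {
    seen := map[string]time.Time{
        "pihole.wit.com":    time.Now(),
        "stray-test-domain": time.Now(),
    }
    known := map[string]bool{"pihole.wit.com": true}
    total, unknown := countDroplets(seen, func(name string) bool { return known[name] })
    fmt.Printf("droplets=%d libvirt-only=%d\n", total, unknown)
}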