Compare commits
185 Commits
Author | SHA1 | Date |
---|---|---|
|
dbcd3b5686 | |
|
f67c81d499 | |
|
f39c7d4a51 | |
|
65563eb8e2 | |
|
bf01596f30 | |
|
d261a220df | |
|
7fd5089917 | |
|
82ebc25936 | |
|
4faca63da8 | |
|
c8a50fbb18 | |
|
4332b3d31a | |
|
2c5701eeca | |
|
a24448a9d4 | |
|
4121e66e01 | |
|
a4dd085a47 | |
|
d3f809b25d | |
|
8eda4cf2da | |
|
3cd1f64d15 | |
|
69ee3b95d0 | |
|
16558e1b72 | |
|
1fd6b1d36d | |
|
a7e639cdb0 | |
|
03b03cb197 | |
|
a97379d76f | |
|
46472fa868 | |
|
599fe4251f | |
|
50d16b3d86 | |
|
19b1588512 | |
|
0a452c005b | |
|
7ee465da56 | |
|
8517dbc948 | |
|
d2d04da122 | |
|
e0970840e2 | |
|
89f870f1f9 | |
|
9449b5699e | |
|
68bf08bd6c | |
|
f9515280cf | |
|
30a5eb56a2 | |
|
01d7e92cdb | |
|
73196c3231 | |
|
19e29d21d7 | |
|
affb055c56 | |
|
6e948e0736 | |
|
3a62f10d20 | |
|
69b0d4c013 | |
|
522723e946 | |
|
32e2753007 | |
|
9f9a52312e | |
|
6e111ba862 | |
|
55a3ed7187 | |
|
4dd0f0eaba | |
|
70cc9944ad | |
|
5ea2e5999b | |
|
bd2ddb278c | |
|
0697375d44 | |
|
a4a2d7a01f | |
|
4883c26547 | |
|
1e03cd4377 | |
|
3c9d1d6f1d | |
|
0da809ae42 | |
|
80ff65c6d2 | |
|
84aeec7dde | |
|
bd1ed6f513 | |
|
173e9ef1f4 | |
|
1c2bdfa398 | |
|
ea7cf0e744 | |
|
1321b8566a | |
|
0f1bdad780 | |
|
15fe83812b | |
|
638539c840 | |
|
71e0065240 | |
|
301fe567e2 | |
|
2c1c3482fe | |
|
8b588eeba5 | |
|
173520b42e | |
|
0a28c45a6c | |
|
a10dab96ff | |
|
7fa6c2e2de | |
|
70634ec66e | |
|
0076d3cb2d | |
|
720c2e6576 | |
|
83faa62e18 | |
|
ca9ad75283 | |
|
c1d86fc324 | |
|
26cd0f7709 | |
|
d51c4627f7 | |
|
efc3032d83 | |
|
3562fc780e | |
|
b28ae96cd4 | |
|
b4ef8b76b1 | |
|
3c520003ed | |
|
913b18737b | |
|
fb51876e8e | |
|
4a58352ec2 | |
|
e6ea90f8de | |
|
22111183a5 | |
|
41673f3170 | |
|
eacf3b8bef | |
|
bf52632cb7 | |
|
2a18f506c7 | |
|
410015c33e | |
|
c8a69bdb73 | |
|
9d08114b93 | |
|
8724a07b0d | |
|
c26699571d | |
|
42d54a418d | |
|
c12d3a2dbb | |
|
de5f5c6a85 | |
|
3f7171fff2 | |
|
8fc2fbd9c9 | |
|
d38865a6cf | |
|
7288595efc | |
|
212b582060 | |
|
d948581300 | |
|
71f83d4000 | |
|
d0767eb984 | |
|
5d1729f99b | |
|
cec509ae7f | |
|
3c1efcba0e | |
|
7837182d53 | |
|
2e8281d067 | |
|
6d5c5c6072 | |
|
419ae0ad5f | |
|
34b6891507 | |
|
a50f387b96 | |
|
161bfe395e | |
|
b4518e8b82 | |
|
9020957ee7 | |
|
0dc393896c | |
|
11f0cd97b5 | |
|
fd3e14bcc6 | |
|
30884aff3b | |
|
f42091a2ce | |
|
7320fceb8d | |
|
61b954ecca | |
|
0fd0283372 | |
|
3893ac7e3d | |
|
b35c04414a | |
|
030af1bcfb | |
|
a5eee861ea | |
|
9b94785cd2 | |
|
4d43c36db5 | |
|
15f48a01ab | |
|
cf79357bba | |
|
b6dd67b73d | |
|
7cc0bd9b2c | |
|
8dc0bf6fac | |
|
89a43a46bf | |
|
9451e162a0 | |
|
7af7c876e4 | |
|
fbd0d35660 | |
|
34edf97565 | |
|
03eec14619 | |
|
da2a24c549 | |
|
a3cffbf1c0 | |
|
9528d4c0fb | |
|
a4c54d6483 | |
|
178974e42f | |
|
fb722a3dc9 | |
|
9518e70d7a | |
|
7b3e24740d | |
|
1effa9c745 | |
|
fea819956f | |
|
78fbc9631c | |
|
63e750ce89 | |
|
aa742d099d | |
|
acdd6e9c5b | |
|
2df36637bf | |
|
d13ac244ed | |
|
37a053dae9 | |
|
d09f4e25c2 | |
|
3ce3a0d7f6 | |
|
f15148b63e | |
|
e4c089f70e | |
|
07da0fa0ee | |
|
a1593b0b88 | |
|
57fdc99855 | |
|
23e9319afb | |
|
19880a81f3 | |
|
b77eb0db4e | |
|
c8d734e290 | |
|
0b1c4f92cd | |
|
6de8f66794 | |
|
8a1321169e | |
|
367addedff |
|
@ -2,5 +2,7 @@
|
|||
go.mod
|
||||
go.sum
|
||||
|
||||
files/
|
||||
|
||||
virtigo
|
||||
virtigod
|
||||
|
|
78
Makefile
78
Makefile
|
@ -1,19 +1,53 @@
|
|||
.PHONY: build
|
||||
|
||||
VERSION = $(shell git describe --tags)
|
||||
BUILDTIME = $(shell date +%Y.%m.%d)
|
||||
|
||||
# create the go.mod and go.sum if this is a brand new repo
|
||||
# REDOMOD = $(shell if [ -e go.mod ]; then echo go.mod; else echo no go mod; fi)
|
||||
REDOMOD = $(shell if [ -e go.sum ]; then echo go.sum exists; else GO111MODULE= go mod init; GO111MODULE= go mod tidy; fi)
|
||||
|
||||
all:
|
||||
GO111MODULE=off go build -v -ldflags "-X main.Version=${VERSION} -X gui.GUIVERSION=${VERSION}"
|
||||
./virtigo --version
|
||||
./virtigo
|
||||
all: install
|
||||
@echo build worked
|
||||
virtigo list droplets
|
||||
virtigo list droplets --on
|
||||
virtigo droplet show --name check.lab.wit.org
|
||||
virtigo droplet start --name check.lab.wit.org
|
||||
|
||||
build: goimports vet
|
||||
GO111MODULE=off go build \
|
||||
-ldflags "-X main.VERSION=${VERSION} -X main.BUILDTIME=${BUILDTIME} -X gui.GUIVERSION=${VERSION}"
|
||||
|
||||
verbose: goimports vet
|
||||
GO111MODULE=off go build -v -x \
|
||||
-ldflags "-X main.VERSION=${VERSION} -X main.BUILDTIME=${BUILDTIME} -X gui.GUIVERSION=${VERSION}"
|
||||
|
||||
install: goimports vet
|
||||
GO111MODULE=off go install -v -x \
|
||||
-ldflags "-X main.VERSION=${VERSION} -X main.BUILDTIME=${BUILDTIME} -X gui.GUIVERSION=${VERSION}"
|
||||
|
||||
andlabs: verbose
|
||||
./virtigo --gui andlabs
|
||||
|
||||
# makes a .deb package
|
||||
debian:
|
||||
rm -f ~/incoming/virtigo*deb
|
||||
go-deb --no-gui --repo go.wit.com/apps/virtigo
|
||||
|
||||
xml-add:
|
||||
./virtigo --add-xml /home/nfs3/xml/*.xml
|
||||
# ./virtigo --add-xml /etc/libvirt/qemu/*xml
|
||||
./virtigo --libvirt ~/libvirt/*.xml --xml-ignore-disk=true
|
||||
|
||||
start-all-droplets:
|
||||
xml-add-save:
|
||||
./virtigo --libvirt ~/libvirt/*.xml --xml-ignore-disk=true --save
|
||||
|
||||
start-pihole.wit.com: build
|
||||
rm -f /tmp/blahcarr.xml /tmp/pihole.wit.com.xml
|
||||
./virtigo --start pihole.wit.com
|
||||
./virtigo --libvirt /tmp/pihole.wit.com.xml
|
||||
|
||||
start-pihole.wit.com-http:
|
||||
curl --silent http://localhost:8080/start?hostname=pihole.wit.com
|
||||
|
||||
old-start-all-droplets:
|
||||
curl --silent http://localhost:8080/start?start=git.wit.org
|
||||
curl --silent http://localhost:8080/start?start=go.wit.com
|
||||
curl --silent http://localhost:8080/start?start=rdate.wit.com
|
||||
|
@ -45,6 +79,10 @@ release-build:
|
|||
goimports:
|
||||
goimports -w *.go
|
||||
|
||||
vet:
|
||||
@GO111MODULE=off go vet
|
||||
@echo this go binary package builds okay
|
||||
|
||||
# remake the go.mod and go.sum files
|
||||
redomod:
|
||||
rm -f go.*
|
||||
|
@ -54,6 +92,7 @@ redomod:
|
|||
clean:
|
||||
rm -f go.*
|
||||
rm -f virtigo*
|
||||
go-mod-clean purge
|
||||
|
||||
# git clone the sources and all the golang dependancies into ~/go/src
|
||||
# if you don't have go-clone, you can get it from http://go.wit.com/
|
||||
|
@ -62,3 +101,26 @@ git-clone:
|
|||
go-clone --recursive --go-src --no-work go.wit.com/apps/virtigo
|
||||
go-clone --recursive --go-src --no-work go.wit.com/apps/gowebd
|
||||
go-clone --recursive --go-src --no-work go.wit.com/lib/daemons/virtigod
|
||||
|
||||
http-uptime:
|
||||
curl --silent http://localhost:8080/uptime
|
||||
|
||||
http-droplets:
|
||||
curl --silent http://localhost:8080/droplets
|
||||
|
||||
http-missing:
|
||||
curl --silent http://localhost:8080/missing
|
||||
|
||||
http-dumplibvirtxml:
|
||||
curl --silent http://localhost:8080//dumplibvirtxml
|
||||
|
||||
protogen:
|
||||
go-clone google.golang.org/protobuf
|
||||
cd ~/go/src/google.golang.org/protobuf/cmd/protoc-gen-go && go install
|
||||
|
||||
gocui: install
|
||||
virtigo --gui gocui --gui-verbose --gui-file ../../toolkits/gocui/gocui.so --admin
|
||||
# virtigo --gui gocui --gui-verbose --gui-file ../../toolkits/gocui/gocui.so --admin >/tmp/forge.log 2>&1
|
||||
|
||||
log:
|
||||
journalctl -f -xeu virtigod.service
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
virtigo: a control panel for your virtual machine cluster
|
||||
# virtigo: a control panel for your virtual machine cluster
|
||||
|
||||
This is an attempt to make something that should:
|
||||
There is no greater thrill for a linux sys admin than running your own cloud.
|
||||
|
||||
# This is an attempt to make something that should:
|
||||
|
||||
* Maintain the master list of virtual machines that should be running at all times
|
||||
* Work with a cluster of dom0 hypervisiors via libvirt and/or qemu
|
||||
|
@ -12,7 +14,7 @@ This is an attempt to make something that should:
|
|||
* Work in GUI mode (GTK/QT/etc) but ALSO the console (ncurses)
|
||||
* GPL'd with the intent for use with homelab and personal hobbyists
|
||||
|
||||
Notes & Goals:
|
||||
# Notes & Goals:
|
||||
|
||||
* Be super easy to use.
|
||||
* Automatically map access to serial and graphical consoles
|
||||
|
@ -26,7 +28,7 @@ Notes & Goals:
|
|||
* Automatic live migration to decommission nodes
|
||||
* Implement iptable rules via the virtigo daemon
|
||||
|
||||
Inspired by:
|
||||
# Inspired by:
|
||||
|
||||
* kvm
|
||||
* virt-manager
|
234
addDroplet.go
234
addDroplet.go
|
@ -1,234 +0,0 @@
|
|||
// Copyright 2024 WIT.COM Inc Licensed GPL 3.0
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
pb "go.wit.com/lib/protobuf/virtbuf"
|
||||
"go.wit.com/log"
|
||||
"libvirt.org/go/libvirtxml"
|
||||
)
|
||||
|
||||
func addDomainDroplet(domcfg *libvirtxml.Domain) (*DropletT, error) {
|
||||
if domcfg == nil {
|
||||
return nil, errors.New("domcfg == nil")
|
||||
}
|
||||
|
||||
d, _ := findDomain(domcfg)
|
||||
if d != nil {
|
||||
return d, errors.New(d.pb.Hostname + " droplet exists. need to update instead")
|
||||
}
|
||||
// this is a new unknown droplet (not in the config file)
|
||||
d = new(DropletT)
|
||||
|
||||
d.pb = me.cluster.AddDroplet(domcfg.UUID, domcfg.Name, 2, 2*1024*1024)
|
||||
d.pb.StartState = "off"
|
||||
|
||||
me.droplets = append(me.droplets, d)
|
||||
me.changed = true
|
||||
|
||||
if updateDroplet(d, domcfg) {
|
||||
if me.changed {
|
||||
log.Info("updateDroplet() worked. droplet changed")
|
||||
} else {
|
||||
log.Verbose("updateDroplet() worked. nothing changed")
|
||||
}
|
||||
} else {
|
||||
log.Info("updateDroplet() failed for", d.pb.Hostname)
|
||||
return d, errors.New("update failed for " + domcfg.Name)
|
||||
}
|
||||
log.Info("added new droplet", domcfg.Name, domcfg.UUID)
|
||||
return d, nil
|
||||
}
|
||||
|
||||
func findDomain(domcfg *libvirtxml.Domain) (*DropletT, error) {
|
||||
var found *DropletT
|
||||
if domcfg == nil {
|
||||
return nil, errors.New("domcfg == nil")
|
||||
}
|
||||
|
||||
for _, d := range me.droplets {
|
||||
if d.pb.Hostname == domcfg.Name {
|
||||
if d.pb.Uuid != domcfg.UUID {
|
||||
fmt.Println("CHANGED UUID", d.pb.Uuid, domcfg.UUID)
|
||||
d.pb.Uuid = domcfg.UUID
|
||||
me.changed = true
|
||||
}
|
||||
if found == nil {
|
||||
found = d
|
||||
} else {
|
||||
fmt.Println("FOUND TWICE", d.pb.Uuid, domcfg.Name, domcfg.UUID)
|
||||
return d, errors.New("Found Twice")
|
||||
}
|
||||
|
||||
}
|
||||
if d.pb.Uuid == domcfg.UUID {
|
||||
if d.pb.Hostname != domcfg.Name {
|
||||
fmt.Println("FOUND UUID WITH MIS-MATCHED NAME", domcfg.Name, domcfg.UUID)
|
||||
return d, errors.New("UUID with mis-matched names")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return found, nil
|
||||
}
|
||||
|
||||
func updateDroplet(d *DropletT, domcfg *libvirtxml.Domain) bool {
|
||||
var ok bool = true
|
||||
|
||||
if d == nil {
|
||||
return false
|
||||
}
|
||||
if domcfg == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if !updateMemory(d, domcfg) {
|
||||
log.Info("updateMemory() failed")
|
||||
ok = false
|
||||
}
|
||||
|
||||
// check cpus
|
||||
if d.pb.Cpus != int64(domcfg.VCPU.Value) {
|
||||
fmt.Printf("cpus changed. VCPU = %+v\n", domcfg.VCPU)
|
||||
d.pb.Cpus = int64(domcfg.VCPU.Value)
|
||||
me.changed = true
|
||||
}
|
||||
|
||||
// check type
|
||||
if domcfg.Type != "kvm" {
|
||||
fmt.Printf("not kvm. Virt type == %s\n", domcfg.Type)
|
||||
ok = false
|
||||
}
|
||||
|
||||
if !updateNetwork(d, domcfg) {
|
||||
log.Info("updateNetwork() failed")
|
||||
ok = false
|
||||
}
|
||||
|
||||
if !updateDisk(d, domcfg) {
|
||||
log.Info("updateDisk() failed")
|
||||
ok = false
|
||||
}
|
||||
return ok
|
||||
}
|
||||
|
||||
// returns false if something went wrong
|
||||
func updateMemory(d *DropletT, domcfg *libvirtxml.Domain) bool {
|
||||
if (d == nil) || (domcfg == nil) {
|
||||
return false
|
||||
}
|
||||
|
||||
// check memory
|
||||
if domcfg.Memory.Unit == "KiB" {
|
||||
var m int64
|
||||
m = int64(domcfg.Memory.Value * 1024)
|
||||
if d.pb.Memory != m {
|
||||
d.pb.Memory = m
|
||||
me.changed = true
|
||||
fmt.Printf("Memory changed %d, %d %s\n", d.pb.Memory, domcfg.Memory.Value, domcfg.Memory.Unit)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
if domcfg.Memory.Unit == "MiB" {
|
||||
var m int64
|
||||
m = int64(domcfg.Memory.Value * 1024 * 1024)
|
||||
if d.pb.Memory != m {
|
||||
d.pb.Memory = m
|
||||
me.changed = true
|
||||
fmt.Printf("Memory changed %d, %d %s\n", d.pb.Memory, domcfg.Memory.Value, domcfg.Memory.Unit)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
if domcfg.Memory.Unit == "GiB" {
|
||||
var m int64
|
||||
m = int64(domcfg.Memory.Value * 1024 * 1024 * 1024)
|
||||
if d.pb.Memory != m {
|
||||
d.pb.Memory = m
|
||||
me.changed = true
|
||||
fmt.Printf("Memory changed %d, %d %s\n", d.pb.Memory, domcfg.Memory.Value, domcfg.Memory.Unit)
|
||||
}
|
||||
return true
|
||||
}
|
||||
fmt.Println("Unknown Memory Unit", domcfg.Memory.Unit)
|
||||
return false
|
||||
}
|
||||
|
||||
// returns false if something went wrong
|
||||
func updateNetwork(d *DropletT, domcfg *libvirtxml.Domain) bool {
|
||||
if (d == nil) || (domcfg == nil) {
|
||||
return false
|
||||
}
|
||||
|
||||
var macs []string
|
||||
// Iterate over the network interfaces and print the MAC addresses
|
||||
for _, iface := range domcfg.Devices.Interfaces {
|
||||
if iface.MAC != nil {
|
||||
// iface.MAC.Address = "aa:bb:aa:bb:aa:ff"
|
||||
// fmt.Printf("MAC Address: %+v\n", iface.MAC)
|
||||
// log.Info("Interface:", iface.Target, "MAC Address:", iface.MAC.Address)
|
||||
// fmt.Printf("source: %+v\n", iface.Source)
|
||||
macs = append(macs, iface.MAC.Address)
|
||||
} else {
|
||||
fmt.Printf("Interface: %s, MAC Address: not available\n", iface.Target.Dev)
|
||||
}
|
||||
}
|
||||
|
||||
for _, mac := range macs {
|
||||
var found bool = false
|
||||
for i, eth := range d.pb.Networks {
|
||||
if eth.Mac == mac {
|
||||
log.Verbose("OKAY. FOUND ETH:", i, eth.Mac, eth.Name)
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
var eth *pb.Network
|
||||
eth = new(pb.Network)
|
||||
eth.Mac = mac
|
||||
eth.Name = "worldbr"
|
||||
d.pb.Networks = append(d.pb.Networks, eth)
|
||||
me.changed = true
|
||||
}
|
||||
}
|
||||
|
||||
log.Verbose("mac addrs:", macs)
|
||||
return true
|
||||
}
|
||||
|
||||
// returns false if something went wrong
|
||||
func updateDisk(d *DropletT, domcfg *libvirtxml.Domain) bool {
|
||||
if (d == nil) || (domcfg == nil) {
|
||||
return false
|
||||
}
|
||||
for _, disk := range domcfg.Devices.Disks {
|
||||
var t *libvirtxml.DomainDiskSourceFile
|
||||
t = disk.Source.File
|
||||
filename := t.File
|
||||
if filename == "" {
|
||||
fmt.Println("No disk source file found.")
|
||||
continue
|
||||
}
|
||||
|
||||
var found bool = false
|
||||
for _, disk := range d.pb.Disks {
|
||||
if disk.Filename == filename {
|
||||
log.Verbose("OKAY. FOUND filename", filename)
|
||||
found = true
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
var disk *pb.Disk
|
||||
disk = new(pb.Disk)
|
||||
disk.Filename = filename
|
||||
d.pb.Disks = append(d.pb.Disks, disk)
|
||||
log.Info("New filename", filename)
|
||||
me.changed = true
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
83
argv.go
83
argv.go
|
@ -1,6 +1,11 @@
|
|||
package main
|
||||
|
||||
import "go.wit.com/log"
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"go.wit.com/log"
|
||||
)
|
||||
|
||||
/*
|
||||
this parses the command line arguements
|
||||
|
@ -11,31 +16,58 @@ import "go.wit.com/log"
|
|||
var argv args
|
||||
|
||||
type args struct {
|
||||
Dir string `arg:"--dir" help:"defaults to ~/.config/virtigo/"`
|
||||
Port int `arg:"--port" default:"8080" help:"specify a different default port"`
|
||||
Hosts []string `arg:"--hosts" help:"hosts to connect to"`
|
||||
Uptime bool `arg:"--uptime" default:"true" help:"allow uptime checks for things like Kuma"`
|
||||
Daemon bool `arg:"--daemon" help:"run in daemon mode"`
|
||||
Xml []string `arg:"--add-xml" help:"add libvirt xml files"`
|
||||
Save bool `arg:"--save" default:"false" help:"save xml changes to the protobuf values"`
|
||||
List *ListCmd `arg:"subcommand:list" help:"list things"`
|
||||
Droplet *DropletCmd `arg:"subcommand:droplet" help:"send events to a droplet"`
|
||||
Config string `arg:"env:VIRTIGO_HOME" help:"defaults to ~/.config/virtigo/"`
|
||||
Server string `arg:"env:VIRTIGO_SERVER" help:"what virtigo cluster to connect to"`
|
||||
Localhost bool `arg:"--localhost" help:"use the local libvirt"`
|
||||
Daemon bool `arg:"--daemon" help:"run as a daemon"`
|
||||
Verbose bool `arg:"--verbose" help:"talk more"`
|
||||
Port int `arg:"--port" default:"8080" help:"allow droplet events via http"`
|
||||
Xml []string `arg:"--libvirt" help:"import qemu xml files: --libvirt /etc/libvirt/qemu/*.xml"`
|
||||
Admin bool `arg:"--admin" help:"enter admin mode"`
|
||||
Bash bool `arg:"--bash" help:"generate bash completion"`
|
||||
BashAuto []string `arg:"--auto-complete" help:"todo: move this to go-arg"`
|
||||
}
|
||||
|
||||
type EmptyCmd struct {
|
||||
}
|
||||
|
||||
type testCmd string
|
||||
|
||||
type ListCmd struct {
|
||||
Droplets *EmptyCmd `arg:"subcommand:droplets" help:"list droplets"`
|
||||
Hypervisors *EmptyCmd `arg:"subcommand:hypervisors" help:"list hypervisors"`
|
||||
On bool `arg:"--on" help:"only show things that are on"`
|
||||
}
|
||||
|
||||
type DropletCmd struct {
|
||||
Start *EmptyCmd `arg:"subcommand:start" help:"start droplet"`
|
||||
Stop *EmptyCmd `arg:"subcommand:stop" help:"stop droplet"`
|
||||
Show *EmptyCmd `arg:"subcommand:show" help:"show droplet"`
|
||||
Console *EmptyCmd `arg:"subcommand:console" help:"open serial console"`
|
||||
VNC *EmptyCmd `arg:"subcommand:vnc" help:"open VNC console"`
|
||||
Spice *EmptyCmd `arg:"subcommand:spice" help:"open spiceconsole"`
|
||||
Name string `arg:"--name" help:"what droplet to start"`
|
||||
}
|
||||
|
||||
func (a args) Description() string {
|
||||
return `
|
||||
virtigo will help control your cluster of hypervisiors
|
||||
virtigo: control your cluster
|
||||
|
||||
go install go.wit.com/apps/virtigo@latest
|
||||
This maintains a master list of all your vm's (aka 'droplets')
|
||||
in your homelab cloud. You can import libvirt xml files.
|
||||
This app talks to your hypervisors via the virtigod daemon.
|
||||
`
|
||||
}
|
||||
|
||||
func (args) Version() string {
|
||||
return "virtigo " + Version
|
||||
return ARGNAME + " " + VERSION + " Built on " + BUILDTIME
|
||||
}
|
||||
|
||||
var INFO *log.LogFlag
|
||||
var POLL *log.LogFlag
|
||||
var WARN *log.LogFlag
|
||||
var SPEW *log.LogFlag
|
||||
var EVENT *log.LogFlag
|
||||
|
||||
func init() {
|
||||
|
@ -44,6 +76,31 @@ func init() {
|
|||
|
||||
INFO = log.NewFlag("INFO", false, full, short, "general virtigo")
|
||||
POLL = log.NewFlag("POLL", false, full, short, "virtigo polling")
|
||||
SPEW = log.NewFlag("SPEW", true, full, short, "bad things")
|
||||
WARN = log.NewFlag("WARN", true, full, short, "bad things")
|
||||
EVENT = log.NewFlag("EVENT", true, full, short, "hypeprvisor/droplet events")
|
||||
}
|
||||
|
||||
/*
|
||||
handles shell autocomplete
|
||||
*/
|
||||
|
||||
func (a args) DoAutoComplete(argv []string) {
|
||||
switch argv[0] {
|
||||
case "list":
|
||||
fmt.Println("droplets hypervisors")
|
||||
case "droplet":
|
||||
fmt.Println("start stop")
|
||||
case "devel":
|
||||
fmt.Println("--force")
|
||||
case "master":
|
||||
fmt.Println("")
|
||||
case "verify":
|
||||
fmt.Println("user devel master")
|
||||
default:
|
||||
if argv[0] == ARGNAME {
|
||||
// list the subcommands here
|
||||
fmt.Println("--bash list droplet")
|
||||
}
|
||||
}
|
||||
os.Exit(0)
|
||||
}
|
||||
|
|
|
@ -0,0 +1,5 @@
|
|||
#!/bin/bash -x
|
||||
|
||||
# this is the systemd control file
|
||||
mkdir -p files/usr/bin/
|
||||
cp ../virtigoctl/virtigoctl files/usr/bin/
|
111
configfiles.go
111
configfiles.go
|
@ -1,111 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
pb "go.wit.com/lib/protobuf/virtbuf"
|
||||
"go.wit.com/log"
|
||||
)
|
||||
|
||||
var ErrorNoFile error = errors.New("missing file")
|
||||
var ErrorParseJSON error = errors.New("invalid json")
|
||||
var ErrorParseXML error = errors.New("invalid xml")
|
||||
|
||||
// something is wrong somewhere and sometimes the
|
||||
// protobuf json files get written out with garbage
|
||||
func cfgfile() {
|
||||
err := readConfigFile("virtigo.json")
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
if err == ErrorParseJSON {
|
||||
os.Exit(-1)
|
||||
}
|
||||
err = readConfigFile("virtigo.json.last")
|
||||
if err == nil {
|
||||
log.Info("read json failed", err)
|
||||
os.Exit(-1)
|
||||
}
|
||||
if err == ErrorNoFile {
|
||||
log.Info("no config file created yet", err)
|
||||
os.Exit(-1)
|
||||
}
|
||||
}
|
||||
|
||||
func readConfigFile(filename string) error {
|
||||
me.cluster = new(pb.Cluster)
|
||||
fullname := filepath.Join(os.Getenv("VIRTIGO_HOME"), filename)
|
||||
pfile, err := os.ReadFile(fullname)
|
||||
if err != nil {
|
||||
log.Info("open config file :", err)
|
||||
return err
|
||||
}
|
||||
err = me.cluster.UnmarshalJSON(pfile)
|
||||
if err != nil {
|
||||
log.Info("read json failed", err)
|
||||
os.Exit(-1)
|
||||
return err
|
||||
}
|
||||
|
||||
// initialize each hypervisor
|
||||
for _, pbh := range me.cluster.Hypervisors {
|
||||
h := findHypervisor(pbh.Hostname)
|
||||
if h != nil {
|
||||
continue
|
||||
}
|
||||
// this is a new unknown droplet (not in the config file)
|
||||
h = new(HyperT)
|
||||
h.pb = pbh
|
||||
|
||||
h.lastpoll = time.Now()
|
||||
|
||||
me.hypers = append(me.hypers, h)
|
||||
log.Log(EVENT, "config new hypervisors", h.pb.Hostname)
|
||||
}
|
||||
|
||||
// initialize values for each droplet
|
||||
for _, pbd := range me.cluster.Droplets {
|
||||
d := findDroplet(pbd.Hostname)
|
||||
if d != nil {
|
||||
continue
|
||||
}
|
||||
// this is a new unknown droplet (not in the config file)
|
||||
d = new(DropletT)
|
||||
d.pb = pbd
|
||||
me.droplets = append(me.droplets, d)
|
||||
log.Log(EVENT, "config new droplet", d.pb.Hostname, d.pb.StartState, d.pb.PreferredHypervisor)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeConfigFile() {
|
||||
fullname := filepath.Join(os.Getenv("VIRTIGO_HOME"), "virtigo.json")
|
||||
cfgfile, err := os.OpenFile(fullname, os.O_RDWR|os.O_CREATE, 0666)
|
||||
defer cfgfile.Close()
|
||||
if err != nil {
|
||||
log.Info("open config file :", err)
|
||||
return
|
||||
}
|
||||
json := me.cluster.FormatJSON()
|
||||
fmt.Fprintln(cfgfile, json)
|
||||
log.Info("Write:", fullname, "OK")
|
||||
}
|
||||
|
||||
func writeConfigFileDroplets() {
|
||||
fullname := filepath.Join(os.Getenv("VIRTIGO_HOME"), "droplets.text")
|
||||
cfgfile, err := os.OpenFile(fullname, os.O_RDWR|os.O_CREATE, 0666)
|
||||
defer cfgfile.Close()
|
||||
if err != nil {
|
||||
log.Info("open config file :", err)
|
||||
return
|
||||
}
|
||||
// text := me.cluster.Droplets.FormatTEXT()
|
||||
text := me.cluster.FormatTEXT()
|
||||
fmt.Fprintln(cfgfile, text)
|
||||
log.Info("Write:", fullname, "OK")
|
||||
}
|
|
@ -0,0 +1,15 @@
|
|||
Source: virtigo
|
||||
Build-Depends: golang
|
||||
Package: virtigo
|
||||
Maintainer: Jeff Carr <jcarr@wit.com>
|
||||
Architecture: amd64
|
||||
Recommends: virtigod
|
||||
Depends: gus, remmina, remmina-plugin-spice
|
||||
URL: https://go.wit.com/apps/virtigo
|
||||
Description: control your virtual machines in your cluster
|
||||
lets you start,stop, etc virtual machines
|
||||
converts libvirt xml into protobuf definitions
|
||||
communicates with hypervisors with protobuf's
|
||||
uses virsh to start & stop
|
||||
maintains unique mac address table
|
||||
servers cluster status on :8080 for uptime checking like Kuma
|
|
@ -0,0 +1,416 @@
|
|||
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
|
||||
// Use of this source code is governed by the GPL 3.0
|
||||
|
||||
package main
|
||||
|
||||
// An app to submit patches for the 30 GO GUI repos
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/user"
|
||||
"time"
|
||||
|
||||
"go.wit.com/gui"
|
||||
"go.wit.com/lib/gadgets"
|
||||
"go.wit.com/lib/protobuf/virtpb"
|
||||
"go.wit.com/log"
|
||||
)
|
||||
|
||||
// refresh the windows & tables the user has open
|
||||
func (admin *adminT) refresh() error {
|
||||
if argv.Verbose {
|
||||
log.Info("virtigo scan here")
|
||||
}
|
||||
|
||||
if admin.url == nil {
|
||||
log.Info("admin url == nil")
|
||||
return fmt.Errorf("admin url == nil")
|
||||
}
|
||||
|
||||
msg := []byte(`{"message": "Hello"}`)
|
||||
|
||||
// display the uptime
|
||||
if data, err := postData(admin.url.String()+"/uptime", msg); err != nil {
|
||||
log.Info("/uptime Error:", err)
|
||||
} else {
|
||||
log.Info("Response:", string(data))
|
||||
admin.uptime.SetText(string(data))
|
||||
}
|
||||
|
||||
// update the droplet list
|
||||
if data, err := postData(admin.url.String()+"/DropletsPB", msg); err != nil {
|
||||
log.Info("/DropletsPB Error:", err)
|
||||
} else {
|
||||
fmt.Println("DropletsPB Response len:", len(data))
|
||||
admin.cluster.Droplets = new(virtpb.Droplets)
|
||||
if err := admin.cluster.Droplets.Unmarshal(data); err != nil {
|
||||
fmt.Println("droplets marshal failed", err)
|
||||
return err
|
||||
}
|
||||
fmt.Println("Droplet len=", admin.cluster.Droplets.Len())
|
||||
}
|
||||
|
||||
// update the hypervisor list
|
||||
if data, err := postData(admin.url.String()+"/HypervisorsPB", msg); err != nil {
|
||||
log.Info("Error:", err)
|
||||
} else {
|
||||
fmt.Println("HypervisorsPB Response len:", len(data))
|
||||
admin.cluster.Hypervisors = new(virtpb.Hypervisors)
|
||||
if err := admin.cluster.Hypervisors.Unmarshal(data); err != nil {
|
||||
fmt.Println("hypervisors marshal failed", err)
|
||||
return err
|
||||
}
|
||||
fmt.Println("Hypervisors len=", admin.cluster.Hypervisors.Len())
|
||||
}
|
||||
|
||||
// update the events list
|
||||
if data, err := postData(admin.url.String()+"/EventsPB", msg); err != nil {
|
||||
log.Info("Error:", err)
|
||||
} else {
|
||||
fmt.Println("EventsPB Response len:", len(data))
|
||||
admin.cluster.Events = new(virtpb.Events)
|
||||
if err := admin.cluster.Events.Unmarshal(data); err != nil {
|
||||
fmt.Println("events marshal failed", err)
|
||||
return err
|
||||
}
|
||||
fmt.Println("Events len=", admin.cluster.Events.Len())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var client *http.Client
|
||||
|
||||
func doLocalhostAdminGui() *adminT {
|
||||
admin := new(adminT)
|
||||
|
||||
admin.uptime = me.gwin.Group.NewLabel("uptime")
|
||||
|
||||
grid := me.gwin.Group.RawGrid()
|
||||
|
||||
grid.NewButton("show hypervisors", func() {
|
||||
if admin.cluster.Hypervisors == nil {
|
||||
log.Info("hypervisors not initialized")
|
||||
return
|
||||
}
|
||||
log.Info("Hypervisors len=", admin.cluster.Hypervisors.Len())
|
||||
admin.hwin = newHypervisorsWindow()
|
||||
admin.hwin.doStdHypervisors(admin.cluster.Hypervisors)
|
||||
admin.hwin.win.Custom = func() {
|
||||
log.Info("hiding table window")
|
||||
}
|
||||
})
|
||||
|
||||
grid.NewButton("droplets", func() {
|
||||
if admin.cluster.Droplets == nil {
|
||||
log.Info("droplets not initialized")
|
||||
return
|
||||
}
|
||||
admin.dwin = newDropletsWindow(admin)
|
||||
admin.dwin.win.Custom = func() {
|
||||
log.Info("hiding droplet table window")
|
||||
}
|
||||
var found *virtpb.Droplets
|
||||
found = virtpb.NewDroplets()
|
||||
all := admin.cluster.Droplets.All()
|
||||
for all.Scan() {
|
||||
vm := all.Next()
|
||||
if vm.Current.State != virtpb.DropletState_ON {
|
||||
continue
|
||||
}
|
||||
found.Append(vm)
|
||||
}
|
||||
admin.dwin.doActiveDroplets(found)
|
||||
})
|
||||
|
||||
grid.NewButton("events", func() {
|
||||
if admin.cluster.Events == nil {
|
||||
log.Info("events are not initialized")
|
||||
return
|
||||
}
|
||||
log.Info("Events len=", admin.cluster.Events.Len())
|
||||
admin.ewin = newEventsWindow()
|
||||
admin.ewin.doStdEvents(admin.cluster.Events)
|
||||
admin.ewin.win.Custom = func() {
|
||||
log.Info("hiding table window")
|
||||
}
|
||||
})
|
||||
|
||||
grid.NextRow()
|
||||
|
||||
grid.NewButton("refresh", func() {
|
||||
admin.refresh()
|
||||
})
|
||||
|
||||
return admin
|
||||
}
|
||||
|
||||
func doAdminGui() {
|
||||
// Initialize a persistent client with a custom Transport
|
||||
client = &http.Client{
|
||||
Transport: &http.Transport{
|
||||
DisableKeepAlives: false, // Ensure Keep-Alive is enabled
|
||||
},
|
||||
Timeout: 10 * time.Second, // Set a reasonable timeout
|
||||
}
|
||||
|
||||
me.gwin = gadgets.NewGenericWindow("Virtigo: (run your cluster)", "")
|
||||
me.gwin.Custom = func() {
|
||||
log.Warn("Main window close")
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
me.cmap = make(map[*virtpb.Cluster]*adminT)
|
||||
for c := range me.clusters.IterAll() {
|
||||
a := new(adminT)
|
||||
me.cmap[c] = a
|
||||
log.Info("found in the config file", c.URL[0])
|
||||
a.makeClusterGroup(c)
|
||||
}
|
||||
|
||||
// sit here forever refreshing the GUI
|
||||
for {
|
||||
// admin.refresh()
|
||||
log.Info("todo: refresh() protobufs here")
|
||||
time.Sleep(90 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
func (admin *adminT) doAdminGui() {
|
||||
// Initialize a persistent client with a custom Transport
|
||||
client = &http.Client{
|
||||
Transport: &http.Transport{
|
||||
DisableKeepAlives: false, // Ensure Keep-Alive is enabled
|
||||
},
|
||||
Timeout: 10 * time.Second, // Set a reasonable timeout
|
||||
}
|
||||
|
||||
win := gadgets.NewGenericWindow("Virtigo: (run your cluster)", "localhost")
|
||||
win.Custom = func() {
|
||||
log.Warn("Main window close")
|
||||
os.Exit(0)
|
||||
}
|
||||
me.gwin = win
|
||||
|
||||
admin.uptime = win.Group.NewLabel("uptime")
|
||||
|
||||
grid := win.Group.RawGrid()
|
||||
|
||||
grid.NewButton("show hypervisors", func() {
|
||||
if admin.cluster.Hypervisors == nil {
|
||||
log.Info("hypervisors not initialized")
|
||||
return
|
||||
}
|
||||
log.Info("Hypervisors len=", admin.cluster.Hypervisors.Len())
|
||||
admin.hwin = newHypervisorsWindow()
|
||||
admin.hwin.doStdHypervisors(admin.cluster.Hypervisors)
|
||||
admin.hwin.win.Custom = func() {
|
||||
log.Info("hiding table window")
|
||||
}
|
||||
})
|
||||
|
||||
grid.NewButton("droplets", func() {
|
||||
if admin.cluster.Droplets == nil {
|
||||
log.Info("droplets not initialized")
|
||||
return
|
||||
}
|
||||
admin.dwin = newDropletsWindow(admin)
|
||||
admin.dwin.win.Custom = func() {
|
||||
log.Info("hiding droplet table window")
|
||||
}
|
||||
var found *virtpb.Droplets
|
||||
found = virtpb.NewDroplets()
|
||||
all := admin.cluster.Droplets.All()
|
||||
for all.Scan() {
|
||||
vm := all.Next()
|
||||
if vm.Current.State != virtpb.DropletState_ON {
|
||||
continue
|
||||
}
|
||||
found.Append(vm)
|
||||
}
|
||||
admin.dwin.doActiveDroplets(found)
|
||||
})
|
||||
|
||||
grid.NewButton("events", func() {
|
||||
if admin.cluster.Events == nil {
|
||||
log.Info("events are not initialized")
|
||||
return
|
||||
}
|
||||
log.Info("Events len=", admin.cluster.Events.Len())
|
||||
admin.ewin = newEventsWindow()
|
||||
admin.ewin.doStdEvents(admin.cluster.Events)
|
||||
admin.ewin.win.Custom = func() {
|
||||
log.Info("hiding table window")
|
||||
}
|
||||
})
|
||||
|
||||
grid.NextRow()
|
||||
|
||||
grid.NewButton("refresh", func() {
|
||||
admin.refresh()
|
||||
})
|
||||
|
||||
grid.NewButton("test gui close", func() {
|
||||
gui.StandardExit()
|
||||
// okExit("admin close")
|
||||
})
|
||||
|
||||
me.cmap = make(map[*virtpb.Cluster]*adminT)
|
||||
for c := range me.clusters.IterAll() {
|
||||
a := new(adminT)
|
||||
me.cmap[c] = a
|
||||
log.Info("found in the config file", c.URL[0])
|
||||
a.makeClusterGroup(c)
|
||||
}
|
||||
|
||||
// sit here forever refreshing the GUI
|
||||
for {
|
||||
admin.refresh()
|
||||
time.Sleep(90 * time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
func (admin *adminT) makeClusterGroup(c *virtpb.Cluster) {
|
||||
var err error
|
||||
admin.url, err = url.Parse(c.URL[0])
|
||||
if err != nil {
|
||||
badExit(err)
|
||||
}
|
||||
|
||||
if admin.cluster == nil {
|
||||
admin.cluster = new(virtpb.Cluster)
|
||||
admin.cluster.Name = c.Name
|
||||
admin.cluster.Uuid = c.Uuid
|
||||
}
|
||||
|
||||
name := c.GetName()
|
||||
if name == "" {
|
||||
name = admin.url.Hostname()
|
||||
}
|
||||
|
||||
group := me.gwin.Bottom.NewGroup(name)
|
||||
admin.uptime = group.NewLabel("uptime")
|
||||
|
||||
grid := group.RawGrid()
|
||||
|
||||
grid.NewButton("show hypervisors", func() {
|
||||
if admin.cluster.Hypervisors == nil {
|
||||
log.Info("hypervisors not initialized")
|
||||
return
|
||||
}
|
||||
log.Info("Hypervisors len=", admin.cluster.Hypervisors.Len())
|
||||
admin.hwin = newHypervisorsWindow()
|
||||
admin.hwin.doStdHypervisors(admin.cluster.Hypervisors)
|
||||
admin.hwin.win.Custom = func() {
|
||||
log.Info("hiding table window")
|
||||
}
|
||||
})
|
||||
|
||||
grid.NewButton("droplets", func() {
|
||||
if admin.cluster.Droplets == nil {
|
||||
log.Info("droplets not initialized")
|
||||
return
|
||||
}
|
||||
admin.dwin = newDropletsWindow(admin)
|
||||
admin.dwin.win.Custom = func() {
|
||||
log.Info("hiding droplet table window")
|
||||
}
|
||||
var found *virtpb.Droplets
|
||||
found = virtpb.NewDroplets()
|
||||
all := admin.cluster.Droplets.All()
|
||||
for all.Scan() {
|
||||
vm := all.Next()
|
||||
if vm.Current.State != virtpb.DropletState_ON {
|
||||
continue
|
||||
}
|
||||
found.Append(vm)
|
||||
}
|
||||
admin.dwin.doActiveDroplets(found)
|
||||
})
|
||||
|
||||
grid.NewButton("events", func() {
|
||||
if admin.cluster.Events == nil {
|
||||
log.Info("events are not initialized")
|
||||
return
|
||||
}
|
||||
log.Info("Events len=", admin.cluster.Events.Len())
|
||||
admin.ewin = newEventsWindow()
|
||||
admin.ewin.doStdEvents(admin.cluster.Events)
|
||||
admin.ewin.win.Custom = func() {
|
||||
log.Info("hiding table window")
|
||||
}
|
||||
})
|
||||
|
||||
grid.NewButton("refresh", func() {
|
||||
admin.refresh()
|
||||
})
|
||||
|
||||
if err := admin.refresh(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
grid.NewButton("save cluster.pb", func() {
|
||||
admin.cluster.ConfigSave()
|
||||
})
|
||||
}
|
||||
|
||||
func postData(url string, data []byte) ([]byte, error) {
|
||||
req, err := http.NewRequest(http.MethodPost, url, bytes.NewBuffer(data))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
usr, _ := user.Current()
|
||||
req.Header.Set("author", usr.Username)
|
||||
req.Header.Set("Connection", "keep-alive") // Ensure keep-alive is used
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("request failed: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read response: %w", err)
|
||||
}
|
||||
|
||||
return body, nil
|
||||
}
|
||||
|
||||
func (admin *adminT) postEvent(e *virtpb.Event) error {
|
||||
var result *virtpb.Event
|
||||
result = new(virtpb.Event)
|
||||
|
||||
msg, err := e.Marshal()
|
||||
if err != nil {
|
||||
log.Info("postEvent() marshal() failed", err, e)
|
||||
return err
|
||||
}
|
||||
|
||||
url := admin.url.String() + "/event"
|
||||
|
||||
// update the droplet list
|
||||
if data, err := postData(url, msg); err != nil {
|
||||
log.Info("postEvent() /event Error:", err)
|
||||
return err
|
||||
} else {
|
||||
if err := result.Unmarshal(data); err != nil {
|
||||
log.Println("postEvent() result marshal failed", err, "len(dat) =", len(data))
|
||||
log.Println("postEvent() data =", string(data))
|
||||
return err
|
||||
} else {
|
||||
log.Println("postEvent() result marshal worked on len(dat) =", len(data))
|
||||
log.Println("postEvent() result =", result.FormatTEXT())
|
||||
}
|
||||
}
|
||||
if result.Error != "" {
|
||||
return fmt.Errorf("%s", result.Error)
|
||||
}
|
||||
log.Printf("Event worked to %s uuid=%s\n", url, result.DropletUuid)
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,154 @@
|
|||
// Copyright 2024 WIT.COM Inc Licensed GPL 3.0
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"go.wit.com/lib/protobuf/virtpb"
|
||||
"go.wit.com/lib/virtigolib"
|
||||
"go.wit.com/log"
|
||||
)
|
||||
|
||||
func doDaemon() error {
|
||||
// set defaults
|
||||
me.unstable = time.Now() // initialize the grid as unstable
|
||||
me.changed = false
|
||||
me.hmap = make(map[*virtpb.Hypervisor]*HyperT)
|
||||
|
||||
// how long a droplet can be missing until it's declared dead
|
||||
me.unstableTimeout = 17 * time.Second
|
||||
me.missingDropletTimeout = time.Minute // not sure the difference between these values
|
||||
|
||||
// how often to poll the hypervisors
|
||||
me.hyperPollDelay = 5 * time.Second
|
||||
|
||||
// how long the cluster must be stable before new droplets can be started
|
||||
me.clusterStableDuration = 37 * time.Second
|
||||
|
||||
me.cluster = virtpb.InitCluster()
|
||||
if err := me.cluster.ConfigLoad(); err != nil {
|
||||
log.Info("config load error", err)
|
||||
log.Info("")
|
||||
log.Info("You have never run this before")
|
||||
log.Info("init example cloud here")
|
||||
log.Sleep(2)
|
||||
return err
|
||||
}
|
||||
|
||||
loop := me.cluster.DropletsAll() // get the list of droplets
|
||||
for loop.Scan() {
|
||||
d := loop.Next()
|
||||
if d == nil {
|
||||
fmt.Println("d == nil")
|
||||
return fmt.Errorf("d == nil")
|
||||
}
|
||||
fmt.Println("Droplet UUID:", d.Uuid)
|
||||
if d.Current == nil {
|
||||
d.Current = new(virtpb.Current)
|
||||
}
|
||||
d.SetState(virtpb.DropletState_OFF)
|
||||
log.Info("droplet", d.Hostname)
|
||||
}
|
||||
hmm := "pihole.wit.com"
|
||||
d := me.cluster.FindDropletByName(hmm)
|
||||
if d == nil {
|
||||
log.Info("did not find found droplet", hmm)
|
||||
} else {
|
||||
log.Info("found droplet", d.Hostname, d)
|
||||
}
|
||||
|
||||
var newEvents []*virtpb.Event
|
||||
|
||||
// sanity check the cluster & droplets
|
||||
if _, _, err := ValidateDroplets(); err != nil {
|
||||
log.Info("todo: add flag to ignore. for now, fix problems in the config file.")
|
||||
return err
|
||||
}
|
||||
newe, err := ValidateDiskFilenames()
|
||||
if err != nil {
|
||||
log.Info(err)
|
||||
return err
|
||||
}
|
||||
// this is a new droplet. add it to the cluster
|
||||
for _, e := range newe {
|
||||
newEvents = append(newEvents, e)
|
||||
}
|
||||
ValidateUniqueFilenames()
|
||||
|
||||
for _, filename := range argv.Xml {
|
||||
domcfg, err := virtigolib.ReadXml(filename)
|
||||
if err != nil {
|
||||
// parsing the libvirt xml file failed
|
||||
log.Info("error:", filename, err)
|
||||
log.Info("readXml() error", filename)
|
||||
log.Info("readXml() error", err)
|
||||
log.Info("libvirt XML will have to be fixed by hand")
|
||||
return err
|
||||
}
|
||||
// this is a new droplet. add it to the cluster
|
||||
log.Info("Add XML Droplet here", domcfg.Name)
|
||||
_, newe, err := virtigolib.AddDomainDroplet(me.cluster, domcfg)
|
||||
if err != nil {
|
||||
log.Info("addDomainDroplet() error", filename)
|
||||
log.Info("addDomainDroplet() error", err)
|
||||
log.Info("libvirt XML will have to be fixed by hand")
|
||||
return err
|
||||
}
|
||||
for _, e := range newe {
|
||||
newEvents = append(newEvents, e)
|
||||
}
|
||||
}
|
||||
for i, e := range newEvents {
|
||||
log.Info(i, "Event:", e.Droplet, e.FieldName, "orig:", e.OrigVal, "new:", e.NewVal)
|
||||
me.changed = true
|
||||
}
|
||||
|
||||
if me.changed {
|
||||
if err := me.cluster.ConfigSave(); err != nil {
|
||||
log.Info("configsave error", err)
|
||||
return err
|
||||
}
|
||||
log.Info("XML changes saved in protobuf config")
|
||||
return nil
|
||||
}
|
||||
if len(argv.Xml) != 0 {
|
||||
log.Info("No XML changes found")
|
||||
return fmt.Errorf("No XML changes found")
|
||||
}
|
||||
|
||||
// initialize each hypervisor
|
||||
for _, pbh := range me.cluster.H.Hypervisors {
|
||||
// this is a new unknown droplet (not in the config file)
|
||||
var h *HyperT
|
||||
h = new(HyperT)
|
||||
h.pb = pbh
|
||||
h.lastDroplets = make(map[string]time.Time)
|
||||
h.lastpoll = time.Now()
|
||||
|
||||
me.hmap[pbh] = h
|
||||
me.hypers = append(me.hypers, h)
|
||||
log.Log(EVENT, "config new hypervisors", h.pb.Hostname)
|
||||
}
|
||||
|
||||
// start the watchdog polling for each hypervisor
|
||||
for _, h := range me.hypers {
|
||||
log.Info("starting polling on", h.pb.Hostname)
|
||||
|
||||
// start a watchdog on each hypervisor
|
||||
go h.NewWatchdog()
|
||||
}
|
||||
|
||||
var cloud *virtigolib.CloudManager
|
||||
cloud = virtigolib.NewCloud()
|
||||
found, _ := cloud.FindDropletByName("www.wit.com")
|
||||
if found == nil {
|
||||
log.Info("d == nil")
|
||||
} else {
|
||||
log.Info("d == ", found)
|
||||
}
|
||||
|
||||
startHTTP()
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,361 @@
|
|||
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
|
||||
// Use of this source code is governed by the GPL 3.0
|
||||
|
||||
package main
|
||||
|
||||
// An app to submit patches for the 30 GO GUI repos
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"go.wit.com/lib/gui/shell"
|
||||
"go.wit.com/lib/protobuf/virtpb"
|
||||
"go.wit.com/log"
|
||||
)
|
||||
|
||||
func doDroplet() (string, error) {
|
||||
err := me.clusters.ConfigLoad()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
msg := []byte(`{"message": "Hello"}`)
|
||||
|
||||
// Initialize a persistent client with a custom Transport
|
||||
client = &http.Client{
|
||||
Transport: &http.Transport{
|
||||
DisableKeepAlives: false, // Ensure Keep-Alive is enabled
|
||||
},
|
||||
Timeout: 10 * time.Second, // Set a reasonable timeout
|
||||
}
|
||||
|
||||
me.cmap = make(map[*virtpb.Cluster]*adminT)
|
||||
for c := range me.clusters.IterAll() {
|
||||
var err error
|
||||
admin := new(adminT)
|
||||
if admin.cluster == nil {
|
||||
admin.cluster = new(virtpb.Cluster)
|
||||
}
|
||||
me.cmap[c] = admin
|
||||
log.Info("found in the config file", c.URL[0])
|
||||
// a.makeClusterGroup(c)
|
||||
admin.url, err = url.Parse(c.URL[0])
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// update the droplet list
|
||||
if data, err := postData(admin.url.String()+"/DropletsPB", msg); err != nil {
|
||||
log.Info("/DropletsPB Error:", err)
|
||||
continue
|
||||
} else {
|
||||
admin.cluster.Droplets = new(virtpb.Droplets)
|
||||
if err := admin.cluster.Droplets.Unmarshal(data); err != nil {
|
||||
log.Printf("DropletsPB Response len:%d\n", len(data))
|
||||
log.Println("droplets marshal failed", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
log.Printf("Cluster Name: %s\n", c.Name)
|
||||
log.Printf("Number of Droplets: %d\n", admin.cluster.Droplets.Len())
|
||||
|
||||
if argv.Droplet.Name == "" {
|
||||
return "", fmt.Errorf("--name droplet name was empty")
|
||||
}
|
||||
|
||||
var found *virtpb.Droplets
|
||||
found = virtpb.NewDroplets()
|
||||
all := admin.cluster.Droplets.All()
|
||||
for all.Scan() {
|
||||
vm := all.Next()
|
||||
if argv.Droplet.Name == vm.Hostname {
|
||||
if argv.Droplet.Show != nil {
|
||||
log.Info(vm.SprintHeader())
|
||||
txt := vm.FormatTEXT()
|
||||
log.Info(txt)
|
||||
return "droplet status", nil
|
||||
}
|
||||
if argv.Droplet.Start != nil {
|
||||
log.Info("should start droplet here")
|
||||
log.Info(vm.SprintHeader())
|
||||
e := new(virtpb.Event)
|
||||
e.Etype = virtpb.EventType_POWERON
|
||||
e.DropletUuid = vm.Uuid
|
||||
|
||||
if err := admin.postEvent(e); err != nil {
|
||||
return "droplet start err", err
|
||||
}
|
||||
return "droplet start", nil
|
||||
}
|
||||
return "droplet found", fmt.Errorf("do what to the droplet?")
|
||||
}
|
||||
found.Append(vm)
|
||||
}
|
||||
log.Println("On Droplet count=", found.Len())
|
||||
}
|
||||
return "", fmt.Errorf("droplet %s not found", argv.Droplet.Name)
|
||||
}
|
||||
|
||||
func doEvent(e *virtpb.Event) *virtpb.Event {
|
||||
result := new(virtpb.Event)
|
||||
if e.Etype == virtpb.EventType_POWERON {
|
||||
log.Println("power on droplet on local cluster here", e.DropletUuid)
|
||||
result.State = virtpb.Event_DONE
|
||||
rs, err := Start(e.DropletUuid)
|
||||
log.Println("Start() returned", rs)
|
||||
log.Println("Start() returned err", err)
|
||||
if err != nil {
|
||||
result.Error = fmt.Sprintf("%v", err)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
if e.Etype == virtpb.EventType_EDIT {
|
||||
log.Println("edit event", e.DropletUuid)
|
||||
result.State = virtpb.Event_DONE
|
||||
if e.Droplet != nil {
|
||||
return updateDroplet(e.Droplet)
|
||||
}
|
||||
log.Println("unknown edit event")
|
||||
result.State = virtpb.Event_FAIL
|
||||
return result
|
||||
}
|
||||
|
||||
if e.Etype == virtpb.EventType_ADD {
|
||||
log.Println("START ADD droplet event", e.Droplet.FormatTEXT())
|
||||
if e.Droplet == nil {
|
||||
result.State = virtpb.Event_FAIL
|
||||
return result
|
||||
}
|
||||
result.DropletName = e.Droplet.Hostname
|
||||
result.Error = e.Droplet.FormatTEXT() // feedback to the other side for debugging
|
||||
|
||||
// attempt to create the new droplet
|
||||
if err := createDroplet(e.Droplet, result); err != nil {
|
||||
result.Error += fmt.Sprintf("createDroplet() err: %v", err)
|
||||
result.State = virtpb.Event_FAIL
|
||||
return result
|
||||
}
|
||||
log.Println("create droplet worked", e.Droplet.FormatTEXT())
|
||||
result.State = virtpb.Event_DONE
|
||||
return result
|
||||
}
|
||||
|
||||
log.Println("unknown event", e)
|
||||
result.Etype = e.Etype
|
||||
result.State = virtpb.Event_FAIL
|
||||
return result
|
||||
}
|
||||
|
||||
func updateDroplet(newd *virtpb.Droplet) *virtpb.Event {
|
||||
var changed bool = false
|
||||
result := new(virtpb.Event)
|
||||
|
||||
if newd == nil {
|
||||
result.Error = "updateDroplet() d == nil"
|
||||
result.State = virtpb.Event_FAIL
|
||||
return result
|
||||
}
|
||||
|
||||
d := me.cluster.FindDropletByUuid(newd.Uuid)
|
||||
if d == nil {
|
||||
result.Error = "updateDroplet() could not find uuid"
|
||||
result.State = virtpb.Event_FAIL
|
||||
return result
|
||||
}
|
||||
log.Println("found droplet to update:", newd.Uuid, newd.Hostname, newd.Cpus, newd.Memory)
|
||||
|
||||
if d.Hostname != newd.Hostname && newd.Hostname != "" {
|
||||
d.Hostname = newd.Hostname
|
||||
changed = true
|
||||
}
|
||||
|
||||
if d.Cpus != newd.Cpus && newd.Cpus > 0 {
|
||||
d.Cpus = newd.Cpus
|
||||
changed = true
|
||||
}
|
||||
|
||||
// arbitrary check. don't make vm's with less than 64 MB of RAM
|
||||
// big enough most things will load with some stdout
|
||||
if d.Memory != newd.Memory && newd.Memory > (64*1024*1024) {
|
||||
d.Memory = newd.Memory
|
||||
changed = true
|
||||
}
|
||||
|
||||
if changed {
|
||||
if err := me.cluster.ConfigSave(); err != nil {
|
||||
log.Info("configsave error", err)
|
||||
result.Error = fmt.Sprintf("%v", err)
|
||||
result.State = virtpb.Event_FAIL
|
||||
return result
|
||||
}
|
||||
} else {
|
||||
log.Println("nothing changed in", newd.Uuid, newd.Hostname)
|
||||
}
|
||||
|
||||
result.State = virtpb.Event_DONE
|
||||
return result
|
||||
}
|
||||
|
||||
func createDroplet(newd *virtpb.Droplet, result *virtpb.Event) error {
|
||||
if newd == nil {
|
||||
return fmt.Errorf("droplet protobuf == nil")
|
||||
}
|
||||
|
||||
if newd.Uuid == "" {
|
||||
newd.Uuid = uuid.New().String()
|
||||
}
|
||||
|
||||
d := me.cluster.FindDropletByUuid(newd.Uuid)
|
||||
if d != nil {
|
||||
return fmt.Errorf("droplet uuid already used")
|
||||
}
|
||||
|
||||
log.Println("found droplet to update:", newd.Uuid, newd.Hostname, newd.Cpus, newd.Memory)
|
||||
|
||||
if newd.Hostname == "" {
|
||||
return fmt.Errorf("Hostname can not be blank")
|
||||
}
|
||||
d = me.cluster.FindDropletByName(newd.Hostname)
|
||||
if d != nil {
|
||||
return fmt.Errorf("hostname already defined")
|
||||
}
|
||||
|
||||
// by default, on locally imported domains, set the preferred hypervisor!
|
||||
newd.LocalOnly = "yes on: " + "farm03"
|
||||
|
||||
newd.PreferredHypervisor = "farm03"
|
||||
newd.StartState = virtpb.DropletState_OFF
|
||||
|
||||
newd.Current = new(virtpb.Current)
|
||||
newd.Current.State = virtpb.DropletState_OFF
|
||||
|
||||
// create the network
|
||||
if err := createNetwork(newd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// create the disks
|
||||
if err := createDisks(newd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// append the protobuf and save it
|
||||
me.cluster.AddDroplet(newd)
|
||||
if err := me.cluster.ConfigSave(); err != nil {
|
||||
log.Info("configsave error", err)
|
||||
return fmt.Errorf("ConfigSave() error: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// findDisks is a placeholder: it should verify that the disk images
// already listed on d actually exist on storage.
// TODO(review): implement; currently always succeeds.
func findDisks(d *virtpb.Droplet) error {
	log.Info("need to do this")
	return nil
}
|
||||
|
||||
func createDisks(d *virtpb.Droplet) error {
|
||||
if d.Disks != nil {
|
||||
return findDisks(d)
|
||||
}
|
||||
|
||||
newdisk := new(virtpb.Disk)
|
||||
newdisk.Filename = d.Hostname + ".qcow2"
|
||||
newdisk.Filepath = "/home/nfs2"
|
||||
d.Disks = append(d.Disks, newdisk)
|
||||
|
||||
basefile := "/home/nfs2/base2025.wit-5.qcow2"
|
||||
newfile := filepath.Join(newdisk.Filepath, newdisk.Filename)
|
||||
|
||||
if !shell.Exists(newdisk.Filepath) {
|
||||
return fmt.Errorf("disk image path missing: %s", newdisk.Filepath)
|
||||
}
|
||||
|
||||
if !shell.Exists(basefile) {
|
||||
return fmt.Errorf("basefile %s missing", basefile)
|
||||
}
|
||||
|
||||
if shell.Exists(newfile) {
|
||||
return fmt.Errorf("disk image already exists: %s", newfile)
|
||||
}
|
||||
|
||||
cmd := []string{"dd", "bs=100M", "status=progress", "oflag=dsync", "if=" + basefile, "of=" + newfile}
|
||||
result := shell.RunRealtime(cmd)
|
||||
if result.Exit != 0 {
|
||||
return fmt.Errorf("dd to %s failed %d\n%s\n%s", newfile, result.Exit, strings.Join(result.Stdout, "\n"), strings.Join(result.Stderr, "\n"))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func createNetwork(d *virtpb.Droplet) error {
|
||||
if d.Networks != nil {
|
||||
// network already done
|
||||
return nil
|
||||
}
|
||||
if len(d.Networks) > 0 {
|
||||
// network already done
|
||||
return nil
|
||||
}
|
||||
|
||||
n := new(virtpb.Network)
|
||||
n.Mac = getNewMac()
|
||||
n.Name = "worldbr"
|
||||
d.Networks = append(d.Networks, n)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getNewMac() string {
|
||||
// mac address map to check for duplicates
|
||||
var macs map[string]string
|
||||
macs = make(map[string]string)
|
||||
|
||||
loop := me.cluster.DropletsAll() // get the list of droplets
|
||||
for loop.Scan() {
|
||||
d := loop.Next()
|
||||
for _, n := range d.Networks {
|
||||
// log.Println("network:", n.Mac, d.Uuid, d.Hostname)
|
||||
if _, ok := macs[n.Mac]; ok {
|
||||
// UUID already exists
|
||||
log.Info("duplicate MAC", n.Mac, macs[n.Mac])
|
||||
log.Info("duplicate MAC", n.Mac, d.Hostname)
|
||||
return ""
|
||||
}
|
||||
macs[n.Mac] = d.Hostname
|
||||
}
|
||||
}
|
||||
|
||||
return generateMAC(macs)
|
||||
}
|
||||
|
||||
func generateMAC(macs map[string]string) string {
|
||||
prefix := []byte{0x22, 0x22, 0x22}
|
||||
for {
|
||||
// Generate last 3 bytes randomly
|
||||
suffix := make([]byte, 3)
|
||||
if _, err := rand.Read(suffix); err != nil {
|
||||
log.Fatalf("Failed to generate random bytes: %v", err)
|
||||
}
|
||||
|
||||
// Format full MAC address
|
||||
mac := fmt.Sprintf("%02x:%02x:%02x:%02x:%02x:%02x",
|
||||
prefix[0], prefix[1], prefix[2],
|
||||
suffix[0], suffix[1], suffix[2])
|
||||
|
||||
// Check if MAC is already used
|
||||
if _, exists := macs[mac]; !exists {
|
||||
log.Println("Using new MAC:", mac)
|
||||
return mac
|
||||
}
|
||||
log.Println("MAC already defined:", mac)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,205 @@
|
|||
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
|
||||
// Use of this source code is governed by the GPL 3.0
|
||||
|
||||
package main
|
||||
|
||||
// An app to submit patches for the 30 GO GUI repos
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.wit.com/lib/gadgets"
|
||||
"go.wit.com/lib/protobuf/virtpb"
|
||||
"go.wit.com/log"
|
||||
)
|
||||
|
||||
// debug parks the calling goroutine, waking every 90 seconds.
// Currently a placeholder with no real work.
func debug() {
	for range time.Tick(90 * time.Second) {
		// log.Info("TODO: use this?")
	}
}
|
||||
|
||||
func doGui() {
|
||||
mainWindow := gadgets.NewGenericWindow("Virtigo: (inventory your cluster)", "Local Cluster Settings")
|
||||
mainWindow.Custom = func() {
|
||||
log.Warn("Main window close")
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
drawWindow(mainWindow)
|
||||
}
|
||||
|
||||
func drawWindow(win *gadgets.GenericWindow) {
|
||||
grid := win.Group.RawGrid()
|
||||
|
||||
var newHyperWin *stdHypervisorTableWin
|
||||
grid.NewButton("show hypervisors", func() {
|
||||
if newHyperWin != nil {
|
||||
log.Info("redraw hypervisors")
|
||||
newHyperWin.doNewStdHypervisors(me.cluster.H)
|
||||
return
|
||||
}
|
||||
log.Info("Hypervisors len=", me.cluster.H.Len())
|
||||
newHyperWin = newHypervisorsWindow()
|
||||
newHyperWin.doNewStdHypervisors(me.cluster.H)
|
||||
newHyperWin.win.Custom = func() {
|
||||
log.Info("hiding table window")
|
||||
}
|
||||
})
|
||||
|
||||
var dropWin *gadgets.GenericWindow
|
||||
grid.NewButton("droplets", func() {
|
||||
if dropWin != nil {
|
||||
dropWin.Toggle()
|
||||
return
|
||||
}
|
||||
d := me.cluster.GetDropletsPB()
|
||||
var found *virtpb.Droplets
|
||||
found = virtpb.NewDroplets()
|
||||
all := d.All()
|
||||
for all.Scan() {
|
||||
vm := all.Next()
|
||||
if vm.Current.State != virtpb.DropletState_ON {
|
||||
continue
|
||||
}
|
||||
found.Append(vm)
|
||||
}
|
||||
dropWin, _ = makeDropletsWindow(found)
|
||||
dropWin.Win.Custom = func() {
|
||||
log.Info("hiding droplet table window")
|
||||
}
|
||||
})
|
||||
|
||||
var ewin *stdEventTableWin
|
||||
grid.NewButton("events", func() {
|
||||
if ewin != nil {
|
||||
log.Info("update events here")
|
||||
e := me.cluster.GetEventsPB()
|
||||
log.Info("Events len=", e.Len())
|
||||
ewin.doStdEvents(e)
|
||||
return
|
||||
}
|
||||
ewin = newEventsWindow()
|
||||
ewin.win.Custom = func() {
|
||||
log.Info("hiding table window")
|
||||
}
|
||||
|
||||
e := me.cluster.GetEventsPB()
|
||||
log.Info("Events len=", e.Len())
|
||||
ewin.doStdEvents(e)
|
||||
})
|
||||
grid.NextRow()
|
||||
|
||||
grid.NewButton("ConfigSave()", func() {
|
||||
log.Info("todo: make code for this")
|
||||
})
|
||||
|
||||
var testWin *gadgets.GenericWindow
|
||||
grid.NewButton("create droplet", func() {
|
||||
if testWin != nil {
|
||||
testWin.Toggle()
|
||||
return
|
||||
}
|
||||
d := me.cluster.GetDropletsPB()
|
||||
testWin, _ = makeDropletsWindow(d)
|
||||
})
|
||||
|
||||
grid.NewButton("uptime", func() {
|
||||
updateUptimeGui("kuma uptime should update this")
|
||||
})
|
||||
grid.NextRow()
|
||||
|
||||
grid = win.Middle.RawGrid()
|
||||
me.status = grid.NewLabel("cur status")
|
||||
grid.NextRow()
|
||||
me.lastuptime = grid.NewLabel("last uptime")
|
||||
grid.NextRow()
|
||||
}
|
||||
|
||||
func updateUptimeGui(uptime string) {
|
||||
if me.status == nil {
|
||||
// gui is not initialized
|
||||
return
|
||||
}
|
||||
me.status.SetLabel(uptime)
|
||||
|
||||
datestamp := time.Now().Format("2006-01-02 15:04:03")
|
||||
me.lastuptime.SetLabel("last uptime at " + datestamp)
|
||||
}
|
||||
|
||||
func makeDropletsWindow(pb *virtpb.Droplets) (*gadgets.GenericWindow, *virtpb.DropletsTable) {
|
||||
win := gadgets.NewGenericWindow("Droplets registered with Virtigo", "Buttons of things")
|
||||
t := pb.NewTable("testDroptable")
|
||||
t.NewUuid()
|
||||
|
||||
grid := win.Group.RawGrid()
|
||||
grid.NewButton("Create", func() {
|
||||
log.Info("todo: open create window here")
|
||||
})
|
||||
grid.NewButton("Show All", func() {
|
||||
log.Info("todo")
|
||||
})
|
||||
|
||||
/*
|
||||
grid.NewButton("Update", func() {
|
||||
t.Update()
|
||||
})
|
||||
*/
|
||||
|
||||
tbox := win.Bottom.Box()
|
||||
t.SetParent(tbox)
|
||||
t.AddHostname()
|
||||
t.AddStringFunc("location", func(d *virtpb.Droplet) string {
|
||||
return d.Current.Hypervisor
|
||||
})
|
||||
t.AddMemory()
|
||||
t.AddCpus()
|
||||
t.AddSpicePort()
|
||||
t.AddTimeFunc("age", func(d *virtpb.Droplet) time.Time {
|
||||
age := d.Current.OnSince.AsTime()
|
||||
log.Info("age", d.Hostname, virtpb.FormatDuration(time.Since(age)))
|
||||
return age
|
||||
})
|
||||
t.AddStringFunc("State", func(d *virtpb.Droplet) string {
|
||||
if d.Current.State == virtpb.DropletState_ON {
|
||||
return "ON"
|
||||
}
|
||||
if d.Current.State == virtpb.DropletState_OFF {
|
||||
return "OFF"
|
||||
}
|
||||
return "UNKNOWN"
|
||||
})
|
||||
t.AddStringFunc("mac addr", func(d *virtpb.Droplet) string {
|
||||
var macs []string
|
||||
for _, n := range d.Networks {
|
||||
macs = append(macs, n.Mac)
|
||||
}
|
||||
tmp := strings.Join(macs, "\n")
|
||||
return strings.TrimSpace(tmp)
|
||||
})
|
||||
t.ShowTable()
|
||||
return win, t
|
||||
}
|
||||
|
||||
func makeEventsWindow(pb *virtpb.Events) *gadgets.GenericWindow {
|
||||
win := gadgets.NewGenericWindow("Cluster Events", "Buttons of things")
|
||||
grid := win.Group.RawGrid()
|
||||
grid.NewButton("List", func() {
|
||||
log.Info("list...")
|
||||
})
|
||||
tmp := fmt.Sprintf("num of events = %d", pb.Len())
|
||||
grid.NewLabel(tmp)
|
||||
|
||||
tbox := win.Bottom.Box() // a vertical box (like a stack of books)
|
||||
t := pb.NewTable("test 2")
|
||||
t.NewUuid()
|
||||
t.SetParent(tbox)
|
||||
t.AddDropletName()
|
||||
t.AddHypervisor()
|
||||
t.ShowTable()
|
||||
return win
|
||||
}
|
|
@ -0,0 +1,72 @@
|
|||
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
|
||||
// Use of this source code is governed by the GPL 3.0
|
||||
|
||||
package main
|
||||
|
||||
// An app to submit patches for the 30 GO GUI repos
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
"go.wit.com/lib/protobuf/virtpb"
|
||||
"go.wit.com/log"
|
||||
)
|
||||
|
||||
func doList() {
|
||||
msg := []byte(`{"message": "Hello"}`)
|
||||
|
||||
// Initialize a persistent client with a custom Transport
|
||||
client = &http.Client{
|
||||
Transport: &http.Transport{
|
||||
DisableKeepAlives: false, // Ensure Keep-Alive is enabled
|
||||
},
|
||||
Timeout: 10 * time.Second, // Set a reasonable timeout
|
||||
}
|
||||
|
||||
me.cmap = make(map[*virtpb.Cluster]*adminT)
|
||||
for c := range me.clusters.IterAll() {
|
||||
var err error
|
||||
admin := new(adminT)
|
||||
admin.cluster = new(virtpb.Cluster)
|
||||
me.cmap[c] = admin
|
||||
log.Info("found in the config file", c.URL[0])
|
||||
// a.makeClusterGroup(c)
|
||||
admin.url, err = url.Parse(c.URL[0])
|
||||
if err != nil {
|
||||
badExit(err)
|
||||
}
|
||||
|
||||
// update the droplet list
|
||||
if data, err := postData(admin.url.String()+"/DropletsPB", msg); err != nil {
|
||||
log.Info("/DropletsPB Error:", err)
|
||||
continue
|
||||
} else {
|
||||
admin.cluster.Droplets = new(virtpb.Droplets)
|
||||
if err := admin.cluster.Droplets.Unmarshal(data); err != nil {
|
||||
log.Printf("DropletsPB Response len:%d\n", len(data))
|
||||
log.Println("droplets marshal failed", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
log.Printf("Cluster Name: %s\n", c.Name)
|
||||
log.Printf("Number of Droplets: %d\n", admin.cluster.Droplets.Len())
|
||||
|
||||
var found *virtpb.Droplets
|
||||
found = virtpb.NewDroplets()
|
||||
all := admin.cluster.Droplets.All()
|
||||
for all.Scan() {
|
||||
vm := all.Next()
|
||||
if vm.Current == nil {
|
||||
continue
|
||||
}
|
||||
if argv.List.On && (vm.Current.State == virtpb.DropletState_OFF) {
|
||||
continue
|
||||
}
|
||||
found.Append(vm)
|
||||
log.Info(vm.SprintHeader())
|
||||
}
|
||||
log.Println("On Droplet count=", found.Len())
|
||||
}
|
||||
}
|
|
@ -0,0 +1,98 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"go.wit.com/lib/protobuf/virtpb"
|
||||
"go.wit.com/log"
|
||||
)
|
||||
|
||||
/*
|
||||
debugging code to see the state of the
|
||||
cluster via http
|
||||
*/
|
||||
|
||||
func dumpCluster(w http.ResponseWriter) {
|
||||
umap, macs, err := ValidateDroplets()
|
||||
for u, hostname := range umap {
|
||||
fmt.Fprintln(w, "uuid:", u, "hostname:", hostname)
|
||||
}
|
||||
|
||||
for mac, uuid := range macs {
|
||||
fmt.Fprintln(w, "mac:", mac, "uuid", uuid, "hostname:", umap[uuid])
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Fprintln(w, "ValidateDroplets() failed:", err)
|
||||
}
|
||||
}
|
||||
|
||||
// list running droplets and droplets that should be running
|
||||
func dumpDroplets(w http.ResponseWriter, full bool) {
|
||||
loop := me.cluster.DropletsAll() // get the list of droplets
|
||||
for loop.Scan() {
|
||||
d := loop.Next()
|
||||
|
||||
// this line in golang could replace 80 lines of COBOL
|
||||
header := d.SprintDumpHeader() + " "
|
||||
|
||||
// check if this is a locally defined libvirt domain that needs to be imported
|
||||
if d.LocalOnly != "" {
|
||||
header += "(local)"
|
||||
}
|
||||
header += d.Hostname
|
||||
|
||||
if d.Current.State == virtpb.DropletState_ON {
|
||||
// everything is as it should be with this vm
|
||||
fmt.Fprintln(w, header)
|
||||
continue
|
||||
}
|
||||
if d.StartState == virtpb.DropletState_ON {
|
||||
// this is supposed to be ON and needs to be turned on
|
||||
fmt.Fprintln(w, header, "(should be on). todo: start() here")
|
||||
continue
|
||||
}
|
||||
|
||||
if d.LocalOnly != "" {
|
||||
// this is supposed to be ON and needs to be turned on
|
||||
fmt.Fprintln(w, header, "this libvirt/domain/xml needs to be imported")
|
||||
continue
|
||||
}
|
||||
|
||||
if full {
|
||||
var filenames string
|
||||
for _, disk := range d.Disks {
|
||||
filenames += disk.Filename + " "
|
||||
}
|
||||
|
||||
// this needs to be turned on
|
||||
fmt.Fprintln(w, header, filenames)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// status of the hypervisors
|
||||
func dumpHypervisors(w http.ResponseWriter) {
|
||||
var totalDroplets int
|
||||
var totalUnknownDroplets int
|
||||
for _, h := range me.hypers {
|
||||
dur := time.Since(h.lastpoll)
|
||||
tmp := virtpb.FormatDuration(dur)
|
||||
fmt.Fprintln(w, h.pb.Hostname, "killcount =", h.killcount, "lastpoll:", tmp)
|
||||
for name, _ := range h.lastDroplets {
|
||||
totalDroplets += 1
|
||||
d := me.cluster.FindDropletByName(name)
|
||||
header := d.SprintDumpHeader() + " "
|
||||
if d == nil {
|
||||
totalUnknownDroplets += 1
|
||||
}
|
||||
log.Info("\t", header, d.Hostname)
|
||||
}
|
||||
}
|
||||
if totalUnknownDroplets == 0 {
|
||||
fmt.Fprintln(w, "\tTotal Droplets", totalDroplets)
|
||||
} else {
|
||||
fmt.Fprintln(w, "\tTotal Droplets", totalDroplets, "total libvirt only droplets =", totalUnknownDroplets)
|
||||
}
|
||||
}
|
116
event.go
116
event.go
|
@ -2,21 +2,18 @@ package main
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"go.wit.com/lib/gui/shell"
|
||||
"go.wit.com/lib/protobuf/virtpb"
|
||||
"go.wit.com/log"
|
||||
)
|
||||
|
||||
// Start logs that a droplet's virtual machine came up.
// TODO(review): looks like a placeholder — confirm nothing else is
// supposed to happen here.
func (d *DropletT) Start() {
	log.Info("a new virtual machine is running")
}
|
||||
|
||||
func (h *HyperT) RestartDaemon() {
|
||||
// restarts the virtigod daemon on a hypervisor via http
|
||||
func (h *HyperT) RestartVirtigod() {
|
||||
url := "http://" + h.pb.Hostname + ":2520/kill"
|
||||
s := shell.Wget(url)
|
||||
log.Info("EVENT RestartDaemon", url, s)
|
||||
log.Info("EVENT RestartVirtigod", url, s)
|
||||
h.lastpoll = time.Now()
|
||||
h.killcount += 1
|
||||
|
||||
|
@ -28,103 +25,34 @@ func (h *HyperT) RestartDaemon() {
|
|||
me.unstable = time.Now()
|
||||
}
|
||||
|
||||
var stableTimeout time.Duration = 43 * time.Second
|
||||
|
||||
// checks if the cluster is ready and stable
|
||||
func clusterReady() (bool, string) {
|
||||
last := time.Since(me.unstable)
|
||||
if last > stableTimeout {
|
||||
// the cluster has not been stable for 133 seconds
|
||||
log.Warn("clusterReady() is stable for ", shell.FormatDuration(stableTimeout), " secs")
|
||||
return true, fmt.Sprintln("clusterReady() is stable ", shell.FormatDuration(stableTimeout), " secs")
|
||||
}
|
||||
log.Warn("clusterReady() is unstable for", shell.FormatDuration(last))
|
||||
return false, "clusterReady() is unstable for " + shell.FormatDuration(last)
|
||||
}
|
||||
|
||||
func (d *DropletT) dropletReady() (bool, string) {
|
||||
if d.CurrentState == "ON" {
|
||||
return false, "EVENT start droplet is already ON"
|
||||
}
|
||||
if d.starts > 2 {
|
||||
// reason := "EVENT start droplet has already been started " + d.starts + " times"
|
||||
return false, fmt.Sprintln("EVENT start droplet has already been started ", d.starts, " times")
|
||||
}
|
||||
return true, ""
|
||||
}
|
||||
|
||||
func (h *HyperT) Start(d *DropletT) (bool, string) {
|
||||
ready, result := clusterReady()
|
||||
if !ready {
|
||||
return false, result
|
||||
}
|
||||
ready, result = d.dropletReady()
|
||||
// this must be bool in string because accumulated output is sometimes
|
||||
// written to STDOUT, sometimes to http
|
||||
func (h *HyperT) start(d *virtpb.Droplet) (bool, string) {
|
||||
ready, result := me.cluster.DropletReady(d)
|
||||
if !ready {
|
||||
return false, result
|
||||
}
|
||||
|
||||
url := "http://" + h.pb.Hostname + ":2520/start?start=" + d.pb.Hostname
|
||||
s := shell.Wget(url)
|
||||
url := "http://" + h.pb.Hostname + ":2520/start?start=" + d.Hostname
|
||||
var msg string
|
||||
var data []byte
|
||||
msg = d.FormatJSON()
|
||||
data = []byte(msg) // Convert the string to []byte
|
||||
req, err := httpPost(url, data)
|
||||
if err != nil {
|
||||
return false, fmt.Sprintln("error:", err)
|
||||
}
|
||||
log.Info("http post url:", url)
|
||||
log.Info("http post data:", msg)
|
||||
|
||||
result = "EVENT start droplet url: " + url + "\n"
|
||||
result += "EVENT start droplet response: " + s.String()
|
||||
result += "EVENT start droplet response: " + string(req)
|
||||
|
||||
// increment the counter for a start attempt working
|
||||
d.starts += 1
|
||||
d.Current.StartAttempts += 1
|
||||
|
||||
// mark the cluster as unstable so droplet starts can be throttled
|
||||
me.unstable = time.Now()
|
||||
|
||||
return true, result
|
||||
}
|
||||
|
||||
func Start(name string) (bool, string) {
|
||||
var result string
|
||||
|
||||
d := findDroplet(name)
|
||||
if d == nil {
|
||||
result += "can't start unknown droplet"
|
||||
return false, result
|
||||
}
|
||||
|
||||
if d.CurrentState == "ON" {
|
||||
return false, "EVENT start droplet is already ON"
|
||||
}
|
||||
|
||||
dur := time.Since(me.unstable) // how long has the cluster been stable?
|
||||
result = fmt.Sprintln("should start droplet", name, "here. grid stable for:", shell.FormatDuration(dur))
|
||||
if dur < 17*time.Second {
|
||||
result += "grid is still too unstable"
|
||||
return false, result
|
||||
}
|
||||
|
||||
// make the list of hypervisors that are active and can start new droplets
|
||||
var pool []*HyperT
|
||||
for _, h := range me.hypers {
|
||||
result += fmt.Sprintln("could start droplet on", name, "on", h.pb.Hostname, h.pb.Active)
|
||||
if d.pb.PreferredHypervisor == h.pb.Hostname {
|
||||
// the config file says this droplet should run on this hypervisor
|
||||
a, b := h.Start(d)
|
||||
return a, result + b
|
||||
}
|
||||
|
||||
if h.pb.Active != true {
|
||||
continue
|
||||
}
|
||||
pool = append(pool, h)
|
||||
}
|
||||
|
||||
// left here as an example of how to actually do random numbers
|
||||
// it's complete mathematical chaos. Randomness is simple when
|
||||
// human interaction occurs -- which is exactly what happens most
|
||||
// of the time. most random shit is bullshit. all you really need
|
||||
// is exactly this to make sure the random functions work as they
|
||||
// should. Probably, just use this everywhere in all cases. --jcarr
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
a := 0
|
||||
b := len(pool)
|
||||
n := a + rand.Intn(b-a)
|
||||
result += fmt.Sprintln("pool has", len(pool), "members", "rand =", n)
|
||||
h := pool[n]
|
||||
startbool, startresult := h.Start(d)
|
||||
return startbool, result + startresult
|
||||
}
|
||||
|
|
|
@ -0,0 +1,37 @@
|
|||
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
|
||||
// Use of this source code is governed by the GPL 3.0
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"go.wit.com/gui"
|
||||
"go.wit.com/log"
|
||||
)
|
||||
|
||||
func okExit(note string) {
|
||||
if note != "" {
|
||||
log.Info(ARGNAME, "exit:", note, "ok")
|
||||
}
|
||||
gui.StandardExit()
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
func badExit(err error) {
|
||||
log.Info(ARGNAME, "failed: ", err)
|
||||
gui.StandardExit()
|
||||
os.Exit(-1)
|
||||
}
|
||||
|
||||
func exit(note string, err error) {
|
||||
if note != "" {
|
||||
log.Info(ARGNAME, "exit:", note, "ok")
|
||||
}
|
||||
gui.StandardExit()
|
||||
if err == nil {
|
||||
os.Exit(0)
|
||||
}
|
||||
log.Info(ARGNAME, "failed: ", err)
|
||||
os.Exit(-1)
|
||||
}
|
257
http.go
257
http.go
|
@ -2,11 +2,13 @@ package main
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.wit.com/lib/gui/shell"
|
||||
"go.wit.com/lib/protobuf/virtpb"
|
||||
"go.wit.com/lib/virtigolib"
|
||||
"go.wit.com/log"
|
||||
)
|
||||
|
||||
|
@ -17,102 +19,199 @@ func cleanURL(url string) string {
|
|||
}
|
||||
|
||||
func okHandler(w http.ResponseWriter, r *http.Request) {
|
||||
var tmp string
|
||||
tmp = cleanURL(r.URL.Path)
|
||||
var route string
|
||||
route = cleanURL(r.URL.Path)
|
||||
// log.HttpMode(w)
|
||||
// defer log.HttpMode(nil)
|
||||
|
||||
// is the cluster running what it should?
|
||||
if tmp == "/droplets" {
|
||||
for _, d := range me.droplets {
|
||||
if d.pb.StartState != "ON" {
|
||||
continue
|
||||
}
|
||||
dur := time.Since(d.lastpoll) // Calculate the elapsed time
|
||||
var hname string
|
||||
if d.h == nil {
|
||||
hname = ""
|
||||
} else {
|
||||
hname = d.h.pb.Hostname
|
||||
}
|
||||
if d.CurrentState != "ON" {
|
||||
fmt.Fprintln(w, "BAD STATE ", d.pb.Hostname, hname, "(", d.pb.StartState, "vs", d.CurrentState, ")", shell.FormatDuration(dur))
|
||||
} else {
|
||||
dur := time.Since(d.lastpoll) // Calculate the elapsed time
|
||||
fmt.Fprintln(w, "GOOD STATE ON", d.pb.Hostname, hname, shell.FormatDuration(dur))
|
||||
}
|
||||
msg, err := ioutil.ReadAll(r.Body) // Read the body as []byte
|
||||
if err != nil {
|
||||
log.Info("ReadAll() error =", err)
|
||||
return
|
||||
}
|
||||
if route == "/uptime" {
|
||||
ok, s := uptimeCheck()
|
||||
fmt.Fprintln(w, s)
|
||||
// log.Info(s)
|
||||
updateUptimeGui(s)
|
||||
if ok {
|
||||
// log.Info("Handling URL:", route, "cluster is ok")
|
||||
} else {
|
||||
log.Info("Handling URL:", route, "cluster is not right yet")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if tmp == "/favicon.ico" {
|
||||
if route == "/create" {
|
||||
var d *virtpb.Droplet
|
||||
d = new(virtpb.Droplet)
|
||||
if err := d.Unmarshal(msg); err != nil {
|
||||
log.Info("proto.Unmarshal() failed on wire message len", len(msg))
|
||||
log.Info("error =", err)
|
||||
return
|
||||
}
|
||||
log.Info("proto.Unmarshal() worked on msg len", len(msg), "hostname =", d.Hostname)
|
||||
found := me.cluster.FindDropletByName(d.Hostname)
|
||||
if found != nil {
|
||||
log.Info("already have hostname ", d.Hostname)
|
||||
return
|
||||
}
|
||||
log.Info("new hostname ", d.Hostname)
|
||||
if !me.cluster.AddDroplet(d) {
|
||||
log.Info("new hostname added ok ", d.Hostname)
|
||||
} else {
|
||||
log.Info("hostname add failed for ", d.Hostname)
|
||||
}
|
||||
if err := me.cluster.ConfigSave(); err != nil {
|
||||
log.Info("configsave error", err)
|
||||
os.Exit(-1)
|
||||
}
|
||||
log.Info("config file saved")
|
||||
return
|
||||
}
|
||||
|
||||
if route == "/event" {
|
||||
var e *virtpb.Event
|
||||
e = new(virtpb.Event)
|
||||
if err := e.Unmarshal(msg); err != nil {
|
||||
log.Info("proto.Unmarshal() failed on wire message len", len(msg))
|
||||
log.Info("error =", err)
|
||||
return
|
||||
}
|
||||
log.Info("/event proto.Unmarshal() worked on msg len", len(msg), "hostname =", e.DropletUuid)
|
||||
result := doEvent(e)
|
||||
data, err := result.Marshal()
|
||||
if err != nil {
|
||||
log.Info("/event marshal failed", err, "len(data) =", len(data))
|
||||
fmt.Fprintln(w, "/event failed", err)
|
||||
return
|
||||
}
|
||||
w.Write(data)
|
||||
// fmt.Fprintln("droplet marshal failed", err)
|
||||
return
|
||||
}
|
||||
|
||||
if route == "/import" {
|
||||
log.Info("virtigo import starts here")
|
||||
result, err := importDomain(w, r)
|
||||
if err != nil {
|
||||
log.Info("virtigo import failed")
|
||||
log.Info(result)
|
||||
return
|
||||
}
|
||||
log.Info("virtigo import worked")
|
||||
return
|
||||
}
|
||||
|
||||
// toggle poll logging
|
||||
if route == "/poll" {
|
||||
if POLL.Enabled() {
|
||||
log.Info("POLL is true")
|
||||
POLL.SetBool(false)
|
||||
} else {
|
||||
log.Info("POLL is false")
|
||||
POLL.SetBool(true)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if route == "/dumpcluster" {
|
||||
dumpCluster(w)
|
||||
return
|
||||
}
|
||||
|
||||
if route == "/dumpdroplet" {
|
||||
me.cluster.DumpDroplet(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
if route == "/dumpdroplets" {
|
||||
dumpDroplets(w, false)
|
||||
return
|
||||
}
|
||||
|
||||
if route == "/DropletsPB" {
|
||||
pb := me.cluster.GetDropletsPB()
|
||||
data, err := pb.Marshal()
|
||||
if err != nil {
|
||||
log.Info("droplet marshal failed", err)
|
||||
fmt.Fprintln(w, "droplet marshal failed", err)
|
||||
return
|
||||
}
|
||||
w.Write(data)
|
||||
// fmt.Fprintln("droplet marshal failed", err)
|
||||
return
|
||||
}
|
||||
|
||||
if route == "/HypervisorsPB" {
|
||||
pb := me.cluster.GetHypervisorsPB()
|
||||
data, err := pb.Marshal()
|
||||
if err != nil {
|
||||
log.Info("hypervisors marshal failed", err)
|
||||
fmt.Fprintln(w, "hypervisors marshal failed", err)
|
||||
return
|
||||
}
|
||||
w.Write(data)
|
||||
// fmt.Fprintln("droplet marshal failed", err)
|
||||
return
|
||||
}
|
||||
|
||||
if route == "/EventsPB" {
|
||||
pb := me.cluster.GetEventsPB()
|
||||
data, err := pb.Marshal()
|
||||
if err != nil {
|
||||
log.Info("events marshal failed", err)
|
||||
fmt.Fprintln(w, "events marshal failed", err)
|
||||
return
|
||||
}
|
||||
w.Write(data)
|
||||
// fmt.Fprintln("droplet marshal failed", err)
|
||||
return
|
||||
}
|
||||
|
||||
if route == "/dumpdropletsfull" {
|
||||
dumpDroplets(w, true)
|
||||
return
|
||||
}
|
||||
|
||||
if route == "/dumphypervisors" {
|
||||
dumpHypervisors(w)
|
||||
return
|
||||
}
|
||||
|
||||
if route == "/dumplibvirtxml" {
|
||||
virtigolib.DumpLibvirtxmlDomainNames()
|
||||
return
|
||||
}
|
||||
|
||||
if route == "/quit" {
|
||||
log.Warn("writing out config file and exiting virtigo")
|
||||
if err := me.cluster.ConfigSave(); err != nil {
|
||||
log.Info("configsave error", err)
|
||||
} else {
|
||||
os.Exit(-1)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if route == "/favicon.ico" {
|
||||
// w.Header().Set("Content-Type", "image/svg+xml")
|
||||
w.Header().Set("Content-Type", "image/png")
|
||||
writeFile(w, "ipv6.png")
|
||||
return
|
||||
}
|
||||
|
||||
if tmp == "/goReference.svg" {
|
||||
if route == "/goReference.svg" {
|
||||
w.Header().Set("Content-Type", "image/svg+xml")
|
||||
writeFile(w, "goReference.svg")
|
||||
return
|
||||
}
|
||||
|
||||
if tmp == "/writeconfig" {
|
||||
writeConfigFile()
|
||||
writeConfigFileDroplets()
|
||||
fmt.Fprintln(w, "OK")
|
||||
return
|
||||
}
|
||||
|
||||
if tmp == "/uptime" {
|
||||
b, s := clusterHealthy()
|
||||
if b {
|
||||
log.Info("Handling URL:", tmp, "cluster is ok", s)
|
||||
fmt.Fprintln(w, s)
|
||||
} else {
|
||||
log.Info("Handling URL:", tmp, "cluster is not right yet", s)
|
||||
fmt.Fprintln(w, s)
|
||||
}
|
||||
for _, h := range me.hypers {
|
||||
url := "http://" + h.pb.Hostname + ":2520/kill"
|
||||
dur := time.Since(h.lastpoll) // Calculate the elapsed time
|
||||
if dur > 90*time.Second {
|
||||
h.RestartDaemon()
|
||||
continue
|
||||
}
|
||||
if h.killcount != 0 {
|
||||
log.Info("KILL count =", h.killcount, "FOR", h.pb.Hostname, dur, "curl", url)
|
||||
}
|
||||
if h.killcount > 10 {
|
||||
log.Info("KILL count is greater than 10 for host", h.pb.Hostname, dur, "curl", url)
|
||||
}
|
||||
// l := shell.FormatDuration(dur)
|
||||
// log.Warn("HOST =", h.pb.Hostname, "Last poll =", l)
|
||||
//if d.pb.StartState != "ON" {
|
||||
// continue
|
||||
//}
|
||||
// dur := time.Since(d.lastpoll) // Calculate the elapsed time
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if tmp == "/start" {
|
||||
start := r.URL.Query().Get("start")
|
||||
// log.Warn("Handling URL:", tmp, "start droplet", start)
|
||||
b, result := Start(start)
|
||||
log.Warn("Start returned =", b, "result =", result)
|
||||
fmt.Fprintln(w, "Start() returned", b)
|
||||
fmt.Fprintln(w, "result:", result)
|
||||
return
|
||||
}
|
||||
|
||||
log.Warn("BAD URL =", tmp)
|
||||
fmt.Fprintln(w, "BAD URL", tmp)
|
||||
// badurl(w, r.URL.String())
|
||||
// fmt.Fprintln(w, "BAD", tmp)
|
||||
log.Warn("BAD URL =", route)
|
||||
}
|
||||
|
||||
// write a file out to the http socket
|
||||
func writeFile(w http.ResponseWriter, filename string) {
|
||||
// fmt.Fprintln(w, "GOT TEST?")
|
||||
fullname := "resources/" + filename
|
||||
pfile, err := resources.ReadFile(fullname)
|
||||
if err != nil {
|
||||
|
|
|
@ -0,0 +1,187 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"go.wit.com/lib/protobuf/virtpb"
|
||||
"go.wit.com/lib/virtigolib"
|
||||
"go.wit.com/log"
|
||||
|
||||
// "libvirt.org/go/libvirt"
|
||||
"libvirt.org/go/libvirtxml"
|
||||
)
|
||||
|
||||
// attempts to import the *libvirt.Domain directly from the hypervisor
|
||||
|
||||
func importDomain(w http.ResponseWriter, r *http.Request) (string, error) {
|
||||
var result string
|
||||
domainName := r.URL.Query().Get("domainName")
|
||||
force := r.URL.Query().Get("force")
|
||||
if domainName == "" {
|
||||
result = "importDomain() failed. name is blank " + r.URL.Path
|
||||
log.Warn(result)
|
||||
fmt.Fprintln(w, result)
|
||||
return "", errors.New(result)
|
||||
}
|
||||
|
||||
// a LocalOnly record should already have been created by hypervisor.Poll()
|
||||
d := me.cluster.FindDropletByName(domainName)
|
||||
if d == nil {
|
||||
result = "libvirt domain " + domainName + " could not be found on any hypervisor\n"
|
||||
log.Info(result)
|
||||
fmt.Fprintln(w, result)
|
||||
return result, errors.New(result)
|
||||
}
|
||||
|
||||
// if it's not local only, don't attempt this for now
|
||||
if d.LocalOnly == "" {
|
||||
if force == "true" {
|
||||
result = "LocalOnly is blank. force=true. PROCEEDING WITH DANGER\n"
|
||||
log.Warn(result)
|
||||
fmt.Fprint(w, result)
|
||||
} else {
|
||||
result = "LocalOnly is blank. SKIP. merge not supported yet. force=" + force
|
||||
log.Log(WARN, result)
|
||||
fmt.Fprintln(w, result)
|
||||
return result, errors.New(result)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
// it probably doesn't really matter what the state it
|
||||
if d.Current.State != pb.DropletState_OFF {
|
||||
result := "error: libvirt domain " + name + " is not off"
|
||||
log.Info(result)
|
||||
fmt.Fprintln(w, result)
|
||||
return result, errors.New(result)
|
||||
}
|
||||
*/
|
||||
|
||||
// get the hypervisor record for what it's worth
|
||||
h := findHypervisorByName(d.Current.Hypervisor)
|
||||
if h == nil {
|
||||
result = "unknown hypervisor = " + d.Current.Hypervisor
|
||||
log.Log(WARN, result)
|
||||
fmt.Fprintln(w, result)
|
||||
return result, errors.New(result)
|
||||
}
|
||||
|
||||
// exports and builds a libvirt.Domain from the hypervisor
|
||||
domcfg, err := ExportLibvirtDomain(h.pb, domainName)
|
||||
if err != nil {
|
||||
result = fmt.Sprint("ExportLibvirtDomain() failed", err)
|
||||
log.Warn(result)
|
||||
fmt.Fprintln(w, result)
|
||||
return "", err
|
||||
}
|
||||
|
||||
// merges and updates the droplet protobuf based on the libvirt XML
|
||||
events, err := virtigolib.MergelibvirtDomain(d, domcfg)
|
||||
if err != nil {
|
||||
result = fmt.Sprint("MerglibvirtDomain() failed for", d.Hostname, err)
|
||||
log.Warn(result)
|
||||
fmt.Fprintln(w, result)
|
||||
return "", errors.New(result)
|
||||
}
|
||||
|
||||
// check what was non-standard and make a note of it. Save it in the protobuf
|
||||
s, err := virtigolib.DumpNonStandardXML(domcfg)
|
||||
if err != nil {
|
||||
result = s + "\n"
|
||||
result = fmt.Sprintln("DumpNonStandardXML() on", domcfg.Name, "failed for", err)
|
||||
log.Info(result)
|
||||
return "", err
|
||||
}
|
||||
result += s
|
||||
|
||||
// everything worked. add the events
|
||||
for _, e := range events {
|
||||
me.cluster.AddEvent(e)
|
||||
}
|
||||
|
||||
result += fmt.Sprintln("importDomain() worked")
|
||||
|
||||
// remote LocalOnly flag
|
||||
d.LocalOnly = ""
|
||||
|
||||
// probably be safe and don't let this move around the cluster
|
||||
d.PreferredHypervisor = d.Current.Hypervisor
|
||||
|
||||
log.Log(WARN, result)
|
||||
fmt.Fprintln(w, result)
|
||||
log.Warn("Everything worked. Saving config files")
|
||||
if err := me.cluster.ConfigSave(); err != nil {
|
||||
log.Warn("configsave error", err)
|
||||
os.Exit(-1)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// this must be bool in string because accumulated output is sometimes
|
||||
// written to STDOUT, sometimes to http
|
||||
func (h *HyperT) importDomain(d *virtpb.Droplet) (bool, string) {
|
||||
ready, result := me.cluster.DropletReady(d)
|
||||
if !ready {
|
||||
return false, result
|
||||
}
|
||||
|
||||
url := "http://" + h.pb.Hostname + ":2520/import?domain=" + d.Hostname
|
||||
var msg string
|
||||
var data []byte
|
||||
msg = d.FormatJSON()
|
||||
data = []byte(msg) // Convert the string to []byte
|
||||
req, err := httpPost(url, data)
|
||||
if err != nil {
|
||||
return false, fmt.Sprintln("error:", err)
|
||||
}
|
||||
log.Info("http post url:", url)
|
||||
log.Info("http post data:", msg)
|
||||
|
||||
result = "EVENT import droplet url: " + url + "\n"
|
||||
result += "EVENT import droplet response: " + string(req)
|
||||
|
||||
// increment the counter for a start attempt working
|
||||
d.Current.StartAttempts += 1
|
||||
|
||||
// mark the cluster as unstable so droplet starts can be throttled
|
||||
me.unstable = time.Now()
|
||||
|
||||
return true, result
|
||||
}
|
||||
|
||||
func ExportLibvirtDomain(h *virtpb.Hypervisor, domainName string) (*libvirtxml.Domain, error) {
|
||||
// attempt to get the domain record from virtigo
|
||||
xml, err := postImportDomain(h.Hostname, domainName)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// convert the xml into a libvirt object
|
||||
domcfg := &libvirtxml.Domain{}
|
||||
err = domcfg.Unmarshal(string(xml))
|
||||
if err != nil {
|
||||
log.Warn("Unmarshal failed", domainName, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return domcfg, nil
|
||||
}
|
||||
|
||||
func postImportDomain(hypervisor string, domain string) ([]byte, error) {
|
||||
url := "http://" + hypervisor + ":2520/import?domain=" + domain
|
||||
var msg string
|
||||
var data []byte
|
||||
msg = "import " + domain
|
||||
data = []byte(msg) // Convert the string to []byte
|
||||
req, err := httpPost(url, data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return req, nil
|
||||
}
|
202
main.go
202
main.go
|
@ -4,155 +4,101 @@ package main
|
|||
|
||||
import (
|
||||
"embed"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"go.wit.com/dev/alexflint/arg"
|
||||
"go.wit.com/lib/gui/prep"
|
||||
"go.wit.com/lib/protobuf/virtpb"
|
||||
"go.wit.com/log"
|
||||
)
|
||||
|
||||
var Version string
|
||||
// sent via -ldflags
|
||||
var VERSION string
|
||||
var BUILDTIME string
|
||||
|
||||
var ARGNAME string = "virtigo"
|
||||
|
||||
//go:embed resources/*
|
||||
var resources embed.FS
|
||||
|
||||
func main() {
|
||||
me = new(virtigoT)
|
||||
prep.Bash(ARGNAME, argv.DoAutoComplete) // this line should be: prep.Bash(argv)
|
||||
me.myGui = prep.Gui() // prepares the GUI package for go-args
|
||||
me.pp = arg.MustParse(&argv)
|
||||
|
||||
if me.pp == nil {
|
||||
me.pp.WriteHelp(os.Stdout)
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
if os.Getenv("VIRTIGO_HOME") == "" {
|
||||
homeDir, _ := os.UserHomeDir()
|
||||
fullpath := filepath.Join(homeDir, ".config/virtigo")
|
||||
os.Setenv("VIRTIGO_HOME", fullpath)
|
||||
}
|
||||
pp := arg.MustParse(&argv)
|
||||
|
||||
if !argv.Uptime {
|
||||
pp.WriteHelp(os.Stdout)
|
||||
os.Exit(0)
|
||||
me.clusters = virtpb.NewClusters()
|
||||
|
||||
if argv.List != nil {
|
||||
err := me.clusters.ConfigLoad()
|
||||
if err != nil {
|
||||
badExit(err)
|
||||
}
|
||||
doList()
|
||||
okExit("virtigo list")
|
||||
}
|
||||
|
||||
if argv.Droplet != nil {
|
||||
exit(doDroplet())
|
||||
}
|
||||
|
||||
me.myGui.Start() // loads the GUI toolkit
|
||||
|
||||
if argv.Admin {
|
||||
err := me.clusters.ConfigLoad()
|
||||
if err != nil {
|
||||
badExit(err)
|
||||
}
|
||||
|
||||
doAdminGui()
|
||||
okExit("admin close")
|
||||
}
|
||||
|
||||
if argv.Server != "" {
|
||||
log.Info("start admin interface")
|
||||
admin := new(adminT)
|
||||
var err error
|
||||
admin.url, err = url.Parse(argv.Server)
|
||||
if err != nil {
|
||||
badExit(err)
|
||||
}
|
||||
err = me.clusters.ConfigLoad()
|
||||
if err != nil {
|
||||
clusters := virtpb.NewClusters()
|
||||
c := new(virtpb.Cluster)
|
||||
c.Uuid = uuid.New().String()
|
||||
c.URL = append(c.URL, argv.Server)
|
||||
clusters.Append(c)
|
||||
virtpb.ConfigWriteTEXT(clusters, "cluster.text")
|
||||
|
||||
badExit(err)
|
||||
}
|
||||
|
||||
admin.doAdminGui()
|
||||
okExit("admin close")
|
||||
}
|
||||
|
||||
if argv.Daemon {
|
||||
log.DaemonMode(true)
|
||||
if err := doDaemon(); err != nil {
|
||||
badExit(err)
|
||||
}
|
||||
okExit("")
|
||||
}
|
||||
|
||||
// set defaults
|
||||
me.unstable = time.Now() // initialize the grid as unstable
|
||||
me.delay = 5 * time.Second // how often to poll the hypervisors
|
||||
me.changed = false
|
||||
|
||||
cfgfile()
|
||||
|
||||
var ok bool = true
|
||||
for _, filename := range argv.Xml {
|
||||
domcfg, err := readXml(filename)
|
||||
if err != nil {
|
||||
// parsing the libvirt xml file failed
|
||||
log.Info("error:", filename, err)
|
||||
ok = false
|
||||
continue
|
||||
}
|
||||
// see if the libvirt xml droplet is already here
|
||||
d, err := findDomain(domcfg)
|
||||
if err != nil {
|
||||
// some error. probably UUID mismatch or hostname duplication
|
||||
// this has to be fixed by hand
|
||||
ok = false
|
||||
continue
|
||||
}
|
||||
if d == nil {
|
||||
// this is a new droplet. add it to the cluster
|
||||
log.Info("Add New Droplet here", domcfg.Name)
|
||||
_, err := addDomainDroplet(domcfg)
|
||||
if err != nil {
|
||||
ok = false
|
||||
log.Info("addDomainDroplet() failed", err)
|
||||
}
|
||||
} else {
|
||||
// this droplet is already here
|
||||
if updateDroplet(d, domcfg) {
|
||||
if me.changed {
|
||||
log.Info("updateDroplet() worked. droplet changed")
|
||||
} else {
|
||||
log.Info(filename, "nothing changed")
|
||||
}
|
||||
} else {
|
||||
log.Info("updateDroplet() failed for", d.pb.Hostname)
|
||||
ok = false
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(argv.Xml) != 0 {
|
||||
if me.changed {
|
||||
if argv.Save {
|
||||
writeConfigFile()
|
||||
writeConfigFileDroplets()
|
||||
} else {
|
||||
log.Info("Not saving changes (use --save to save)")
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
log.Info("adding xml files failed")
|
||||
os.Exit(-1)
|
||||
}
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
/*
|
||||
log.Info("command line hypervisors:", argv.Hosts)
|
||||
for _, name := range argv.Hosts {
|
||||
h := findHypervisor(name)
|
||||
if h != nil {
|
||||
log.Info("command line hypervisor", name, "already in config file")
|
||||
continue
|
||||
}
|
||||
h = addHypervisor(name)
|
||||
h.pb.Active = true
|
||||
}
|
||||
*/
|
||||
|
||||
// start the watchdog polling for each hypervisor
|
||||
for _, h := range me.hypers {
|
||||
log.Info("starting polling on", h.pb.Hostname)
|
||||
go h.NewWatchdog()
|
||||
}
|
||||
|
||||
// sit here
|
||||
startHTTP()
|
||||
}
|
||||
|
||||
func makeDroplet(start string) {
|
||||
d := findDroplet(start)
|
||||
if d == nil {
|
||||
log.Info("droplet is unknown:", start)
|
||||
os.Exit(0)
|
||||
}
|
||||
log.Info("start droplet here:", d.pb.Hostname)
|
||||
domcfg := makeStandardXml(d)
|
||||
|
||||
fmt.Printf("Virt type %s\n", domcfg.Type)
|
||||
fmt.Printf("Virt name %s\n", domcfg.Name)
|
||||
fmt.Printf("Virt UUID %s\n", domcfg.UUID)
|
||||
fmt.Printf("Virt Memory %d %s\n", domcfg.Memory.Value, domcfg.Memory.Unit)
|
||||
|
||||
// test add some ethernet devices
|
||||
macs := getMacs(domcfg)
|
||||
fmt.Printf("Virt mac addr:%s\n", macs)
|
||||
|
||||
// clearEthernet(domcfg)
|
||||
|
||||
addEthernet(domcfg, "04:44:33:11:22:11", "worldbr")
|
||||
addEthernet(domcfg, "04:44:33:33:44:55", "greenbr")
|
||||
|
||||
// add a check here to make these unique
|
||||
// setRandomMacs(domcfg)
|
||||
|
||||
// print out the final mac addresses
|
||||
macs = getMacs(domcfg)
|
||||
fmt.Printf("Virt mac addr:%s\n", macs)
|
||||
|
||||
qcow := "/home/nfs/" + d.pb.Hostname + ".qcow2"
|
||||
setSimpleDisk(domcfg, qcow)
|
||||
|
||||
writeoutXml(domcfg, "blahcarr")
|
||||
os.Exit(0)
|
||||
doGui() // start making our forge GUI
|
||||
startHTTP() // sit here forever
|
||||
}
|
||||
|
|
|
@ -0,0 +1,84 @@
|
|||
package main
|
||||
|
||||
// RFC implementation
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"os/user"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Function to create a persistent TCP connection
|
||||
func createPersistentConnection(host string) (net.Conn, error) {
|
||||
dialer := &net.Dialer{
|
||||
Timeout: 10 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}
|
||||
conn, err := dialer.Dial("tcp", host)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to establish connection: %w", err)
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
func mesocket() {
|
||||
host := "example.com:80"
|
||||
|
||||
// Establish a persistent TCP connection
|
||||
conn, err := createPersistentConnection(host)
|
||||
if err != nil {
|
||||
fmt.Println("Error creating connection:", err)
|
||||
return
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
// Custom transport that forces HTTP requests to use our existing connection
|
||||
transport := &http.Transport{
|
||||
DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
|
||||
fmt.Println("Reusing existing TCP connection")
|
||||
return conn, nil
|
||||
},
|
||||
DisableKeepAlives: false, // Ensure Keep-Alive is enabled
|
||||
}
|
||||
|
||||
client := &http.Client{
|
||||
Transport: transport,
|
||||
Timeout: 10 * time.Second,
|
||||
}
|
||||
|
||||
url := "http://example.com/endpoint"
|
||||
data := []byte(`{"message": "Hello"}`)
|
||||
|
||||
// Create an HTTP request
|
||||
req, err := http.NewRequest(http.MethodPost, url, bytes.NewBuffer(data))
|
||||
if err != nil {
|
||||
fmt.Println("Error creating request:", err)
|
||||
return
|
||||
}
|
||||
|
||||
usr, _ := user.Current()
|
||||
req.Header.Set("author", usr.Username)
|
||||
req.Header.Set("Connection", "keep-alive") // Keep connection alive
|
||||
|
||||
// Perform the HTTP request
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
fmt.Println("Error performing request:", err)
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Read and print the response
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
fmt.Println("Error reading response:", err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Println("Response:", string(body))
|
||||
}
|
219
poll.go
219
poll.go
|
@ -6,9 +6,20 @@ import (
|
|||
"time"
|
||||
|
||||
"go.wit.com/lib/gui/shell"
|
||||
"go.wit.com/lib/protobuf/virtpb"
|
||||
"go.wit.com/log"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
)
|
||||
|
||||
func findHypervisorByName(name string) *HyperT {
|
||||
for _, h := range me.hypers {
|
||||
if h.pb.Hostname == name {
|
||||
return h
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *HyperT) pollHypervisor() {
|
||||
url := "http://" + h.pb.Hostname + ":2520/vms"
|
||||
log.Log(POLL, "wget url =", url)
|
||||
|
@ -16,6 +27,7 @@ func (h *HyperT) pollHypervisor() {
|
|||
if s == nil {
|
||||
return
|
||||
}
|
||||
|
||||
var bytesSplice []byte
|
||||
bytesSplice = s.Bytes()
|
||||
// fmt.Fprintln(w, string(bytesSplice))
|
||||
|
@ -25,134 +37,184 @@ func (h *HyperT) pollHypervisor() {
|
|||
}
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) < 2 {
|
||||
log.Log(WARN, "unknown:", h.pb.Hostname, fields)
|
||||
continue
|
||||
}
|
||||
state := fields[0]
|
||||
name := fields[1]
|
||||
if state == "ON" {
|
||||
log.Log(POLL, h.pb.Hostname, "STATE:", state, "HOST:", name, "rest:", fields[2:])
|
||||
d := findDroplet(name)
|
||||
if d == nil {
|
||||
// this is a new unknown droplet (not in the config file)
|
||||
d = new(DropletT)
|
||||
d.pb.Hostname = name
|
||||
d.h = h
|
||||
d.lastpoll = time.Now()
|
||||
d.CurrentState = "ON"
|
||||
me.droplets = append(me.droplets, d)
|
||||
log.Log(EVENT, name, "IS NEW. ADDED ON", h.pb.Hostname)
|
||||
d := me.cluster.FindDropletByName(name)
|
||||
if d == nil {
|
||||
log.Log(WARN, name, "local defined domain")
|
||||
log.Log(WARN, name, "local Adding new entry with AddDropletLocal()")
|
||||
log.Log(WARN, name, "local Adding new entry with AddDropletLocal()")
|
||||
log.Log(WARN, name, "local Adding new entry with AddDropletLocal()")
|
||||
me.cluster.AddDropletLocal(name, h.pb.Hostname)
|
||||
continue
|
||||
}
|
||||
start := d.SprintHeader()
|
||||
h.lastDroplets[name] = time.Now()
|
||||
if state == "OFF" {
|
||||
if d.Current.Hypervisor == "" {
|
||||
d.Current.Hypervisor = h.pb.Hostname
|
||||
}
|
||||
log.Log(INFO, "ALREADY RECORDED", d.pb.Hostname)
|
||||
if d.LocalOnly == "" {
|
||||
log.Log(WARN, start, "local domain is a duplicate (need to resolve this)", h.pb.Hostname)
|
||||
continue
|
||||
}
|
||||
log.Log(WARN, start, "local domain ready to import from hypervisor")
|
||||
continue
|
||||
}
|
||||
|
||||
// update the status to ON and the last polled value
|
||||
d.CurrentState = "ON"
|
||||
d.lastpoll = time.Now()
|
||||
if state == "ON" {
|
||||
log.Log(POLL, start, "STATE:", state, "rest:", fields[2:])
|
||||
|
||||
if d.h == nil {
|
||||
// update the status to ON
|
||||
d.SetState(virtpb.DropletState_ON)
|
||||
|
||||
// set the LastPoll time to now
|
||||
now := time.Now()
|
||||
d.Current.LastPoll = timestamppb.New(now)
|
||||
|
||||
if d.Current.Hypervisor == "" {
|
||||
// this means the droplet was in the config file
|
||||
// but this is the first time it's shown up as running
|
||||
|
||||
// this should mean a droplet is running where the config file says it probably should be running
|
||||
if d.pb.PreferredHypervisor == h.pb.Hostname {
|
||||
log.Log(EVENT, "new droplet", d.pb.Hostname, "(matches config hypervisor", h.pb.Hostname+")")
|
||||
d.h = h
|
||||
if d.PreferredHypervisor == h.pb.Hostname {
|
||||
log.Log(EVENT, start, "poll shows new droplet", d.Hostname,
|
||||
"(matches config hypervisor", h.pb.Hostname+")")
|
||||
d.Current.Hypervisor = h.pb.Hostname
|
||||
continue
|
||||
}
|
||||
|
||||
log.Log(EVENT, "new droplet", d.pb.Hostname, "on", h.pb.Hostname, "(in config file without preferred hypervisor)")
|
||||
d.h = h
|
||||
log.Log(EVENT, start, "poll shows new droplet (in config file without preferred hypervisor)")
|
||||
d.Current.Hypervisor = h.pb.Hostname
|
||||
continue
|
||||
}
|
||||
|
||||
// this means the droplet is still where it was before
|
||||
if d.h.pb.Hostname != h.pb.Hostname {
|
||||
log.Log(EVENT, "droplet", d.h.pb.Hostname, "moved to", h.pb.Hostname)
|
||||
// if this is blank, the droplet has probably never booted yet
|
||||
if d.Current.Hypervisor == "" {
|
||||
d.Current.Hypervisor = h.pb.Hostname
|
||||
continue
|
||||
}
|
||||
d.h = h
|
||||
|
||||
// this means the droplet has moved
|
||||
if d.Current.Hypervisor != h.pb.Hostname {
|
||||
log.Log(EVENT, "droplet", d.Hostname, "moved to", h.pb.Hostname)
|
||||
// record the droplet migrated (or booted somewhere else? recording this is a work in progress)
|
||||
me.cluster.DropletMoved(d, h.pb)
|
||||
continue
|
||||
}
|
||||
d.Current.Hypervisor = h.pb.Hostname
|
||||
}
|
||||
}
|
||||
|
||||
// these are the droplets that don't exist anymore on this hypervisor
|
||||
// this should mean you ran shutdown within domU
|
||||
for name, t := range h.lastDroplets {
|
||||
dur := time.Since(t)
|
||||
if dur > me.hyperPollDelay {
|
||||
d := me.cluster.FindDropletByName(name)
|
||||
header := d.SprintHeader()
|
||||
if d == nil {
|
||||
log.Info(header, "droplet has probably powered down", name, "but findDroplet returned nil")
|
||||
// should delete this from h.lastDroplets
|
||||
continue
|
||||
}
|
||||
if d.Current.State == virtpb.DropletState_OFF {
|
||||
log.Info(header, "droplet timed out and is off. remove from h.lastDroplets[] slice")
|
||||
delete(h.lastDroplets, name)
|
||||
continue
|
||||
}
|
||||
|
||||
// everthing below here is dumb and needs to be rethought
|
||||
if d.Current.State != virtpb.DropletState_UNKNOWN {
|
||||
d.SetState(virtpb.DropletState_UNKNOWN)
|
||||
log.Info(header, "set state UNKNOWN here", name)
|
||||
}
|
||||
if d.Current.State == virtpb.DropletState_UNKNOWN {
|
||||
if dur > time.Minute*2 {
|
||||
// what this means is the droplet probably wasn't migrated or the migrate failed
|
||||
// where should this be checked? the status needs to be changed to OFF
|
||||
s := virtpb.FormatDuration(dur)
|
||||
log.Info(header, "UNKNOWN state for more than 2 minutes (clearing out ?)", name, s)
|
||||
|
||||
// it might be safe to set the status to OFF here. not really. this poll needs
|
||||
// to be moved somewhere else. there needs to be a new goroutine not tied to the
|
||||
// hypervisor
|
||||
d.SetState(virtpb.DropletState_OFF)
|
||||
}
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
h.lastpoll = time.Now()
|
||||
h.killcount = 0 // poll worked. reset killcount
|
||||
}
|
||||
|
||||
func findDroplet(name string) *DropletT {
|
||||
for _, d := range me.droplets {
|
||||
if d.pb.Hostname == name {
|
||||
return d
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func findHypervisor(name string) *HyperT {
|
||||
for _, h := range me.hypers {
|
||||
if h.pb.Hostname == name {
|
||||
return h
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// check the state of the cluster and return a string
|
||||
// that is intended to be sent to an uptime monitor like Kuma
|
||||
func clusterHealthy() (bool, string) {
|
||||
func uptimeCheck() (bool, string) {
|
||||
var good bool = true
|
||||
var total int
|
||||
var working int
|
||||
var failed int
|
||||
var missing int
|
||||
var missing []*virtpb.Droplet
|
||||
var unknown int
|
||||
var unknownList []string
|
||||
|
||||
for _, d := range me.droplets {
|
||||
loop := me.cluster.DropletsAll() // get the list of droplets
|
||||
for loop.Scan() {
|
||||
d := loop.Next()
|
||||
total += 1
|
||||
if d.pb.StartState != "ON" {
|
||||
continue
|
||||
}
|
||||
dur := time.Since(d.lastpoll) // Calculate the elapsed time
|
||||
if d.CurrentState == "" {
|
||||
// log.Info("SKIP. hostname has not been polled yet", d.pb.Hostname, d.hname)
|
||||
unknown += 1
|
||||
unknownList = append(unknownList, d.pb.Hostname)
|
||||
if d.StartState != virtpb.DropletState_ON {
|
||||
continue
|
||||
}
|
||||
dur := time.Since(d.Current.LastPoll.AsTime()) // Calculate the elapsed time
|
||||
var hname string
|
||||
if d.h != nil {
|
||||
hname = d.h.pb.Hostname
|
||||
if d.Current.Hypervisor != "" {
|
||||
hname = d.Current.Hypervisor
|
||||
}
|
||||
if d.CurrentState != "ON" {
|
||||
log.Info("BAD STATE", d.pb.StartState, d.pb.Hostname, hname, "CurrentState =", d.CurrentState, shell.FormatDuration(dur))
|
||||
good = false
|
||||
failed += 1
|
||||
} else {
|
||||
dur := time.Since(d.lastpoll) // Calculate the elapsed time
|
||||
if dur > time.Minute {
|
||||
log.Info("GOOD STATE MISSING", d.pb.Hostname, hname, shell.FormatDuration(dur))
|
||||
switch d.Current.State {
|
||||
case virtpb.DropletState_UNKNOWN:
|
||||
// log.Info("SKIP. hostname has not been polled yet", d.Hostname, d.hname)
|
||||
unknown += 1
|
||||
unknownList = append(unknownList, d.Hostname)
|
||||
case virtpb.DropletState_ON:
|
||||
if dur > me.missingDropletTimeout {
|
||||
log.Info("GOOD STATE MISSING", d.Hostname, hname, virtpb.FormatDuration(dur))
|
||||
good = false
|
||||
d.CurrentState = "MISSING"
|
||||
d.SetState(virtpb.DropletState_UNKNOWN)
|
||||
failed += 1
|
||||
continue
|
||||
}
|
||||
l := shell.FormatDuration(dur)
|
||||
l := virtpb.FormatDuration(dur)
|
||||
if l == "" {
|
||||
log.Info("DUR IS EMPTY", dur)
|
||||
missing += 1
|
||||
missing = append(missing, d)
|
||||
continue
|
||||
}
|
||||
working += 1
|
||||
// log.Info("GOOD STATE ON", d.pb.Hostname, d.hname, "dur =", l)
|
||||
// log.Info("GOOD STATE ON", d.Hostname, d.hname, "dur =", l)
|
||||
case virtpb.DropletState_OFF:
|
||||
log.Info("OFF STATE", d.StartState, d.Hostname, hname, virtpb.FormatDuration(dur))
|
||||
good = false
|
||||
failed += 1
|
||||
// missing = append(missing, d)
|
||||
default:
|
||||
log.Info("WTF STATE", d.StartState, d.Hostname, hname, "Current.State =", d.Current.State, virtpb.FormatDuration(dur))
|
||||
good = false
|
||||
failed += 1
|
||||
missing = append(missing, d)
|
||||
}
|
||||
}
|
||||
var summary string = "("
|
||||
summary += fmt.Sprintf("total = %d ", total)
|
||||
summary += fmt.Sprintf("working = %d ", working)
|
||||
if missing > 0 {
|
||||
summary += fmt.Sprintf("missing = %d ", missing)
|
||||
if len(missing) > 0 {
|
||||
summary += fmt.Sprintf("missing = %d ", len(missing))
|
||||
}
|
||||
if unknown > 0 {
|
||||
summary += fmt.Sprintf("unknown = %d ", unknown, unknownList)
|
||||
summary += fmt.Sprintf("unknown = %d %+v", unknown, unknownList)
|
||||
}
|
||||
if failed > 0 {
|
||||
summary += fmt.Sprintf("failed = %d ", failed)
|
||||
|
@ -163,14 +225,19 @@ func clusterHealthy() (bool, string) {
|
|||
summary += "(killcount=" + fmt.Sprintf("%d", me.killcount) + ")"
|
||||
}
|
||||
last := time.Since(me.unstable)
|
||||
if last > 133*time.Second {
|
||||
s := strings.TrimSpace(virtpb.FormatDuration(last))
|
||||
if last > me.clusterStableDuration {
|
||||
// the cluster has not been stable for 10 seconds
|
||||
s := strings.TrimSpace(shell.FormatDuration(last))
|
||||
summary += "(stable=" + s + ")"
|
||||
} else {
|
||||
summary += "(unstable=" + s + ")"
|
||||
}
|
||||
for _, d := range missing {
|
||||
summary += fmt.Sprint("\nmissing droplet: ", d.Hostname, " current state ", d.Current.State)
|
||||
}
|
||||
if good {
|
||||
return good, "GOOD=true " + summary
|
||||
}
|
||||
me.unstable = time.Now()
|
||||
// me.unstable = time.Now()
|
||||
return good, "GOOD=false " + summary
|
||||
}
|
||||
|
|
|
@ -0,0 +1,98 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/user"
|
||||
|
||||
"go.wit.com/log"
|
||||
)
|
||||
|
||||
func httpPost(url string, data []byte) ([]byte, error) {
|
||||
var err error
|
||||
var req *http.Request
|
||||
|
||||
req, err = http.NewRequest(http.MethodPost, url, bytes.NewBuffer(data))
|
||||
|
||||
usr, _ := user.Current()
|
||||
req.Header.Set("author", usr.Username)
|
||||
hostname, _ := os.Hostname()
|
||||
req.Header.Set("hostname", hostname)
|
||||
|
||||
client := &http.Client{}
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return body, err
|
||||
}
|
||||
return body, nil
|
||||
}
|
||||
|
||||
func parseURL() (string, string) {
|
||||
parsedURL, err := url.Parse(argv.Server)
|
||||
if err != nil {
|
||||
fmt.Println("Error parsing URL:", err)
|
||||
return "", ""
|
||||
}
|
||||
|
||||
// Extract Host (includes hostname/IP and port if present)
|
||||
host := parsedURL.Host
|
||||
fmt.Println("Host:", host)
|
||||
|
||||
// Extract Hostname (without port)
|
||||
hostname := parsedURL.Hostname()
|
||||
fmt.Println("Hostname:", hostname)
|
||||
|
||||
// Extract Port
|
||||
port := parsedURL.Port()
|
||||
fmt.Println("Port:", port)
|
||||
|
||||
return parsedURL.Hostname(), parsedURL.Port()
|
||||
}
|
||||
|
||||
func gusPost(port string, dest string) ([]byte, error) {
|
||||
var err error
|
||||
var req *http.Request
|
||||
|
||||
gus, _ := parseURL()
|
||||
url := fmt.Sprintf("http://%s:%d/%s?port=%s&dest=%s", gus, 2522, "enable", port, dest)
|
||||
|
||||
data := []byte("hello world")
|
||||
|
||||
req, err = http.NewRequest(http.MethodPost, url, bytes.NewBuffer(data))
|
||||
|
||||
usr, _ := user.Current()
|
||||
req.Header.Set("author", usr.Username)
|
||||
hostname, _ := os.Hostname()
|
||||
req.Header.Set("hostname", hostname)
|
||||
req.Header.Set("port", port)
|
||||
req.Header.Set("dest", dest)
|
||||
|
||||
log.Printf("gusPust url(%s) port(%s) dest(%s) hostname(%s)\n", url, port, dest, hostname)
|
||||
|
||||
client := &http.Client{}
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return body, err
|
||||
}
|
||||
return body, nil
|
||||
}
|
|
@ -0,0 +1,5 @@
|
|||
dirs: "/var/lib/libvirt/images"
|
||||
dirs: "/home/isos"
|
||||
dirs: "/home/nfs"
|
||||
dirs: "/home/ceph"
|
||||
dirs: "/home"
|
|
@ -0,0 +1,44 @@
|
|||
droplets: {
|
||||
hostname: "git.wit.org"
|
||||
cpus: 16
|
||||
memory: 103079215104
|
||||
preferred_hypervisor: "farm04"
|
||||
qemu_machine: "pc-q35-9.0"
|
||||
networks: {
|
||||
mac: "22:22:22:22:22:03"
|
||||
name: ""
|
||||
}
|
||||
disks: {
|
||||
filename: "git.wit.org.qcow2"
|
||||
filepath: "/home/nfs3"
|
||||
}
|
||||
}
|
||||
droplets: {
|
||||
hostname: "go.wit.com"
|
||||
cpus: 2
|
||||
memory: 2147483648
|
||||
preferred_hypervisor: "farm04"
|
||||
qemu_machine: "pc-q35-9.0"
|
||||
networks: {
|
||||
mac: "22:22:22:22:22:05"
|
||||
name: ""
|
||||
}
|
||||
disks: {
|
||||
filename: "go.wit.com.qcow2"
|
||||
filepath: "/home/nfs"
|
||||
}
|
||||
}
|
||||
droplets: {
|
||||
hostname: "wekan.foo.com"
|
||||
cpus: 2
|
||||
memory: 2147483648
|
||||
qemu_machine: "pc-q35-9.1"
|
||||
networks: {
|
||||
mac: "22:22:22:22:22:08"
|
||||
name: ""
|
||||
}
|
||||
disks: {
|
||||
filename: "wekan.foo.com.qcow2"
|
||||
filepath: "/home/nfs"
|
||||
}
|
||||
}
|
|
@ -0,0 +1,10 @@
|
|||
events: {
|
||||
droplet: "www.foo.org"
|
||||
start: {
|
||||
seconds: 1729895589
|
||||
nanos: 425114400
|
||||
}
|
||||
field_name: "Droplet.Memory"
|
||||
orig_val: "1073741824"
|
||||
new_val: "2147483648"
|
||||
}
|
|
@ -0,0 +1,17 @@
|
|||
hypervisors: {
|
||||
uuid: "11111111-2222-3333-4444-555555555555"
|
||||
hostname: "hyper01"
|
||||
active: true
|
||||
cpus: 16
|
||||
memory: 8796093022208
|
||||
comment: "this is a fake hypervisor"
|
||||
autoscan: true
|
||||
}
|
||||
hypervisors: {
|
||||
hostname: "hyper02"
|
||||
active: true
|
||||
cpus: 16
|
||||
memory: 8796093022208
|
||||
comment: "this is a fake hypervisor"
|
||||
autoscan: true
|
||||
}
|
|
@ -0,0 +1,132 @@
|
|||
Fields in libvirtxml.Domain:
|
||||
Domain: XMLName
|
||||
Domain: Type
|
||||
Domain: ID
|
||||
Domain: Name
|
||||
Domain: UUID
|
||||
Domain: GenID
|
||||
Domain: Title
|
||||
Domain: Description
|
||||
Domain: Metadata
|
||||
Domain: MaximumMemory
|
||||
Domain: Memory
|
||||
Domain: CurrentMemory
|
||||
Domain: BlockIOTune
|
||||
Domain: MemoryTune
|
||||
Domain: MemoryBacking
|
||||
Domain: VCPU
|
||||
Domain: VCPUs
|
||||
Domain: IOThreads
|
||||
Domain: IOThreadIDs
|
||||
Domain: DefaultIOThread
|
||||
Domain: CPUTune
|
||||
Domain: NUMATune
|
||||
Domain: Resource
|
||||
Domain: SysInfo
|
||||
Domain: Bootloader
|
||||
Domain: BootloaderArgs
|
||||
Domain: OS
|
||||
Domain: IDMap
|
||||
Domain: Features
|
||||
Domain: CPU
|
||||
Domain: Clock
|
||||
Domain: OnPoweroff
|
||||
Domain: OnReboot
|
||||
Domain: OnCrash
|
||||
Domain: PM
|
||||
Domain: Perf
|
||||
Domain: Devices
|
||||
Domain: SecLabel
|
||||
Domain: KeyWrap
|
||||
Domain: LaunchSecurity
|
||||
Domain: QEMUCommandline
|
||||
Domain: QEMUCapabilities
|
||||
Domain: QEMUOverride
|
||||
Domain: QEMUDeprecation
|
||||
Domain: LXCNamespace
|
||||
Domain: BHyveCommandline
|
||||
Domain: VMWareDataCenterPath
|
||||
Domain: XenCommandline
|
||||
Fields in libvirtxml.DomainDeviceList:
|
||||
DomainDeviceList: Emulator
|
||||
DomainDeviceList: Disks
|
||||
DomainDeviceList: Controllers
|
||||
DomainDeviceList: Leases
|
||||
DomainDeviceList: Filesystems
|
||||
DomainDeviceList: Interfaces
|
||||
DomainDeviceList: Smartcards
|
||||
DomainDeviceList: Serials
|
||||
DomainDeviceList: Parallels
|
||||
DomainDeviceList: Consoles
|
||||
DomainDeviceList: Channels
|
||||
DomainDeviceList: Inputs
|
||||
DomainDeviceList: TPMs
|
||||
DomainDeviceList: Graphics
|
||||
DomainDeviceList: Sounds
|
||||
DomainDeviceList: Audios
|
||||
DomainDeviceList: Videos
|
||||
DomainDeviceList: Hostdevs
|
||||
DomainDeviceList: RedirDevs
|
||||
DomainDeviceList: RedirFilters
|
||||
DomainDeviceList: Hubs
|
||||
DomainDeviceList: Watchdogs
|
||||
DomainDeviceList: MemBalloon
|
||||
DomainDeviceList: RNGs
|
||||
DomainDeviceList: NVRAM
|
||||
DomainDeviceList: Panics
|
||||
DomainDeviceList: Shmems
|
||||
DomainDeviceList: Memorydevs
|
||||
DomainDeviceList: IOMMU
|
||||
DomainDeviceList: VSock
|
||||
DomainDeviceList: Crypto
|
||||
DomainDeviceList: PStore
|
||||
Fields in libvirtxml.DomainInterface:
|
||||
DomainInterface: XMLName
|
||||
DomainInterface: Managed
|
||||
DomainInterface: TrustGuestRXFilters
|
||||
DomainInterface: MAC
|
||||
DomainInterface: Source
|
||||
DomainInterface: Boot
|
||||
DomainInterface: VLan
|
||||
DomainInterface: VirtualPort
|
||||
DomainInterface: IP
|
||||
DomainInterface: Route
|
||||
DomainInterface: PortForward
|
||||
DomainInterface: Script
|
||||
DomainInterface: DownScript
|
||||
DomainInterface: BackendDomain
|
||||
DomainInterface: Target
|
||||
DomainInterface: Guest
|
||||
DomainInterface: Model
|
||||
DomainInterface: Driver
|
||||
DomainInterface: Backend
|
||||
DomainInterface: FilterRef
|
||||
DomainInterface: Tune
|
||||
DomainInterface: Teaming
|
||||
DomainInterface: Link
|
||||
DomainInterface: MTU
|
||||
DomainInterface: Bandwidth
|
||||
DomainInterface: PortOptions
|
||||
DomainInterface: Coalesce
|
||||
DomainInterface: ROM
|
||||
DomainInterface: ACPI
|
||||
DomainInterface: Alias
|
||||
DomainInterface: Address
|
||||
Fields in libvirtxml.DomainInterfaceSource
|
||||
libvirtxml.DomainInterfaceSource User
|
||||
libvirtxml.DomainInterfaceSource Ethernet
|
||||
libvirtxml.DomainInterfaceSource VHostUser
|
||||
libvirtxml.DomainInterfaceSource Server
|
||||
libvirtxml.DomainInterfaceSource Client
|
||||
libvirtxml.DomainInterfaceSource MCast
|
||||
libvirtxml.DomainInterfaceSource Network
|
||||
libvirtxml.DomainInterfaceSource Bridge
|
||||
libvirtxml.DomainInterfaceSource Internal
|
||||
libvirtxml.DomainInterfaceSource Direct
|
||||
libvirtxml.DomainInterfaceSource Hostdev
|
||||
libvirtxml.DomainInterfaceSource UDP
|
||||
libvirtxml.DomainInterfaceSource VDPA
|
||||
libvirtxml.DomainInterfaceSource Null
|
||||
libvirtxml.DomainInterfaceSource VDS
|
||||
Fields in libvirtxml.DomainInterfaceSourceBridge
|
||||
libvirtxml.DomainInterfaceSourceBridge Bridge
|
|
@ -1,191 +0,0 @@
|
|||
<domain type='kvm'>
|
||||
<name>go.wit.com</name>
|
||||
<uuid>9e795cd7-7142-4757-bef2-f607b4f9944f</uuid>
|
||||
<metadata>
|
||||
<libosinfo:libosinfo xmlns:libosinfo="http://libosinfo.org/xmlns/libvirt/domain/1.0">
|
||||
<libosinfo:os id="http://debian.org/debian/12"/>
|
||||
</libosinfo:libosinfo>
|
||||
</metadata>
|
||||
<memory unit='KiB'>2097152</memory>
|
||||
<currentMemory unit='KiB'>2097152</currentMemory>
|
||||
<vcpu placement='static'>2</vcpu>
|
||||
<resource>
|
||||
<partition>/machine</partition>
|
||||
</resource>
|
||||
<os>
|
||||
<type arch='x86_64' machine='pc-q35-9.0'>hvm</type>
|
||||
<boot dev='hd'/>
|
||||
</os>
|
||||
<features>
|
||||
<acpi/>
|
||||
<apic/>
|
||||
<vmport state='off'/>
|
||||
</features>
|
||||
<cpu mode='host-passthrough' check='none' migratable='on'/>
|
||||
<clock offset='utc'>
|
||||
<timer name='rtc' tickpolicy='catchup'/>
|
||||
<timer name='pit' tickpolicy='delay'/>
|
||||
<timer name='hpet' present='no'/>
|
||||
</clock>
|
||||
<on_poweroff>destroy</on_poweroff>
|
||||
<on_reboot>restart</on_reboot>
|
||||
<on_crash>destroy</on_crash>
|
||||
<pm>
|
||||
<suspend-to-mem enabled='no'/>
|
||||
<suspend-to-disk enabled='no'/>
|
||||
</pm>
|
||||
<devices>
|
||||
<emulator>/usr/bin/qemu-system-x86_64</emulator>
|
||||
<disk type='file' device='disk'>
|
||||
<driver name='qemu' type='qcow2' />
|
||||
<source file='/home/go.wit.com.qcow2'/>
|
||||
<backingStore/>
|
||||
<target dev='sda' bus='sata'/>
|
||||
<address type='drive' controller='0' bus='0' target='0' unit='0'/>
|
||||
</disk>
|
||||
<controller type='usb' index='0' model='qemu-xhci' ports='15'>
|
||||
<address type='pci' domain='0x0000' bus='0x02' slot='0x00' function='0x0'/>
|
||||
</controller>
|
||||
<controller type='pci' index='0' model='pcie-root'/>
|
||||
<controller type='pci' index='1' model='pcie-root-port'>
|
||||
<model name='pcie-root-port'/>
|
||||
<target chassis='1' port='0x10'/>
|
||||
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0' multifunction='on'/>
|
||||
</controller>
|
||||
<controller type='pci' index='2' model='pcie-root-port'>
|
||||
<model name='pcie-root-port'/>
|
||||
<target chassis='2' port='0x11'/>
|
||||
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x1'/>
|
||||
</controller>
|
||||
<controller type='pci' index='3' model='pcie-root-port'>
|
||||
<model name='pcie-root-port'/>
|
||||
<target chassis='3' port='0x12'/>
|
||||
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x2'/>
|
||||
</controller>
|
||||
<controller type='pci' index='4' model='pcie-root-port'>
|
||||
<model name='pcie-root-port'/>
|
||||
<target chassis='4' port='0x13'/>
|
||||
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x3'/>
|
||||
</controller>
|
||||
<controller type='pci' index='5' model='pcie-root-port'>
|
||||
<model name='pcie-root-port'/>
|
||||
<target chassis='5' port='0x14'/>
|
||||
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x4'/>
|
||||
</controller>
|
||||
<controller type='pci' index='6' model='pcie-root-port'>
|
||||
<model name='pcie-root-port'/>
|
||||
<target chassis='6' port='0x15'/>
|
||||
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x5'/>
|
||||
</controller>
|
||||
<controller type='pci' index='7' model='pcie-root-port'>
|
||||
<model name='pcie-root-port'/>
|
||||
<target chassis='7' port='0x16'/>
|
||||
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x6'/>
|
||||
</controller>
|
||||
<controller type='pci' index='8' model='pcie-root-port'>
|
||||
<model name='pcie-root-port'/>
|
||||
<target chassis='8' port='0x17'/>
|
||||
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x7'/>
|
||||
</controller>
|
||||
<controller type='pci' index='9' model='pcie-root-port'>
|
||||
<model name='pcie-root-port'/>
|
||||
<target chassis='9' port='0x18'/>
|
||||
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0' multifunction='on'/>
|
||||
</controller>
|
||||
<controller type='pci' index='10' model='pcie-root-port'>
|
||||
<model name='pcie-root-port'/>
|
||||
<target chassis='10' port='0x19'/>
|
||||
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x1'/>
|
||||
</controller>
|
||||
<controller type='pci' index='11' model='pcie-root-port'>
|
||||
<model name='pcie-root-port'/>
|
||||
<target chassis='11' port='0x1a'/>
|
||||
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x2'/>
|
||||
</controller>
|
||||
<controller type='pci' index='12' model='pcie-root-port'>
|
||||
<model name='pcie-root-port'/>
|
||||
<target chassis='12' port='0x1b'/>
|
||||
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x3'/>
|
||||
</controller>
|
||||
<controller type='pci' index='13' model='pcie-root-port'>
|
||||
<model name='pcie-root-port'/>
|
||||
<target chassis='13' port='0x1c'/>
|
||||
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x4'/>
|
||||
</controller>
|
||||
<controller type='pci' index='14' model='pcie-root-port'>
|
||||
<model name='pcie-root-port'/>
|
||||
<target chassis='14' port='0x1d'/>
|
||||
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x5'/>
|
||||
</controller>
|
||||
<controller type='pci' index='15' model='pcie-root-port'>
|
||||
<model name='pcie-root-port'/>
|
||||
<target chassis='15' port='0x1e'/>
|
||||
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x6'/>
|
||||
</controller>
|
||||
<controller type='pci' index='16' model='pcie-to-pci-bridge'>
|
||||
<model name='pcie-pci-bridge'/>
|
||||
<address type='pci' domain='0x0000' bus='0x04' slot='0x00' function='0x0'/>
|
||||
</controller>
|
||||
<controller type='sata' index='0'>
|
||||
<address type='pci' domain='0x0000' bus='0x00' slot='0x1f' function='0x2'/>
|
||||
</controller>
|
||||
<controller type='virtio-serial' index='0'>
|
||||
<address type='pci' domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
|
||||
</controller>
|
||||
<controller type='scsi' index='0' model='lsilogic'>
|
||||
<address type='pci' domain='0x0000' bus='0x10' slot='0x01' function='0x0'/>
|
||||
</controller>
|
||||
<interface type='bridge'>
|
||||
<mac address='22:22:22:22:22:22'/>
|
||||
<source bridge='worldbr'/>
|
||||
<model type='virtio'/>
|
||||
<address type='pci' domain='0x0000' bus='0x01' slot='0x00' function='0x0'/>
|
||||
</interface>
|
||||
<serial type='pty'>
|
||||
<target type='isa-serial' port='0'>
|
||||
<model name='isa-serial'/>
|
||||
</target>
|
||||
</serial>
|
||||
<console type='pty'>
|
||||
<target type='serial' port='0'/>
|
||||
</console>
|
||||
<channel type='unix'>
|
||||
<target type='virtio' name='org.qemu.guest_agent.0'/>
|
||||
<address type='virtio-serial' controller='0' bus='0' port='1'/>
|
||||
</channel>
|
||||
<channel type='spicevmc'>
|
||||
<target type='virtio' name='com.redhat.spice.0'/>
|
||||
<address type='virtio-serial' controller='0' bus='0' port='2'/>
|
||||
</channel>
|
||||
<input type='tablet' bus='usb'>
|
||||
<address type='usb' bus='0' port='1'/>
|
||||
</input>
|
||||
<input type='mouse' bus='ps2'/>
|
||||
<graphics type='spice' autoport='yes'>
|
||||
<listen type='address'/>
|
||||
<image compression='off'/>
|
||||
</graphics>
|
||||
<sound model='ich9'>
|
||||
<address type='pci' domain='0x0000' bus='0x00' slot='0x1b' function='0x0'/>
|
||||
</sound>
|
||||
<video>
|
||||
<model type='virtio' heads='1' primary='yes'/>
|
||||
<address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x0'/>
|
||||
</video>
|
||||
<redirdev bus='usb' type='spicevmc'>
|
||||
<address type='usb' bus='0' port='2'/>
|
||||
</redirdev>
|
||||
<redirdev bus='usb' type='spicevmc'>
|
||||
<address type='usb' bus='0' port='3'/>
|
||||
</redirdev>
|
||||
<memballoon model='virtio'>
|
||||
<address type='pci' domain='0x0000' bus='0x05' slot='0x00' function='0x0'/>
|
||||
</memballoon>
|
||||
<rng model='virtio'>
|
||||
<backend model='random'>/dev/urandom</backend>
|
||||
<address type='pci' domain='0x0000' bus='0x06' slot='0x00' function='0x0'/>
|
||||
</rng>
|
||||
</devices>
|
||||
<seclabel type='dynamic' model='apparmor' relabel='yes'/>
|
||||
</domain>
|
||||
|
|
@ -1,6 +0,0 @@
|
|||
<domain type='kvm'>
|
||||
<name>jcarr</name>
|
||||
<uuid>c8684902-c405-4f31-b532-128c277056cc</uuid>
|
||||
<memory unit='GiB'>4</memory>
|
||||
<vcpu placement='static'>8</vcpu>
|
||||
</domain>
|
|
@ -0,0 +1,114 @@
|
|||
package main
|
||||
|
||||
// validates the droplet information
|
||||
// finds a hypervisor
|
||||
// attempts to start the virtual machine
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"go.wit.com/lib/protobuf/virtpb"
|
||||
"go.wit.com/log"
|
||||
)
|
||||
|
||||
func isClusterStable() (string, error) {
|
||||
// how long has the cluster been stable?
|
||||
// wait until it is stable. use this to throttle droplet starts
|
||||
dur := time.Since(me.unstable)
|
||||
good := fmt.Sprintln("trying to start droplet here. grid stable for: ", virtpb.FormatDuration(dur))
|
||||
if dur < me.unstableTimeout {
|
||||
tmp := virtpb.FormatDuration(me.unstableTimeout)
|
||||
err := "grid is still too unstable (unstable timeout = " + tmp + ")\n"
|
||||
return good + err, errors.New(err)
|
||||
}
|
||||
return good, nil
|
||||
}
|
||||
|
||||
// for now, because sometimes this should write to stdout and
|
||||
// sometimes to http socket, it returns a string
|
||||
func Start(id string) (string, error) {
|
||||
var result string
|
||||
|
||||
if s, err := isClusterStable(); err != nil {
|
||||
result += s
|
||||
return result, err
|
||||
}
|
||||
|
||||
// lookup the droplet by name
|
||||
d := me.cluster.FindDropletByUuid(id)
|
||||
if d == nil {
|
||||
result = "can't start unknown droplet: " + id
|
||||
return result, errors.New(result)
|
||||
}
|
||||
|
||||
// validate the droplet
|
||||
if err := ValidateDroplet(d); err != nil {
|
||||
log.Info("ValidateDroplet() failed", err)
|
||||
result = "ValidateDroplet() failed droplet " + d.Hostname
|
||||
return result, err
|
||||
}
|
||||
|
||||
if d.Current == nil {
|
||||
d.Current = new(virtpb.Current)
|
||||
}
|
||||
|
||||
// is the droplet already on?
|
||||
if d.Current.State == virtpb.DropletState_ON {
|
||||
result = "EVENT start droplet " + d.Hostname + " is already ON"
|
||||
return result, errors.New(result)
|
||||
}
|
||||
|
||||
// make the list of hypervisors that are active and can start new droplets
|
||||
var pool []*HyperT
|
||||
for _, h := range me.hypers {
|
||||
// this droplet is set to use this and only this hypervisor
|
||||
if d.ForceHypervisor == h.pb.Hostname {
|
||||
ok, b := h.start(d)
|
||||
if ok {
|
||||
return result + b, nil
|
||||
}
|
||||
return result + b, errors.New("start " + d.Hostname + " on hypervisor " + h.pb.Hostname)
|
||||
}
|
||||
|
||||
// skip hypervisors marked inactive
|
||||
if h.pb.Active != true {
|
||||
result += fmt.Sprintln("hypervisor is inactive:", d.Hostname, "for", h.pb.Hostname, h.pb.Active)
|
||||
continue
|
||||
}
|
||||
|
||||
// the config file says this droplet should run on this hypervisor
|
||||
// attempt to start the droplet here. use this even if the hypervisor is inactive?
|
||||
if d.PreferredHypervisor == h.pb.Hostname {
|
||||
ok, b := h.start(d)
|
||||
if ok {
|
||||
return result + b, nil
|
||||
}
|
||||
return result + b, errors.New("start " + d.Hostname + " on hypervisor " + h.pb.Hostname)
|
||||
}
|
||||
|
||||
result += fmt.Sprintln("hypervisor ready:", d.Hostname, "for", h.pb.Hostname, h.pb.Active)
|
||||
pool = append(pool, h)
|
||||
}
|
||||
|
||||
// left here as an example of how to actually do random numbers
|
||||
// it's complete mathematical chaos. Randomness is simple when
|
||||
// human interaction occurs -- which is exactly what happens most
|
||||
// of the time. most random shit is bullshit. all you really need
|
||||
// is exactly this to make sure the random functions work as they
|
||||
// should. Probably, just use this everywhere in all cases. --jcarr
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
a := 0
|
||||
b := len(pool)
|
||||
n := a + rand.Intn(b-a)
|
||||
result += fmt.Sprintln("pool has", len(pool), "members", "rand =", n)
|
||||
h := pool[n]
|
||||
|
||||
ok, output := h.start(d)
|
||||
if ok {
|
||||
return result + output, nil
|
||||
}
|
||||
return result + output, errors.New("start " + d.Hostname + " on hypervisor " + h.pb.Hostname)
|
||||
}
|
67
structs.go
67
structs.go
|
@ -1,13 +1,17 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
pb "go.wit.com/lib/protobuf/virtbuf"
|
||||
"libvirt.org/go/libvirtxml"
|
||||
"go.wit.com/dev/alexflint/arg"
|
||||
"go.wit.com/gui"
|
||||
"go.wit.com/lib/gadgets"
|
||||
"go.wit.com/lib/gui/prep"
|
||||
"go.wit.com/lib/protobuf/virtpb"
|
||||
)
|
||||
|
||||
var me virtigoT
|
||||
var me *virtigoT
|
||||
|
||||
// disable the GUI
|
||||
func (b *virtigoT) Disable() {
|
||||
|
@ -21,30 +25,43 @@ func (b *virtigoT) Enable() {
|
|||
|
||||
// this app's variables
|
||||
type virtigoT struct {
|
||||
cluster *pb.Cluster
|
||||
names []string
|
||||
hypers []*HyperT
|
||||
droplets []*DropletT
|
||||
delay time.Duration // how often to poll the hypervisors
|
||||
killcount int
|
||||
unstable time.Time // the last time the cluster was incorrect
|
||||
changed bool
|
||||
pp *arg.Parser // go-arg parser
|
||||
myGui *prep.GuiPrep // the gui toolkit handle
|
||||
e *virtpb.Events // virt protobuf events
|
||||
hmap map[*virtpb.Hypervisor]*HyperT // map to the local struct
|
||||
names []string // ?
|
||||
hypers []*HyperT // notsure
|
||||
killcount int // how many times virtigo-d has had to been killed
|
||||
unstable time.Time // the last time the cluster was incorrect
|
||||
changed bool // have things changed?
|
||||
hyperPollDelay time.Duration // how often to poll the hypervisors
|
||||
unstableTimeout time.Duration // how long a droplet can be unstable until it's declared dead
|
||||
clusterStableDuration time.Duration // how long the cluster must be stable before new droplets can be started
|
||||
missingDropletTimeout time.Duration // how long a droplet can be missing for
|
||||
status *gui.Node // the cluster status
|
||||
lastuptime *gui.Node // the last time uptime was checked by Kuma
|
||||
clusters *virtpb.Clusters // clusters protobuf
|
||||
cmap map[*virtpb.Cluster]*adminT // map to local GUI objects and the protobuf
|
||||
gwin *gadgets.GenericWindow // main window
|
||||
cluster *virtpb.OldCluster // basic cluster settings
|
||||
// admin *adminT // the admin struct
|
||||
}
|
||||
|
||||
// cluster "admin" mode
|
||||
type adminT struct {
|
||||
cluster *virtpb.Cluster // the cluster protobuf
|
||||
uptime *gui.Node // the uptime message
|
||||
dwin *stdDropletTableWin // the droplet window
|
||||
hwin *stdHypervisorTableWin // the hypervisor window
|
||||
ewin *stdEventTableWin // the events window
|
||||
url *url.URL // URL for the cloud
|
||||
}
|
||||
|
||||
// the stuff that is needed for a hypervisor
|
||||
type HyperT struct {
|
||||
pb *pb.Hypervisor // the Hypervisor protobuf
|
||||
dog *time.Ticker // the watchdog timer itself
|
||||
lastpoll time.Time // the last time the hypervisor polled
|
||||
killcount int
|
||||
}
|
||||
|
||||
// the stuff that is needed for a hypervisor
|
||||
type DropletT struct {
|
||||
pb *pb.Droplet // the Droplet protobuf
|
||||
xml *libvirtxml.Domain // a xml representation from libvirt
|
||||
h *HyperT // the hypervisor it's currently running on
|
||||
CurrentState string // what the state of the droplet is ACTUALLY IS
|
||||
lastpoll time.Time // the last time the droplet was seen running
|
||||
starts int // how many times a start event has been attempted
|
||||
pb *virtpb.Hypervisor // the Hypervisor protobuf
|
||||
dog *time.Ticker // the watchdog timer itself
|
||||
lastpoll time.Time // the last time the hypervisor polled
|
||||
lastDroplets map[string]time.Time // the vm's in the last poll
|
||||
killcount int // how many times the daemon has been forcably killed
|
||||
}
|
||||
|
|
|
@ -0,0 +1,372 @@
|
|||
package main
|
||||
|
||||
/*
|
||||
validate / sanity check / consistancy check the data
|
||||
|
||||
here is some code to do smart things like:
|
||||
|
||||
* check mac addresses are unique
|
||||
* check uuid's are unique
|
||||
* double check filenames are unique
|
||||
* return a unique mac address
|
||||
* return a unique uuid
|
||||
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/google/uuid"
|
||||
|
||||
"go.wit.com/lib/protobuf/virtpb"
|
||||
"go.wit.com/log"
|
||||
)
|
||||
|
||||
// will make sure the mac address is unique
|
||||
func ValidateUniqueMac(mac string) bool {
|
||||
loop := me.cluster.DropletsAll() // get the list of droplets
|
||||
for loop.Scan() {
|
||||
d := loop.Next()
|
||||
for _, n := range d.Networks {
|
||||
if n.Mac == mac {
|
||||
log.Info("duplicate MAC", n.Mac, "in droplet", d.Hostname)
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// records all the known paths. this should go in the protobuf
|
||||
func addClusterFilepath(dir string) *virtpb.Event {
|
||||
var found bool = false
|
||||
var e *virtpb.Event
|
||||
for _, d := range me.cluster.Dirs {
|
||||
if d == dir {
|
||||
// found dir
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
if dir != "." {
|
||||
// make a new Add Event
|
||||
e = virtpb.NewAddEvent(nil, "Add Cluster Directory", dir)
|
||||
me.cluster.Dirs = append(me.cluster.Dirs, dir)
|
||||
}
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// returns the droplet using a filename
|
||||
func lookupFilename(filename string) *virtpb.Droplet {
|
||||
filebase := filepath.Base(filename)
|
||||
|
||||
loop := me.cluster.DropletsAll() // get the list of droplets
|
||||
for loop.Scan() {
|
||||
d := loop.Next()
|
||||
for _, disk := range d.Disks {
|
||||
if filebase == disk.Filename {
|
||||
return d
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ValidateUniqueFilenames() bool {
|
||||
var ok bool = true
|
||||
var disks map[string]string
|
||||
disks = make(map[string]string)
|
||||
|
||||
loop := me.cluster.DropletsAll() // get the list of droplets
|
||||
for loop.Scan() {
|
||||
d := loop.Next()
|
||||
for _, disk := range d.Disks {
|
||||
filename := disk.Filename
|
||||
addClusterFilepath(disk.Filepath)
|
||||
if _, ok := disks[filename]; ok {
|
||||
/*
|
||||
if argv.IgnDisk {
|
||||
log.Info("ignore dup disk", filename, disks[filename], d.Hostname)
|
||||
} else {
|
||||
}
|
||||
*/
|
||||
log.Info("file", filename, "on droplet", disks[filename])
|
||||
log.Info("file", filename, "on droplet", d.Hostname)
|
||||
log.Info("duplicate disk names (--xml-ignore-disk to ignore)")
|
||||
ok = false
|
||||
}
|
||||
disks[filename] = d.Hostname
|
||||
}
|
||||
}
|
||||
if ok {
|
||||
log.Println("validated okay: no duplicate disk images")
|
||||
}
|
||||
return ok
|
||||
}
|
||||
|
||||
func ValidateDiskFilenames() ([]*virtpb.Event, error) {
|
||||
var alle []*virtpb.Event
|
||||
|
||||
loop := me.cluster.DropletsAll() // get the list of droplets
|
||||
for loop.Scan() {
|
||||
d := loop.Next()
|
||||
var found bool = false
|
||||
for _, disk := range d.Disks {
|
||||
filename := disk.Filename
|
||||
filebase := filepath.Base(filename)
|
||||
dir := filepath.Dir(filename)
|
||||
addClusterFilepath(dir)
|
||||
if disk.Filename != filebase {
|
||||
// update filename
|
||||
e := d.NewChangeEvent("Disk.Filename", disk.Filename, filebase)
|
||||
alle = append(alle, e)
|
||||
disk.Filename = filebase
|
||||
}
|
||||
// make sure the filename is the hostname + .qcow2
|
||||
filetype := filepath.Ext(filebase)
|
||||
if filetype == ".img" {
|
||||
found = true
|
||||
continue
|
||||
}
|
||||
if filetype != ".qcow2" {
|
||||
log.Info("file type", filetype, "not supported for", filebase, "on", d.Hostname)
|
||||
return nil, errors.New("only supporting qcow2 images for now")
|
||||
}
|
||||
test := strings.TrimSuffix(filebase, filetype)
|
||||
if test == d.Hostname {
|
||||
found = true
|
||||
}
|
||||
if dir == "." {
|
||||
continue
|
||||
}
|
||||
if dir == "" {
|
||||
continue
|
||||
}
|
||||
if disk.Filepath != dir {
|
||||
// update filename
|
||||
e := d.NewChangeEvent("Disk.Filepath", disk.Filepath, dir)
|
||||
alle = append(alle, e)
|
||||
disk.Filepath = dir
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
log.Info("droplet", d.Hostname, d.Disks)
|
||||
log.Warn("droplet " + d.Hostname + " has nonstandard disk names")
|
||||
}
|
||||
}
|
||||
return alle, nil
|
||||
}
|
||||
|
||||
// consistancy check. run on a regular basis
|
||||
//
|
||||
// runs on startup. dies if there are duplicates
|
||||
// the config file must then be edited by hand for now
|
||||
func ValidateDroplets() (map[string]string, map[string]string, error) {
|
||||
// uuid map to check for duplicates
|
||||
var umap map[string]string
|
||||
umap = make(map[string]string)
|
||||
|
||||
// mac address map to check for duplicates
|
||||
var macs map[string]string
|
||||
macs = make(map[string]string)
|
||||
|
||||
loop := me.cluster.DropletsAll() // get the list of droplets
|
||||
for loop.Scan() {
|
||||
d := loop.Next()
|
||||
// Generate a new UUID
|
||||
if d.Uuid == "" {
|
||||
u := uuid.New()
|
||||
d.Uuid = u.String()
|
||||
}
|
||||
|
||||
// seconds, ok := timeZone[tz]; ok {
|
||||
if _, ok := umap[d.Uuid]; ok {
|
||||
// UUID already exists
|
||||
log.Info("duplicate UUID", d.Uuid, umap[d.Uuid])
|
||||
log.Info("duplicate UUID", d.Uuid, d.Hostname)
|
||||
if d.Archive == nil {
|
||||
d.Archive = new(virtpb.Archive)
|
||||
log.Info("d.Archive was nil for", d.Hostname)
|
||||
// os.Exit(-1)
|
||||
}
|
||||
d.Archive.Reason = virtpb.DropletArchive_DUP
|
||||
continue
|
||||
// return umap, macs, errors.New("duplicate UUID: " + d.Uuid)
|
||||
}
|
||||
umap[d.Uuid] = d.Hostname
|
||||
|
||||
for _, n := range d.Networks {
|
||||
// log.Println("network:", n.Mac, d.Uuid, d.Hostname)
|
||||
if _, ok := macs[n.Mac]; ok {
|
||||
// UUID already exists
|
||||
log.Info("duplicate MAC", n.Mac, macs[n.Mac], umap[macs[n.Mac]])
|
||||
log.Info("duplicate MAC", n.Mac, d.Hostname)
|
||||
return umap, macs, errors.New("duplicate MAC: " + n.Mac)
|
||||
}
|
||||
macs[n.Mac] = d.Uuid
|
||||
}
|
||||
}
|
||||
log.Println("validated okay: no duplicate MAC addr")
|
||||
log.Println("validated okay: no duplicate UUID")
|
||||
|
||||
return umap, macs, nil
|
||||
}
|
||||
|
||||
func searchForDuplicateUUIDs() {
|
||||
// var broken int
|
||||
}
|
||||
|
||||
/*
|
||||
// remove from the slice
|
||||
func deleteDroplet(bad int) {
|
||||
var all *virtpb.Droplets
|
||||
all = me.cluster.DeleteDroplet(b *db.Droplet)
|
||||
|
||||
fmt.Println("deleting", bad, all.Droplets[bad].Hostname)
|
||||
|
||||
// Check if the index is within bounds
|
||||
if bad >= 0 && bad < len(all.Droplets) {
|
||||
// Remove element at targetIndex
|
||||
all.Droplets = append(all.Droplets[:bad], all.Droplets[bad+1:]...)
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
// checks a droplet right before a start event
|
||||
// verify ethernet mac address
|
||||
// verify uuid (but probably can ignore this since it's not used)
|
||||
// check qemu domain id
|
||||
// check spice and vnc ports
|
||||
// check filenames
|
||||
func ValidateDroplet(check *virtpb.Droplet) error {
|
||||
// check for duplicate uuid's
|
||||
loop := me.cluster.DropletsAll() // get the list of droplets
|
||||
for loop.Scan() {
|
||||
d := loop.Next()
|
||||
if check == d {
|
||||
continue
|
||||
}
|
||||
if d.Uuid == check.Uuid {
|
||||
// UUID already exists
|
||||
log.Info("duplicate UUID", d.Uuid, d.Hostname)
|
||||
log.Info("duplicate UUID", d.Uuid, check.Hostname)
|
||||
// d.Archive = new(virtpb.DropletArchive)
|
||||
if d.Archive == nil {
|
||||
log.Info("d.Archive == nil")
|
||||
os.Exit(-1)
|
||||
}
|
||||
d.Archive.Reason = virtpb.DropletArchive_DUP
|
||||
// return errors.New("duplicate UUID: " + d.Uuid)
|
||||
}
|
||||
}
|
||||
|
||||
// check for duplicate mac addresses
|
||||
for _, checkn := range check.Networks {
|
||||
log.Info("found mac = ", checkn.Mac, check.Hostname)
|
||||
if checkn.Mac == "" {
|
||||
checkn.Mac = getNewMac()
|
||||
if err := me.cluster.ConfigSave(); err != nil {
|
||||
log.Info("configsave error", err)
|
||||
os.Exit(-1)
|
||||
}
|
||||
}
|
||||
loop := me.cluster.DropletsAll() // get the list of droplets
|
||||
for loop.Scan() {
|
||||
d := loop.Next()
|
||||
if check == d {
|
||||
continue
|
||||
}
|
||||
for _, n := range d.Networks {
|
||||
if checkn.Mac == n.Mac {
|
||||
// MAC already exists
|
||||
log.Info("duplicate MAC", n.Mac, d.Hostname)
|
||||
log.Info("duplicate MAC", n.Mac, check.Hostname)
|
||||
return errors.New("duplicate MAC: " + n.Mac)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := setUniqueSpicePort(check); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func setUniqueSpicePort(check *virtpb.Droplet) error {
|
||||
var ports map[int64]*virtpb.Droplet
|
||||
ports = make(map[int64]*virtpb.Droplet)
|
||||
|
||||
// check spice ports
|
||||
// checkn.SpicePort = getUniqueSpicePort()
|
||||
loop := me.cluster.DropletsAll() // get the list of droplets
|
||||
for loop.Scan() {
|
||||
d := loop.Next()
|
||||
if d.SpicePort == 0 {
|
||||
continue
|
||||
}
|
||||
if dup, ok := ports[d.SpicePort]; ok {
|
||||
// dup := ports[d.SpicePort]
|
||||
log.Warn("duplicate ports", d.SpicePort, d.Hostname, d.Current.State)
|
||||
if d.Current.State != virtpb.DropletState_ON {
|
||||
// hack for now. should be safe to erase this
|
||||
d.SpicePort = 0
|
||||
log.Warn("erasing port for non-ON droplet", d.SpicePort, d.Hostname, d.Current.State)
|
||||
}
|
||||
log.Warn("duplicate ports", dup.SpicePort, dup.Hostname, dup.Current.State)
|
||||
if dup.Current.State != virtpb.DropletState_ON {
|
||||
// hack for now. should be safe to erase this
|
||||
dup.SpicePort = 0
|
||||
log.Warn("erasing port for non-ON droplet", dup.SpicePort, dup.Hostname, dup.Current.State)
|
||||
}
|
||||
// todo: fix this somewhow
|
||||
return errors.New("duplicate ports")
|
||||
}
|
||||
ports[d.SpicePort] = d
|
||||
}
|
||||
|
||||
for p, d := range ports {
|
||||
log.Info("found spice port", p, "on", d.Hostname)
|
||||
}
|
||||
|
||||
var start int64
|
||||
start = 5910
|
||||
for {
|
||||
if start == 6000 {
|
||||
// x11 might use this on dom0's running a desktop
|
||||
// maybe qemu uses it iternally
|
||||
start += 1
|
||||
continue
|
||||
}
|
||||
if _, ok := ports[start]; ok {
|
||||
d := ports[start]
|
||||
log.Info("already using port", start, "on", d.Hostname)
|
||||
if d == check {
|
||||
log.Info("this is good because it's me!", check.Hostname, d.Hostname)
|
||||
return nil
|
||||
}
|
||||
start += 1
|
||||
continue
|
||||
}
|
||||
// generate change port event
|
||||
log.Info("going to try port", start, "on", check.Hostname)
|
||||
e := check.NewChangeEvent("SpicePort", check.SpicePort, start)
|
||||
me.cluster.AddEvent(e)
|
||||
|
||||
// set port to start
|
||||
check.SpicePort = start
|
||||
|
||||
// write out config file
|
||||
if err := me.cluster.ConfigSave(); err != nil {
|
||||
log.Info("config save error inside here is bad", err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
|
@ -15,7 +15,7 @@ func TimeFunction(f func()) time.Duration {
|
|||
}
|
||||
|
||||
func (h *HyperT) NewWatchdog() {
|
||||
h.dog = time.NewTicker(me.delay)
|
||||
h.dog = time.NewTicker(me.hyperPollDelay)
|
||||
defer h.dog.Stop()
|
||||
done := make(chan bool)
|
||||
/*
|
||||
|
|
|
@ -0,0 +1,33 @@
|
|||
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
|
||||
// Use of this source code is governed by the GPL 3.0
|
||||
|
||||
package main
|
||||
|
||||
// An app to submit patches for the 30 GO GUI repos
|
||||
|
||||
import (
|
||||
"go.wit.com/lib/gadgets"
|
||||
"go.wit.com/log"
|
||||
)
|
||||
|
||||
func createWindow() *gadgets.GenericWindow {
|
||||
createWindow := gadgets.NewGenericWindow("Create Droplet", "settings")
|
||||
createWindow.Custom = func() {
|
||||
log.Warn("create window close")
|
||||
}
|
||||
|
||||
grid := createWindow.Group.RawGrid()
|
||||
|
||||
gadgets.NewBasicEntry(grid, "memory")
|
||||
grid.NextRow()
|
||||
|
||||
grid.NewLabel("name")
|
||||
grid.NewTextbox("something")
|
||||
grid.NextRow()
|
||||
|
||||
grid.NewButton("Start", func() {
|
||||
log.Info("make a box")
|
||||
})
|
||||
|
||||
return createWindow
|
||||
}
|
|
@ -0,0 +1,121 @@
|
|||
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
|
||||
// Use of this source code is governed by the GPL 3.0
|
||||
|
||||
package main
|
||||
|
||||
// An app to submit patches for the 30 GO GUI repos
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"go.wit.com/gui"
|
||||
"go.wit.com/lib/gadgets"
|
||||
"go.wit.com/lib/protobuf/virtpb"
|
||||
"go.wit.com/log"
|
||||
)
|
||||
|
||||
func (admin *adminT) createDropletWindow() *gadgets.GenericWindow {
|
||||
d := new(virtpb.Droplet)
|
||||
|
||||
win := gadgets.NewGenericWindow("Create Droplet "+d.Hostname, "settings")
|
||||
win.Custom = func() {
|
||||
log.Warn("edit window close")
|
||||
}
|
||||
|
||||
grid := win.Group.RawGrid()
|
||||
|
||||
var save *gui.Node
|
||||
|
||||
grid.NewLabel("name")
|
||||
name := grid.NewTextbox("new2.wit.com")
|
||||
d.Hostname = "new2.wit.com"
|
||||
name.SetText(d.Hostname)
|
||||
name.Custom = func() {
|
||||
if d.Hostname == name.String() {
|
||||
return
|
||||
}
|
||||
d.Hostname = name.String()
|
||||
log.Info("changed droplet name to", d.Hostname)
|
||||
save.Enable()
|
||||
}
|
||||
grid.NextRow()
|
||||
|
||||
mem := gadgets.NewBasicEntry(grid, "memory (GB)")
|
||||
mem.SetText("16")
|
||||
d.Memory = int64(16 * 1024 * 2024 * 1024)
|
||||
grid.NextRow()
|
||||
mem.Custom = func() {
|
||||
newmem, err := strconv.Atoi(mem.String())
|
||||
if err != nil {
|
||||
log.Info("mem value error", mem.String(), err)
|
||||
mem.SetText(fmt.Sprintf("%d", d.Memory/(1024*1024*1024)))
|
||||
return
|
||||
}
|
||||
if newmem < 1 {
|
||||
log.Info("mem can not be < 1")
|
||||
mem.SetText(fmt.Sprintf("%d", d.Memory/(1024*1024*1024)))
|
||||
return
|
||||
}
|
||||
d.Memory = int64(newmem * (1024 * 2024 * 1024))
|
||||
log.Info("changed mem value. new val =", d.Memory)
|
||||
|
||||
save.Enable()
|
||||
}
|
||||
grid.NextRow() // each entry is on it's own row
|
||||
|
||||
cpus := gadgets.NewBasicEntry(grid, "cpus")
|
||||
cpus.SetText("4")
|
||||
d.Cpus = int64(4)
|
||||
cpus.Custom = func() {
|
||||
newcpu, err := strconv.Atoi(cpus.String())
|
||||
if err != nil {
|
||||
log.Info("cpus value error", cpus.String(), err)
|
||||
cpus.SetText(fmt.Sprintf("%d", d.Cpus))
|
||||
return
|
||||
}
|
||||
if newcpu < 1 {
|
||||
log.Info("cpus can not be < 1")
|
||||
cpus.SetText(fmt.Sprintf("%d", d.Cpus))
|
||||
return
|
||||
}
|
||||
d.Cpus = int64(newcpu)
|
||||
log.Info("changed cpus value. new val =", d.Cpus)
|
||||
|
||||
save.Enable()
|
||||
}
|
||||
grid.NextRow() // each entry is on it's own row
|
||||
|
||||
/*
|
||||
save = grid.NewButton("postEvent() EDIT", func() {
|
||||
log.Info("save droplet changes here")
|
||||
|
||||
e := new(virtpb.Event)
|
||||
e.Etype = virtpb.EventType_EDIT
|
||||
e.Droplet = d
|
||||
|
||||
if err := admin.postEvent(e); err != nil {
|
||||
log.Info("event edit err", err)
|
||||
} else {
|
||||
log.Info("admin.postEvent() worked (?)")
|
||||
}
|
||||
})
|
||||
*/
|
||||
|
||||
save = grid.NewButton("Create", func() {
|
||||
log.Info("save droplet changes here")
|
||||
|
||||
e := new(virtpb.Event)
|
||||
e.Etype = virtpb.EventType_ADD
|
||||
e.Droplet = d
|
||||
|
||||
if err := admin.postEvent(e); err != nil {
|
||||
log.Info("event edit err", err)
|
||||
} else {
|
||||
log.Info("admin.postEvent() worked (?)")
|
||||
}
|
||||
})
|
||||
|
||||
// save.Disable()
|
||||
return win
|
||||
}
|
|
@ -0,0 +1,125 @@
|
|||
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
|
||||
// Use of this source code is governed by the GPL 3.0
|
||||
|
||||
package main
|
||||
|
||||
// An app to submit patches for the 30 GO GUI repos
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"go.wit.com/gui"
|
||||
"go.wit.com/lib/gadgets"
|
||||
"go.wit.com/lib/protobuf/virtpb"
|
||||
"go.wit.com/log"
|
||||
)
|
||||
|
||||
func (admin *adminT) editDropletWindow(d *virtpb.Droplet) *gadgets.GenericWindow {
|
||||
win := gadgets.NewGenericWindow("Edit Droplet "+d.Hostname, "settings")
|
||||
win.Custom = func() {
|
||||
log.Warn("edit window close")
|
||||
}
|
||||
|
||||
grid := win.Group.RawGrid()
|
||||
|
||||
var save *gui.Node
|
||||
|
||||
grid.NewLabel("name")
|
||||
name := grid.NewTextbox("something")
|
||||
name.SetText(d.Hostname)
|
||||
name.Custom = func() {
|
||||
if d.Hostname == name.String() {
|
||||
return
|
||||
}
|
||||
d.Hostname = name.String()
|
||||
log.Info("changed droplet name to", d.Hostname)
|
||||
save.Enable()
|
||||
}
|
||||
grid.NextRow()
|
||||
|
||||
mem := gadgets.NewBasicEntry(grid, "memory (GB)")
|
||||
mem.SetText(fmt.Sprintf("%d", d.Memory/(1024*1024*1024)))
|
||||
grid.NextRow()
|
||||
mem.Custom = func() {
|
||||
newmem, err := strconv.Atoi(mem.String())
|
||||
if err != nil {
|
||||
log.Info("mem value error", mem.String(), err)
|
||||
mem.SetText(fmt.Sprintf("%d", d.Memory/(1024*1024*1024)))
|
||||
return
|
||||
}
|
||||
if newmem < 1 {
|
||||
log.Info("mem can not be < 1")
|
||||
mem.SetText(fmt.Sprintf("%d", d.Memory/(1024*1024*1024)))
|
||||
return
|
||||
}
|
||||
d.Memory = int64(newmem * (1024 * 2024 * 1024))
|
||||
log.Info("changed mem value. new val =", d.Memory)
|
||||
|
||||
save.Enable()
|
||||
}
|
||||
|
||||
cpus := gadgets.NewBasicEntry(grid, "cpus")
|
||||
cpus.SetText(fmt.Sprintf("%d", d.Cpus))
|
||||
grid.NextRow()
|
||||
cpus.Custom = func() {
|
||||
newcpu, err := strconv.Atoi(cpus.String())
|
||||
if err != nil {
|
||||
log.Info("cpus value error", cpus.String(), err)
|
||||
cpus.SetText(fmt.Sprintf("%d", d.Cpus))
|
||||
return
|
||||
}
|
||||
if newcpu < 1 {
|
||||
log.Info("cpus can not be < 1")
|
||||
cpus.SetText(fmt.Sprintf("%d", d.Cpus))
|
||||
return
|
||||
}
|
||||
d.Cpus = int64(newcpu)
|
||||
log.Info("changed cpus value. new val =", d.Cpus)
|
||||
|
||||
save.Enable()
|
||||
}
|
||||
|
||||
grid.NewLabel("hypervisor")
|
||||
hyper := grid.NewDropdown()
|
||||
hyper.AddText("farm03")
|
||||
hyper.AddText("farm04")
|
||||
hyper.AddText("farm05")
|
||||
if d.Current != nil {
|
||||
hyper.SetText(d.Current.Hypervisor)
|
||||
} else {
|
||||
hyper.SetText("farm03")
|
||||
}
|
||||
grid.NextRow()
|
||||
|
||||
grid.NewButton("Start", func() {
|
||||
log.Info("make a box")
|
||||
})
|
||||
|
||||
save = grid.NewButton("save", func() {
|
||||
log.Info("save droplet changes here")
|
||||
|
||||
e := new(virtpb.Event)
|
||||
e.Etype = virtpb.EventType_EDIT
|
||||
e.Droplet = d
|
||||
/*
|
||||
e.Droplet = new(virtpb.Droplet)
|
||||
e.Droplet.Uuid = d.Uuid
|
||||
e.Droplet.Cpus = 4
|
||||
e.Droplet.Memory = 8 * (1024 * 1024 * 1024)
|
||||
e.Droplet.Hostname = name.String()
|
||||
*/
|
||||
|
||||
if err := admin.postEvent(e); err != nil {
|
||||
log.Info("event edit err", err)
|
||||
}
|
||||
})
|
||||
save.Disable()
|
||||
|
||||
grid.NewButton("dump", func() {
|
||||
t := d.FormatTEXT()
|
||||
log.Info(t)
|
||||
})
|
||||
|
||||
return win
|
||||
}
|
|
@ -0,0 +1,227 @@
|
|||
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
|
||||
// Use of this source code is governed by the GPL 3.0
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.wit.com/gui"
|
||||
"go.wit.com/lib/gadgets"
|
||||
"go.wit.com/lib/protobuf/virtpb"
|
||||
"go.wit.com/log"
|
||||
)
|
||||
|
||||
type stdDropletTableWin struct {
|
||||
sync.Mutex
|
||||
win *gadgets.GenericWindow // the machines gui window
|
||||
box *gui.Node // the machines gui parent box widget
|
||||
pb *virtpb.Droplets // the droplets protobuf
|
||||
TB *virtpb.DropletsTable // the gui table buffer
|
||||
update bool // if the window should be updated
|
||||
Close func() // this function is called when the window is closed
|
||||
admin *adminT
|
||||
}
|
||||
|
||||
func (w *stdDropletTableWin) Toggle() {
|
||||
if w == nil {
|
||||
return
|
||||
}
|
||||
if w.win == nil {
|
||||
return
|
||||
}
|
||||
w.win.Toggle()
|
||||
}
|
||||
|
||||
func newDropletsWindow(admin *adminT) *stdDropletTableWin {
|
||||
dwin := new(stdDropletTableWin)
|
||||
dwin.admin = admin
|
||||
dwin.win = gadgets.NewGenericWindow("virtigo current droplets", "Options")
|
||||
dwin.win.Custom = func() {
|
||||
log.Info("test delete window here")
|
||||
}
|
||||
grid := dwin.win.Group.RawGrid()
|
||||
|
||||
grid.NewButton("Active", func() {
|
||||
var found *virtpb.Droplets
|
||||
found = virtpb.NewDroplets()
|
||||
all := admin.cluster.Droplets.All()
|
||||
for all.Scan() {
|
||||
vm := all.Next()
|
||||
if vm.Current.State != virtpb.DropletState_ON {
|
||||
continue
|
||||
}
|
||||
found.Append(vm)
|
||||
}
|
||||
dwin.doActiveDroplets(found)
|
||||
})
|
||||
|
||||
grid.NewButton("Inactive", func() {
|
||||
var found *virtpb.Droplets
|
||||
found = virtpb.NewDroplets()
|
||||
all := admin.cluster.Droplets.All()
|
||||
for all.Scan() {
|
||||
vm := all.Next()
|
||||
if vm.Current.State == virtpb.DropletState_ON {
|
||||
continue
|
||||
}
|
||||
found.Append(vm)
|
||||
}
|
||||
dwin.doInactiveDroplets(found)
|
||||
})
|
||||
|
||||
grid.NewButton("Create", func() {
|
||||
log.Info("create droplet here")
|
||||
admin.createDropletWindow()
|
||||
})
|
||||
|
||||
// make a box at the bottom of the window for the protobuf table
|
||||
dwin.box = dwin.win.Bottom.Box().SetProgName("TBOX")
|
||||
return dwin
|
||||
}
|
||||
|
||||
// default window for active running droplets
|
||||
func (dw *stdDropletTableWin) doInactiveDroplets(pb *virtpb.Droplets) {
|
||||
dw.Lock()
|
||||
defer dw.Unlock()
|
||||
|
||||
// erase the old table
|
||||
if dw.TB != nil {
|
||||
dw.TB.Delete()
|
||||
dw.TB = nil
|
||||
}
|
||||
|
||||
// init the table
|
||||
dw.pb = pb
|
||||
t := dw.pb.NewTable("DropletsPB Off")
|
||||
t.NewUuid()
|
||||
t.SetParent(dw.box)
|
||||
|
||||
dropedit := t.AddButtonFunc("Edit", func(d *virtpb.Droplet) string {
|
||||
return "edit"
|
||||
})
|
||||
dropedit.Custom = func(d *virtpb.Droplet) {
|
||||
log.Info("edit droplet here", d.Hostname)
|
||||
dw.admin.editDropletWindow(d)
|
||||
}
|
||||
|
||||
dropon := t.AddButtonFunc("Start", func(d *virtpb.Droplet) string {
|
||||
return "poweron"
|
||||
})
|
||||
dropon.Custom = func(d *virtpb.Droplet) {
|
||||
log.Info("start droplet here", d.Hostname)
|
||||
log.Info("should start droplet here")
|
||||
log.Info(d.SprintHeader())
|
||||
e := new(virtpb.Event)
|
||||
e.Etype = virtpb.EventType_POWERON
|
||||
e.DropletUuid = d.Uuid
|
||||
|
||||
if err := dw.admin.postEvent(e); err != nil {
|
||||
log.Info("droplet start err", err)
|
||||
}
|
||||
}
|
||||
|
||||
vp := t.AddButtonFunc("Verify Config", func(p *virtpb.Droplet) string {
|
||||
return p.Hostname
|
||||
})
|
||||
vp.Custom = func(d *virtpb.Droplet) {
|
||||
log.Info("open config window", d.Hostname)
|
||||
}
|
||||
|
||||
t.AddMemory()
|
||||
t.AddCpus()
|
||||
|
||||
// final setup and display the table
|
||||
dw.TB = t
|
||||
f := func(e *virtpb.Droplet) {
|
||||
log.Info("Triggered. do something here", e.Hostname)
|
||||
// m.Enabled = true
|
||||
}
|
||||
dw.TB.Custom(f)
|
||||
dw.TB.ShowTable()
|
||||
}
|
||||
|
||||
// default window for active running droplets
|
||||
func (dw *stdDropletTableWin) doActiveDroplets(pb *virtpb.Droplets) {
|
||||
dw.Lock()
|
||||
defer dw.Unlock()
|
||||
if dw.TB != nil {
|
||||
dw.TB.Delete()
|
||||
dw.TB = nil
|
||||
}
|
||||
|
||||
dw.pb = pb
|
||||
|
||||
t := dw.pb.NewTable("DropletsPB On")
|
||||
t.NewUuid()
|
||||
|
||||
t.SetParent(dw.box)
|
||||
|
||||
serial := t.AddButtonFunc("serial", func(p *virtpb.Droplet) string {
|
||||
return "ttyS0"
|
||||
})
|
||||
serial.Custom = func(d *virtpb.Droplet) {
|
||||
log.Printf("run %s: socat telnet somewhere %s:%d\n", d.Hostname, argv.Server, d.SpicePort)
|
||||
log.Info("socat TCP-LISTEN:5000,reuseaddr,fork EXEC:\"virsh console myvm\"")
|
||||
|
||||
}
|
||||
|
||||
fb := t.AddButtonFunc("fb0 console", func(p *virtpb.Droplet) string {
|
||||
return "remmina"
|
||||
})
|
||||
fb.Custom = func(d *virtpb.Droplet) {
|
||||
log.Printf("connect to %s on %s: remmina spice://%s:%d\n", d.Hostname, d.Current.Hypervisor, argv.Server, 10000+d.SpicePort)
|
||||
data, err := gusPost(fmt.Sprintf("%d", 10000+d.SpicePort), d.Current.Hypervisor)
|
||||
log.Info("data", string(data), "err =", err)
|
||||
}
|
||||
|
||||
// t.AddHostname()
|
||||
vp := t.AddButtonFunc("Hostname", func(p *virtpb.Droplet) string {
|
||||
return p.Hostname
|
||||
})
|
||||
vp.Custom = func(d *virtpb.Droplet) {
|
||||
log.Info("edit droplet here", d.Hostname)
|
||||
dw.admin.editDropletWindow(d)
|
||||
}
|
||||
|
||||
t.AddStringFunc("location", func(d *virtpb.Droplet) string {
|
||||
return d.Current.Hypervisor
|
||||
})
|
||||
t.AddMemory()
|
||||
t.AddCpus()
|
||||
t.AddSpicePort()
|
||||
t.AddTimeFunc("age", func(d *virtpb.Droplet) time.Time {
|
||||
age := d.Current.OnSince.AsTime()
|
||||
// log.Info("age", d.Hostname, virtpb.FormatDuration(time.Since(age)))
|
||||
return age
|
||||
})
|
||||
t.AddStringFunc("State", func(d *virtpb.Droplet) string {
|
||||
if d.Current.State == virtpb.DropletState_ON {
|
||||
return "ON"
|
||||
}
|
||||
if d.Current.State == virtpb.DropletState_OFF {
|
||||
return "OFF"
|
||||
}
|
||||
return "UNKNOWN"
|
||||
})
|
||||
t.AddStringFunc("mac addr", func(d *virtpb.Droplet) string {
|
||||
var macs []string
|
||||
for _, n := range d.Networks {
|
||||
macs = append(macs, n.Mac)
|
||||
}
|
||||
tmp := strings.Join(macs, "\n")
|
||||
return strings.TrimSpace(tmp)
|
||||
})
|
||||
t.ShowTable()
|
||||
|
||||
// display the protobuf
|
||||
dw.TB = t
|
||||
f := func(e *virtpb.Droplet) {
|
||||
log.Info("Triggered. do something here", e.Hostname)
|
||||
// m.Enabled = true
|
||||
}
|
||||
dw.TB.Custom(f)
|
||||
}
|
|
@ -0,0 +1,77 @@
|
|||
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
|
||||
// Use of this source code is governed by the GPL 3.0
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"go.wit.com/gui"
|
||||
"go.wit.com/lib/gadgets"
|
||||
"go.wit.com/lib/protobuf/virtpb"
|
||||
"go.wit.com/log"
|
||||
)
|
||||
|
||||
type stdEventTableWin struct {
|
||||
sync.Mutex
|
||||
win *gadgets.GenericWindow // the machines gui window
|
||||
box *gui.Node // the machines gui parent box widget
|
||||
pb *virtpb.Events // the protobuf
|
||||
TB *virtpb.EventsTable // the gui table buffer
|
||||
update bool // if the window should be updated
|
||||
}
|
||||
|
||||
func (w *stdEventTableWin) Toggle() {
|
||||
if w == nil {
|
||||
return
|
||||
}
|
||||
if w.win == nil {
|
||||
return
|
||||
}
|
||||
w.win.Toggle()
|
||||
}
|
||||
|
||||
func newEventsWindow() *stdEventTableWin {
|
||||
dwin := new(stdEventTableWin)
|
||||
dwin.win = gadgets.NewGenericWindow("virtigo current events", "things to do")
|
||||
dwin.win.Custom = func() {
|
||||
log.Info("test delete window here")
|
||||
}
|
||||
|
||||
// make a box at the bottom of the window for the protobuf table
|
||||
dwin.box = dwin.win.Bottom.Box().SetProgName("TBOX")
|
||||
return dwin
|
||||
}
|
||||
|
||||
// default table protobuf window
|
||||
func (dw *stdEventTableWin) doStdEvents(pb *virtpb.Events) {
|
||||
dw.Lock()
|
||||
defer dw.Unlock()
|
||||
|
||||
// erase the old table
|
||||
if dw.TB != nil {
|
||||
dw.TB.Delete()
|
||||
dw.TB = nil
|
||||
}
|
||||
|
||||
// init the table
|
||||
dw.pb = pb
|
||||
t := dw.pb.NewTable("EventsPB Off")
|
||||
t.NewUuid()
|
||||
t.SetParent(dw.box)
|
||||
|
||||
// pick the columns
|
||||
t.AddDropletName()
|
||||
t.AddDropletUuid()
|
||||
t.AddHypervisor()
|
||||
|
||||
// display the protobuf
|
||||
dw.TB = t
|
||||
f := func(e *virtpb.Event) {
|
||||
log.Info("std EventWindow() something here", e.Droplet)
|
||||
// m.Enabled = true
|
||||
}
|
||||
dw.TB.Custom(f)
|
||||
|
||||
dw.TB.ShowTable()
|
||||
}
|
|
@ -0,0 +1,163 @@
|
|||
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
|
||||
// Use of this source code is governed by the GPL 3.0
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.wit.com/gui"
|
||||
"go.wit.com/lib/gadgets"
|
||||
"go.wit.com/lib/protobuf/virtpb"
|
||||
"go.wit.com/log"
|
||||
)
|
||||
|
||||
type stdHypervisorTableWin struct {
|
||||
sync.Mutex
|
||||
win *gadgets.GenericWindow // the machines gui window
|
||||
box *gui.Node // the machines gui parent box widget
|
||||
pb *virtpb.Hypervisors // the protobuf
|
||||
TB *virtpb.HypervisorsTable // the gui table buffer
|
||||
update bool // if the window should be updated
|
||||
}
|
||||
|
||||
func (w *stdHypervisorTableWin) Toggle() {
|
||||
if w == nil {
|
||||
return
|
||||
}
|
||||
if w.win == nil {
|
||||
return
|
||||
}
|
||||
w.win.Toggle()
|
||||
}
|
||||
|
||||
func newHypervisorsWindow() *stdHypervisorTableWin {
|
||||
dwin := new(stdHypervisorTableWin)
|
||||
dwin.win = gadgets.NewGenericWindow("virtigo current hypervisors", "things to do")
|
||||
dwin.win.Custom = func() {
|
||||
log.Info("test delete window here")
|
||||
}
|
||||
|
||||
// make a box at the bottom of the window for the protobuf table
|
||||
dwin.box = dwin.win.Bottom.Box().SetProgName("TBOX")
|
||||
return dwin
|
||||
}
|
||||
|
||||
// default table protobuf window
|
||||
func (dw *stdHypervisorTableWin) doStdHypervisors(pb *virtpb.Hypervisors) {
|
||||
dw.Lock()
|
||||
defer dw.Unlock()
|
||||
|
||||
// erase the old table
|
||||
if dw.TB != nil {
|
||||
dw.TB.Delete()
|
||||
dw.TB = nil
|
||||
}
|
||||
|
||||
// init the table
|
||||
dw.pb = pb
|
||||
t := dw.pb.NewTable("HypervisorsPB Off")
|
||||
t.NewUuid()
|
||||
t.SetParent(dw.box)
|
||||
|
||||
// pick the columns
|
||||
t.AddHostname()
|
||||
t.AddMemory()
|
||||
t.AddCpus()
|
||||
t.AddKillcount()
|
||||
t.AddTimeFunc("last poll", func(h *virtpb.Hypervisor) time.Time {
|
||||
// hm := me.hmap[h]
|
||||
// tmp := hm.lastpoll
|
||||
// log.Info("poll age", h.Hostname, virtpb.FormatDuration(time.Since(tmp)))
|
||||
return time.Now()
|
||||
})
|
||||
t.AddStringFunc("droplets", func(h *virtpb.Hypervisor) string {
|
||||
/*
|
||||
var totalDroplets int
|
||||
var totalUnknownDroplets int
|
||||
// dur := time.Since(h.lastpoll)
|
||||
// tmp := virtpb.FormatDuration(dur)
|
||||
// fmt.Fprintln(w, h.pb.Hostname, "killcount =", h.killcount, "lastpoll:", tmp)
|
||||
hm := me.hmap[h]
|
||||
for name, _ := range hm.lastDroplets {
|
||||
totalDroplets += 1
|
||||
d := me.cluster.FindDropletByName(name)
|
||||
if d == nil {
|
||||
totalUnknownDroplets += 1
|
||||
}
|
||||
}
|
||||
log.Printf("Total Droplets %d total libvirt only droplets = %d\n", totalDroplets, totalUnknownDroplets)
|
||||
return fmt.Sprintf("%d", totalDroplets)
|
||||
*/
|
||||
return "todo"
|
||||
})
|
||||
|
||||
// display the protobuf
|
||||
dw.TB = t
|
||||
f := func(e *virtpb.Hypervisor) {
|
||||
log.Info("std HypervisorWindow() something here", e.Hostname)
|
||||
// m.Enabled = true
|
||||
}
|
||||
dw.TB.Custom(f)
|
||||
|
||||
dw.TB.ShowTable()
|
||||
}
|
||||
|
||||
// default table protobuf window
|
||||
func (dw *stdHypervisorTableWin) doNewStdHypervisors(pb *virtpb.Hypervisors) {
|
||||
dw.Lock()
|
||||
defer dw.Unlock()
|
||||
|
||||
// erase the old table
|
||||
if dw.TB != nil {
|
||||
dw.TB.Delete()
|
||||
dw.TB = nil
|
||||
}
|
||||
|
||||
// init the table
|
||||
dw.pb = pb
|
||||
t := dw.pb.NewTable("HypervisorsPB Off")
|
||||
t.NewUuid()
|
||||
t.SetParent(dw.box)
|
||||
|
||||
// pick the columns
|
||||
t.AddHostname()
|
||||
t.AddMemory()
|
||||
t.AddCpus()
|
||||
t.AddKillcount()
|
||||
t.AddTimeFunc("last poll", func(h *virtpb.Hypervisor) time.Time {
|
||||
// hm := me.hmap[h]
|
||||
// tmp := hm.lastpoll
|
||||
// log.Info("poll age", h.Hostname, virtpb.FormatDuration(time.Since(tmp)))
|
||||
return time.Now()
|
||||
})
|
||||
t.AddStringFunc("droplets", func(h *virtpb.Hypervisor) string {
|
||||
var totalDroplets int
|
||||
var totalUnknownDroplets int
|
||||
// dur := time.Since(h.lastpoll)
|
||||
// tmp := virtpb.FormatDuration(dur)
|
||||
// fmt.Fprintln(w, h.pb.Hostname, "killcount =", h.killcount, "lastpoll:", tmp)
|
||||
hm := me.hmap[h]
|
||||
for name, _ := range hm.lastDroplets {
|
||||
totalDroplets += 1
|
||||
d := me.cluster.FindDropletByName(name)
|
||||
if d == nil {
|
||||
totalUnknownDroplets += 1
|
||||
}
|
||||
}
|
||||
// log.Printf("Total Droplets %d total libvirt only droplets = %d\n", totalDroplets, totalUnknownDroplets)
|
||||
return fmt.Sprintf("%d", totalDroplets)
|
||||
})
|
||||
|
||||
// display the protobuf
|
||||
dw.TB = t
|
||||
f := func(e *virtpb.Hypervisor) {
|
||||
log.Info("std HypervisorWindow() something here", e.Hostname)
|
||||
// m.Enabled = true
|
||||
}
|
||||
dw.TB.Custom(f)
|
||||
|
||||
dw.TB.ShowTable()
|
||||
}
|
188
xml.go
188
xml.go
|
@ -1,188 +0,0 @@
|
|||
// Copyright 2024 WIT.COM Inc Licensed GPL 3.0
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"go.wit.com/log"
|
||||
"libvirt.org/go/libvirtxml"
|
||||
)
|
||||
|
||||
func makeStandardXml(d *DropletT) *libvirtxml.Domain {
|
||||
log.Info("create new xml file for:", d.pb.Hostname)
|
||||
domcfg := &libvirtxml.Domain{}
|
||||
|
||||
addDefaults(domcfg, "standard.x86")
|
||||
addDefaults(domcfg, "memory")
|
||||
addDefaults(domcfg, "network")
|
||||
addDefaults(domcfg, "spice")
|
||||
addDefaults(domcfg, "qcow")
|
||||
addDefaults(domcfg, d.pb.Hostname)
|
||||
|
||||
return domcfg
|
||||
}
|
||||
|
||||
func writeoutXml(domcfg *libvirtxml.Domain, filename string) bool {
|
||||
xmldoc, err := domcfg.Marshal()
|
||||
|
||||
if err != nil {
|
||||
fmt.Println("can't make xml file error:\n", err)
|
||||
return false
|
||||
}
|
||||
|
||||
outfile := "/tmp/" + filename + ".xml"
|
||||
regfile, _ := os.OpenFile(outfile, os.O_RDWR|os.O_CREATE, 0666)
|
||||
fmt.Fprintln(regfile, xmldoc)
|
||||
|
||||
log.Info("File is in", outfile)
|
||||
regfile.Close()
|
||||
return true
|
||||
}
|
||||
|
||||
func setDiskFilename(domcfg *libvirtxml.Domain, filename string) {
|
||||
for i, x := range domcfg.Devices.Disks {
|
||||
// Create a new DomainDiskSourceFile struct
|
||||
newSource := &libvirtxml.DomainDiskSourceFile{
|
||||
File: filename, // Set the file name here
|
||||
}
|
||||
|
||||
// Assign it to the disk's source
|
||||
domcfg.Devices.Disks[i].Source.File = newSource
|
||||
|
||||
// fmt.Printf("Disk Source %s\n", name)
|
||||
fmt.Printf("Disk Device %s\n", x.Source.File)
|
||||
}
|
||||
}
|
||||
|
||||
func addDefaults(d *libvirtxml.Domain, filename string) {
|
||||
fullname := "resources/xml/" + filename + ".xml"
|
||||
pfile, err := resources.ReadFile(fullname)
|
||||
if err != nil {
|
||||
log.Println("ERROR:", err)
|
||||
return
|
||||
}
|
||||
|
||||
err = d.Unmarshal(string(pfile))
|
||||
if err != nil {
|
||||
log.Info("Marshal failed on file", filename)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func readXml(filename string) (*libvirtxml.Domain, error) {
|
||||
log.Verbose("parse xml file:", filename)
|
||||
|
||||
pfile, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
log.Println("ERROR:", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
domcfg := &libvirtxml.Domain{}
|
||||
|
||||
err = domcfg.Unmarshal(string(pfile))
|
||||
if err != nil {
|
||||
log.Info("Marshal failed on file", filename, err)
|
||||
return nil, ErrorParseXML
|
||||
}
|
||||
return domcfg, nil
|
||||
}
|
||||
|
||||
func (d *DropletT) mergeXml(filename string) error {
|
||||
log.Info("merge xml file:", filename)
|
||||
|
||||
pfile, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
log.Println("ERROR:", err)
|
||||
return ErrorNoFile
|
||||
}
|
||||
|
||||
err = d.xml.Unmarshal(string(pfile))
|
||||
if err != nil {
|
||||
log.Info("Marshal failed on file", filename)
|
||||
return ErrorParseXML
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func setSimpleDisk(domcfg *libvirtxml.Domain, filename string) {
|
||||
// Clear out the existing disks (if any)
|
||||
domcfg.Devices.Disks = nil
|
||||
|
||||
// Define a new disk with "mynew.qcow2"
|
||||
newDisk := libvirtxml.DomainDisk{
|
||||
Device: "disk",
|
||||
Driver: &libvirtxml.DomainDiskDriver{
|
||||
Name: "qemu",
|
||||
Type: "qcow2",
|
||||
},
|
||||
Source: &libvirtxml.DomainDiskSource{
|
||||
File: &libvirtxml.DomainDiskSourceFile{
|
||||
File: filename,
|
||||
},
|
||||
},
|
||||
Target: &libvirtxml.DomainDiskTarget{
|
||||
Dev: "vda",
|
||||
Bus: "virtio",
|
||||
},
|
||||
}
|
||||
|
||||
// Add the new disk to the domain configuration
|
||||
domcfg.Devices.Disks = append(domcfg.Devices.Disks, newDisk)
|
||||
}
|
||||
|
||||
func getMacs(domcfg *libvirtxml.Domain) []string {
|
||||
var macs []string
|
||||
// Iterate over the network interfaces and print the MAC addresses
|
||||
for _, iface := range domcfg.Devices.Interfaces {
|
||||
if iface.MAC != nil {
|
||||
// iface.MAC.Address = "aa:bb:aa:bb:aa:ff"
|
||||
fmt.Printf("MAC Address: %+v\n", iface.MAC)
|
||||
// fmt.Printf("Interface: %s, MAC Address: %s\n", iface.Target.Dev, iface.MAC.Address)
|
||||
macs = append(macs, iface.MAC.Address)
|
||||
} else {
|
||||
fmt.Printf("Interface: %s, MAC Address: not available\n", iface.Target.Dev)
|
||||
}
|
||||
}
|
||||
return macs
|
||||
}
|
||||
|
||||
// clearEthernet removes all the ethernet interfaces from the domain.
func clearEthernet(domcfg *libvirtxml.Domain) {
	// Clear out the existing network interfaces (if any) — the old
	// comment said "disks", but this field is the interface list
	domcfg.Devices.Interfaces = nil
}
|
||||
|
||||
// addEthernet appends a new ethernet interface to the domain with the
// given MAC address and target device name (bridge name).
func addEthernet(domcfg *libvirtxml.Domain, mac string, brname string) {
	// Build the new interface (old comment said "disk"; it is a NIC)
	newNet := libvirtxml.DomainInterface{
		MAC: &libvirtxml.DomainInterfaceMAC{
			Address: mac,
		},
		Target: &libvirtxml.DomainInterfaceTarget{
			Dev: brname,
		},
	}

	// Add the new interface to the domain configuration
	domcfg.Devices.Interfaces = append(domcfg.Devices.Interfaces, newNet)
}
|
||||
|
||||
func setRandomMacs(domcfg *libvirtxml.Domain) {
|
||||
for i, x := range domcfg.Devices.Interfaces {
|
||||
// Create a new DomainDiskInterfaces struct
|
||||
newMac := &libvirtxml.DomainInterfaceMAC{
|
||||
Address: "aa:bb:cc:dd:ee:ff", // make sure this is unique
|
||||
}
|
||||
|
||||
// Assign it to the disk's source
|
||||
domcfg.Devices.Interfaces[i].MAC = newMac
|
||||
|
||||
// fmt.Printf("Disk Source %s\n", name)
|
||||
// fmt.Printf("mac addr %+v\n", x.MAC)
|
||||
fmt.Printf("mac addr %s\n", x.MAC.Address)
|
||||
}
|
||||
}
|
Loading…
Reference in New Issue