Compare commits

...

142 Commits
v0.1 ... master

Author SHA1 Message Date
Jeff Carr dbcd3b5686 new GUI codebase 2025-09-09 05:52:39 -05:00
Jeff Carr f67c81d499 autogenpb syntax change 2025-06-30 07:56:23 -05:00
Jeff Carr f39c7d4a51 todo: fix edit 2025-06-26 17:58:22 -05:00
Jeff Carr 65563eb8e2 droplet create makes the network and disks 2025-06-04 06:29:52 -05:00
Jeff Carr bf01596f30 first droplet create (the protobuf anyway) 2025-06-04 06:29:49 -05:00
Jeff Carr d261a220df send a create droplet event & unmarshal response 2025-06-03 23:48:40 -05:00
Jeff Carr 7fd5089917 fix droplet start() 2025-05-23 10:30:20 -05:00
Jeff Carr 82ebc25936 more work on droplet edit 2025-04-24 19:28:18 -05:00
Jeff Carr 4faca63da8 check for changes and save the config files 2025-04-23 02:43:18 -05:00
Jeff Carr c8a50fbb18 sends an edit event 2025-04-22 20:49:47 -05:00
Jeff Carr 4332b3d31a working on a droplet edit window 2025-04-22 18:49:00 -05:00
Jeff Carr 2c5701eeca droplet start worked 2025-04-21 20:54:44 -05:00
Jeff Carr a24448a9d4 /event sends and gets an event PB 2025-04-21 20:54:42 -05:00
Jeff Carr 4121e66e01 attempting the cluster protobuf 2025-04-21 20:54:39 -05:00
Jeff Carr a4dd085a47 move everything into cluster protobuf 2025-04-21 20:54:37 -05:00
Jeff Carr d3f809b25d okay then 2025-04-21 20:54:35 -05:00
Jeff Carr 8eda4cf2da add --daemon 2025-04-21 20:54:33 -05:00
Jeff Carr 3cd1f64d15 stub in droplet status 2025-04-20 19:40:50 -05:00
Jeff Carr 69ee3b95d0 droplet list works 2025-04-12 11:27:55 -05:00
Jeff Carr 16558e1b72 autocomplete + doList() 2025-04-11 21:01:18 -05:00
Jeff Carr 1fd6b1d36d continued work on the GUI 2025-03-24 21:53:31 -05:00
Jeff Carr a7e639cdb0 working on create 2025-03-22 09:46:16 -05:00
Jeff Carr 03b03cb197 fixes to the GUI 2025-03-22 09:46:16 -05:00
Jeff Carr a97379d76f move to more common code 2025-03-22 09:46:16 -05:00
Jeff Carr 46472fa868 fix wrong syntax 2025-03-12 10:28:31 -05:00
Jeff Carr 599fe4251f fixes to the admin tables 2025-03-12 05:29:30 -05:00
Jeff Carr 50d16b3d86 early events 2025-03-11 20:17:46 -05:00
Jeff Carr 19b1588512 show hypervisors 2025-03-11 19:44:48 -05:00
Jeff Carr 0a452c005b smarter admin tables 2025-03-11 10:27:42 -05:00
Jeff Carr 7ee465da56 working on vnc 2025-03-11 08:03:54 -05:00
Jeff Carr 8517dbc948 try to get events & hypervisors protobufs 2025-03-11 04:02:30 -05:00
Jeff Carr d2d04da122 cleaner droplet table views 2025-03-11 03:34:30 -05:00
Jeff Carr e0970840e2 admin "show droplets" works 2025-03-10 18:09:14 -05:00
Jeff Carr 89f870f1f9 feed back the protobuf 2025-03-10 09:19:52 -05:00
Jeff Carr 9449b5699e standard protobuf droplets window 2025-03-10 09:11:48 -05:00
Jeff Carr 68bf08bd6c Unmarshal() droplets 2025-03-10 07:53:11 -05:00
Jeff Carr f9515280cf make admin mode 2025-03-10 04:36:35 -05:00
Jeff Carr 30a5eb56a2 display status and uptime 2025-03-04 05:00:46 -06:00
Jeff Carr 01d7e92cdb wrong name when copying over code from zookeeper 2025-03-04 05:00:46 -06:00
Jeff Carr 73196c3231 attempt to send update to plugin 2025-02-23 13:13:30 -06:00
Jeff Carr 19e29d21d7 events table 2025-02-23 13:13:30 -06:00
Jeff Carr affb055c56 early attempt at tablepb.Update() 2025-02-23 13:13:30 -06:00
Jeff Carr 6e948e0736 add mac addrs 2025-02-23 13:13:30 -06:00
Jeff Carr 3a62f10d20 add GenericWindow() 2025-02-23 13:13:30 -06:00
Jeff Carr 69b0d4c013 add hypervisors table 2025-02-23 13:13:30 -06:00
Jeff Carr 522723e946 wow. all it took to make a table window for running droplets 2025-02-23 13:13:30 -06:00
Jeff Carr 32e2753007 switch virtbuf to virtpb 2025-02-22 17:45:50 -06:00
Jeff Carr 9f9a52312e first gui for virtigo! easy with proto pb tables 2025-02-22 15:23:04 -06:00
Jeff Carr 6e111ba862 run go vet 2025-02-08 20:05:15 -06:00
Jeff Carr 55a3ed7187 func name change 2025-02-07 04:40:52 -06:00
Jeff Carr 4dd0f0eaba improvements due to autogenpb 2024-12-04 02:25:19 -06:00
Jeff Carr 70cc9944ad create and start work again
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-16 05:21:44 -06:00
Jeff Carr 5ea2e5999b fix build 2024-11-16 00:08:34 -06:00
Jeff Carr bd2ddb278c better poll hyper output
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-13 18:53:13 -06:00
Jeff Carr 0697375d44 just comment this out
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-12 23:55:04 -06:00
Jeff Carr a4a2d7a01f debug changes
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-12 09:49:42 -06:00
Jeff Carr 4883c26547 so it shows up on pkg.go.dev
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-07 07:32:41 -06:00
Jeff Carr 1e03cd4377 rename Cluster
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-07 05:04:50 -06:00
Jeff Carr 3c9d1d6f1d still runs
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-06 05:24:09 -06:00
Jeff Carr 0da809ae42 rm old code
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-06 02:41:06 -06:00
Jeff Carr 80ff65c6d2 git ignore files/ directory
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-04 06:12:58 -06:00
Jeff Carr 84aeec7dde more notes
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-03 01:41:00 -05:00
Jeff Carr bd1ed6f513 notes
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-03 01:38:09 -05:00
Jeff Carr 173e9ef1f4 spaces between qcow image names in output
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-03 01:30:08 -05:00
Jeff Carr 1c2bdfa398 build notes
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-02 23:46:53 -05:00
Jeff Carr ea7cf0e744 just use the deb packages for now
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-02 16:06:00 -05:00
Jeff Carr 1321b8566a build doesn't work
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-02 16:05:17 -05:00
Jeff Carr 0f1bdad780 makefile fixes
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-02 09:28:12 -05:00
Jeff Carr 15fe83812b fix makefile. makefiles are awesome. always have one, but fuck makefiles
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-02 09:14:48 -05:00
Jeff Carr 638539c840 accidentally sending things to stdout
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-01 22:45:32 -05:00
Jeff Carr 71e0065240 everything is working again
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-01 12:48:31 -05:00
Jeff Carr 301fe567e2 getting down to the nitty gritty
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-01 11:17:52 -05:00
Jeff Carr 2c1c3482fe working further on handling vm ghosting
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-01 10:23:49 -05:00
Jeff Carr 8b588eeba5 confirm the current hypervisor value doesn't get cleared out
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-01 09:53:52 -05:00
Jeff Carr 173520b42e more work on importing
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-01 08:51:25 -05:00
Jeff Carr 0a28c45a6c import worked
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-01 08:30:55 -05:00
Jeff Carr a10dab96ff import almost works
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-01 08:03:20 -05:00
Jeff Carr 7fa6c2e2de argh. still not working
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-01 06:41:20 -05:00
Jeff Carr 70634ec66e import closer to working
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-01 06:12:12 -05:00
Jeff Carr 0076d3cb2d compiles
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-01 05:50:42 -05:00
Jeff Carr 720c2e6576 sent import to hypervisor. failed of course, now onto virtigod
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-01 02:05:54 -05:00
Jeff Carr 83faa62e18 last commit before import attempt
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-01 02:00:46 -05:00
Jeff Carr ca9ad75283 more COBOL but the output is really easy to read now
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-01 00:58:28 -05:00
Jeff Carr c1d86fc324 common d.SprintHeader() functions for humans
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-11-01 00:41:00 -05:00
Jeff Carr 26cd0f7709 ready to try local import
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-31 22:39:47 -05:00
Jeff Carr d51c4627f7 add droplet() defaults to start state=off
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-31 22:14:11 -05:00
Jeff Carr efc3032d83 cleaning up debugging for import
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-31 17:17:10 -05:00
Jeff Carr 3562fc780e ready to trigger importDomain()
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-31 17:00:34 -05:00
Jeff Carr b28ae96cd4 ready for import local domain request to hypervisors
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-31 15:43:25 -05:00
Jeff Carr b4ef8b76b1 runs again
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-31 14:15:36 -05:00
Jeff Carr 3c520003ed compiles again
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-31 13:37:00 -05:00
Jeff Carr 913b18737b works enough that uptime works again
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-31 07:16:23 -05:00
Jeff Carr fb51876e8e rename package virtigolib
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-31 06:50:57 -05:00
Jeff Carr 4a58352ec2 dump more code to virtigoxml
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-31 06:43:14 -05:00
Jeff Carr e6ea90f8de compiles after lots of protobuf changes
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-31 06:41:30 -05:00
Jeff Carr 22111183a5 make a simple .deb package
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-30 20:31:14 -05:00
Jeff Carr 41673f3170 reject start and create early if grid unstable
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-30 19:38:12 -05:00
Jeff Carr eacf3b8bef events is now c.E
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-30 18:09:54 -05:00
Jeff Carr bf52632cb7 force filenames to match hostnames
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-30 13:17:04 -05:00
Jeff Carr 2a18f506c7 simple unique mac
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-30 12:30:34 -05:00
Jeff Carr 410015c33e more work on create() droplet
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-30 12:16:08 -05:00
Jeff Carr c8a69bdb73 code and debugging cleanups
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-29 22:55:28 -05:00
Jeff Carr 9d08114b93 code reorg
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-29 09:02:15 -05:00
Jeff Carr 8724a07b0d function to dump all droplets
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-29 09:00:40 -05:00
Jeff Carr c26699571d ready to release a debian package?
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-28 11:44:53 -05:00
Jeff Carr 42d54a418d quit with configSave()
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-28 08:08:27 -05:00
Jeff Carr c12d3a2dbb continuing work on tracking droplet state
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-28 08:06:14 -05:00
Jeff Carr de5f5c6a85 grid stable might be working
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-28 07:02:42 -05:00
Jeff Carr 3f7171fff2 make an event for droplet moving hypervisors
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-28 05:07:56 -05:00
Jeff Carr 8fc2fbd9c9 track droplets reported from each hypervisor
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-27 11:02:50 -05:00
Jeff Carr d38865a6cf maybe fix duplicates? this isn't really safe probably
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-27 08:10:19 -05:00
Jeff Carr 7288595efc start spice/vnc ports at 5900 and skip 6000
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-27 07:06:12 -05:00
Jeff Carr 212b582060 sets unique spice port and saves config files
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-27 04:53:39 -05:00
Jeff Carr d948581300 add /dumpdroplets
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-27 03:17:34 -05:00
Jeff Carr 71f83d4000 prepare to validate spice ports
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-27 02:55:08 -05:00
Jeff Carr d0767eb984 validate stuff should be here
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-27 02:29:45 -05:00
Jeff Carr 5d1729f99b more cleanups
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-26 21:13:25 -05:00
Jeff Carr cec509ae7f doesn't deal with libvirtxml directly anymore
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-26 20:53:52 -05:00
Jeff Carr 3c1efcba0e start worked by sending protobuf
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-26 20:09:59 -05:00
Jeff Carr 7837182d53 more http options
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-26 13:05:05 -05:00
Jeff Carr 2e8281d067 virtigod doing xml now
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-26 12:32:17 -05:00
Jeff Carr 6d5c5c6072 more moved into virtigoxml
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-26 11:05:58 -05:00
Jeff Carr 419ae0ad5f validate moved into virtigoxml. maybe a bad name?
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-26 10:16:56 -05:00
Jeff Carr 34b6891507 move this into a common package?
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-26 10:11:39 -05:00
Jeff Carr a50f387b96 more common code. it might work again. would be nice
to have a kuma check, but no. I fucking deleted it. and
of course with no backup or memory of where I mapped it to. kuma needs
an undelete! or an event log of changes to kuma. ironic since all it
does is track changes in state but it doesn't track it's own changes

Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-26 09:38:02 -05:00
Jeff Carr 161bfe395e works again. I fucking deleted the kuma check
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-26 09:33:31 -05:00
Jeff Carr b4518e8b82 compiles and lists hypervisors
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-26 08:54:28 -05:00
Jeff Carr 9020957ee7 new config files
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-26 07:28:19 -05:00
Jeff Carr 0dc393896c still works
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-26 05:17:51 -05:00
Jeff Carr 11f0cd97b5 using lib/virtigoxml and it seems to actually work
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-26 04:36:38 -05:00
Jeff Carr fd3e14bcc6 start works with lib/virtigoxml
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-26 04:25:50 -05:00
Jeff Carr 30884aff3b separate tap and bridge group add network functions
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-26 02:42:12 -05:00
Jeff Carr f42091a2ce network is maybe correct as virtio. pci needed?
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-26 02:31:52 -05:00
Jeff Carr 7320fceb8d finds disks and puts dirs in the protobuf cluster
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-26 01:02:09 -05:00
Jeff Carr 61b954ecca no longer need these for debugging
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-25 22:09:21 -05:00
Jeff Carr 0fd0283372 generate XML by droplet hostname
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-25 22:07:28 -05:00
Jeff Carr 3893ac7e3d --xml-ignore-disk=true works
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-25 20:09:41 -05:00
Jeff Carr b35c04414a checking for duplicate filenames works
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-25 20:05:04 -05:00
Jeff Carr 030af1bcfb check for duplicate disk names
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-25 19:16:44 -05:00
Jeff Carr a5eee861ea print out change events
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-25 17:35:29 -05:00
Jeff Carr 9b94785cd2 start date works on events
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-25 17:01:30 -05:00
Jeff Carr 4d43c36db5 more xml checks
Signed-off-by: Jeff Carr <jcarr@wit.com>
2024-10-25 16:40:05 -05:00
41 changed files with 3586 additions and 2179 deletions

2
.gitignore vendored
View File

@ -2,5 +2,7 @@
go.mod go.mod
go.sum go.sum
files/
virtigo virtigo
virtigod virtigod

View File

@ -1,18 +1,53 @@
.PHONY: build
VERSION = $(shell git describe --tags) VERSION = $(shell git describe --tags)
BUILDTIME = $(shell date +%Y.%m.%d)
# create the go.mod and go.sum if this is a brand new repo # create the go.mod and go.sum if this is a brand new repo
# REDOMOD = $(shell if [ -e go.mod ]; then echo go.mod; else echo no go mod; fi)
REDOMOD = $(shell if [ -e go.sum ]; then echo go.sum exists; else GO111MODULE= go mod init; GO111MODULE= go mod tidy; fi) REDOMOD = $(shell if [ -e go.sum ]; then echo go.sum exists; else GO111MODULE= go mod init; GO111MODULE= go mod tidy; fi)
all: all: install
GO111MODULE=off go build -v -ldflags "-X main.Version=${VERSION} -X gui.GUIVERSION=${VERSION}" @echo build worked
./virtigo --version virtigo list droplets
./virtigo --help virtigo list droplets --on
virtigo droplet show --name check.lab.wit.org
virtigo droplet start --name check.lab.wit.org
build: goimports vet
GO111MODULE=off go build \
-ldflags "-X main.VERSION=${VERSION} -X main.BUILDTIME=${BUILDTIME} -X gui.GUIVERSION=${VERSION}"
verbose: goimports vet
GO111MODULE=off go build -v -x \
-ldflags "-X main.VERSION=${VERSION} -X main.BUILDTIME=${BUILDTIME} -X gui.GUIVERSION=${VERSION}"
install: goimports vet
GO111MODULE=off go install -v -x \
-ldflags "-X main.VERSION=${VERSION} -X main.BUILDTIME=${BUILDTIME} -X gui.GUIVERSION=${VERSION}"
andlabs: verbose
./virtigo --gui andlabs
# makes a .deb package
debian:
rm -f ~/incoming/virtigo*deb
go-deb --no-gui --repo go.wit.com/apps/virtigo
xml-add: xml-add:
./virtigo --libvirt *.xml ./virtigo --libvirt ~/libvirt/*.xml --xml-ignore-disk=true
start-all-droplets: xml-add-save:
./virtigo --libvirt ~/libvirt/*.xml --xml-ignore-disk=true --save
start-pihole.wit.com: build
rm -f /tmp/blahcarr.xml /tmp/pihole.wit.com.xml
./virtigo --start pihole.wit.com
./virtigo --libvirt /tmp/pihole.wit.com.xml
start-pihole.wit.com-http:
curl --silent http://localhost:8080/start?hostname=pihole.wit.com
old-start-all-droplets:
curl --silent http://localhost:8080/start?start=git.wit.org curl --silent http://localhost:8080/start?start=git.wit.org
curl --silent http://localhost:8080/start?start=go.wit.com curl --silent http://localhost:8080/start?start=go.wit.com
curl --silent http://localhost:8080/start?start=rdate.wit.com curl --silent http://localhost:8080/start?start=rdate.wit.com
@ -44,6 +79,10 @@ release-build:
goimports: goimports:
goimports -w *.go goimports -w *.go
vet:
@GO111MODULE=off go vet
@echo this go binary package builds okay
# remake the go.mod and go.sum files # remake the go.mod and go.sum files
redomod: redomod:
rm -f go.* rm -f go.*
@ -53,6 +92,7 @@ redomod:
clean: clean:
rm -f go.* rm -f go.*
rm -f virtigo* rm -f virtigo*
go-mod-clean purge
# git clone the sources and all the golang dependancies into ~/go/src # git clone the sources and all the golang dependancies into ~/go/src
# if you don't have go-clone, you can get it from http://go.wit.com/ # if you don't have go-clone, you can get it from http://go.wit.com/
@ -61,3 +101,26 @@ git-clone:
go-clone --recursive --go-src --no-work go.wit.com/apps/virtigo go-clone --recursive --go-src --no-work go.wit.com/apps/virtigo
go-clone --recursive --go-src --no-work go.wit.com/apps/gowebd go-clone --recursive --go-src --no-work go.wit.com/apps/gowebd
go-clone --recursive --go-src --no-work go.wit.com/lib/daemons/virtigod go-clone --recursive --go-src --no-work go.wit.com/lib/daemons/virtigod
http-uptime:
curl --silent http://localhost:8080/uptime
http-droplets:
curl --silent http://localhost:8080/droplets
http-missing:
curl --silent http://localhost:8080/missing
http-dumplibvirtxml:
curl --silent http://localhost:8080//dumplibvirtxml
protogen:
go-clone google.golang.org/protobuf
cd ~/go/src/google.golang.org/protobuf/cmd/protoc-gen-go && go install
gocui: install
virtigo --gui gocui --gui-verbose --gui-file ../../toolkits/gocui/gocui.so --admin
# virtigo --gui gocui --gui-verbose --gui-file ../../toolkits/gocui/gocui.so --admin >/tmp/forge.log 2>&1
log:
journalctl -f -xeu virtigod.service

View File

@ -1,6 +1,8 @@
virtigo: a control panel for your virtual machine cluster # virtigo: a control panel for your virtual machine cluster
This is an attempt to make something that should: There is no greater thrill for a linux sys admin than running your own cloud.
# This is an attempt to make something that should:
* Maintain the master list of virtual machines that should be running at all times * Maintain the master list of virtual machines that should be running at all times
* Work with a cluster of dom0 hypervisiors via libvirt and/or qemu * Work with a cluster of dom0 hypervisiors via libvirt and/or qemu
@ -12,7 +14,7 @@ This is an attempt to make something that should:
* Work in GUI mode (GTK/QT/etc) but ALSO the console (ncurses) * Work in GUI mode (GTK/QT/etc) but ALSO the console (ncurses)
* GPL'd with the intent for use with homelab and personal hobbyists * GPL'd with the intent for use with homelab and personal hobbyists
Notes & Goals: # Notes & Goals:
* Be super easy to use. * Be super easy to use.
* Automatically map access to serial and graphical consoles * Automatically map access to serial and graphical consoles
@ -26,7 +28,7 @@ Notes & Goals:
* Automatic live migration to decommission nodes * Automatic live migration to decommission nodes
* Implement iptable rules via the virtigo daemon * Implement iptable rules via the virtigo daemon
Inspired by: # Inspired by:
* kvm * kvm
* virt-manager * virt-manager

View File

@ -1,378 +0,0 @@
// Copyright 2024 WIT.COM Inc Licensed GPL 3.0
package main
import (
"encoding/xml"
"errors"
"fmt"
"github.com/google/uuid"
pb "go.wit.com/lib/protobuf/virtbuf"
"go.wit.com/log"
"libvirt.org/go/libvirtxml"
)
// addDomainDroplet imports one libvirt domain definition into the cluster.
// If no existing droplet matches (by hostname/UUID via findDomain), a new
// DropletT is created, registered in the package-global `me` state, and
// me.changed is set; in both cases the droplet is then synchronized against
// the XML with updateDroplet().
//
// Returns the droplet (possibly partially updated) and a non-nil error if
// updateDroplet() fails.
//
// NOTE(review): the "added new droplet" log line below fires even when the
// droplet already existed in the config — presumably an import trace, but
// the wording is misleading; confirm intent.
func addDomainDroplet(domcfg *libvirtxml.Domain) (*DropletT, error) {
if domcfg == nil {
return nil, errors.New("domcfg == nil")
}
// lookup error is deliberately ignored; a nil droplet means "not tracked yet"
d, _ := findDomain(domcfg)
if d == nil {
// this is a new unknown droplet (not in the config file)
d = new(DropletT)
// defaults: 2 cpus and 2*1024*1024 memory units — assumes AddDroplet
// takes KiB (i.e. 2 GiB); TODO confirm units against the virtbuf API
d.pb = me.cluster.AddDroplet(domcfg.UUID, domcfg.Name, 2, 2*1024*1024)
// desired state defaults to OFF; actual state is not known yet
d.pb.StartState = pb.DropletState_OFF
d.CurrentState = pb.DropletState_UNKNOWN
// if the domcfg doesn't have a uuid, make a new one here
if d.pb.Uuid == "" {
u := uuid.New()
d.pb.Uuid = u.String()
}
me.droplets = append(me.droplets, d)
me.changed = true
}
// copy cpu/memory/arch/spice/network/disk settings from the XML into the protobuf
err := updateDroplet(d, domcfg)
if err != nil {
log.Info("updateDroplet() failed for", d.pb.Hostname)
return d, errors.New("update failed for " + domcfg.Name)
}
log.Info("added new droplet", domcfg.Name, domcfg.UUID)
// report any XML sections this importer does not understand
dumpNonStandardXML(domcfg)
return d, nil
}
// findDomain returns the tracked droplet that corresponds to the given
// libvirt domain, matching on hostname and/or UUID, or (nil, nil) when
// nothing matches.
//
// Side effects while matching by hostname:
//   - if the XML has no UUID, the stored UUID is written back into domcfg
//   - if the XML UUID differs, the stored UUID is overwritten with the XML
//     value and me.changed is set
//
// Errors: "Found Twice" when two tracked droplets share the hostname, and
// "UUID with mis-matched names" when the UUID matches a droplet whose
// hostname differs from the XML's.
func findDomain(domcfg *libvirtxml.Domain) (*DropletT, error) {
var found *DropletT
if domcfg == nil {
return nil, errors.New("domcfg == nil")
}
for _, d := range me.droplets {
if d.pb.Hostname == domcfg.Name {
if d.pb.Uuid != domcfg.UUID {
if domcfg.UUID == "" {
// ignore blank or nonexistent UUID's
// todo: check to see if the uuid already exists ?
domcfg.UUID = d.pb.Uuid
} else {
// the libvirt XML wins: adopt its UUID and mark the config dirty
fmt.Println("Will Change UUID from", d.pb.Uuid, "to", domcfg.UUID, "for hostname", d.pb.Hostname)
d.pb.Uuid = domcfg.UUID
me.changed = true
}
}
if found == nil {
found = d
} else {
// two config entries share this hostname — refuse to guess
fmt.Println("FOUND TWICE", d.pb.Uuid, domcfg.Name, domcfg.UUID)
return d, errors.New("Found Twice")
}
}
// NOTE(review): this check also runs for the droplet whose UUID was just
// copied/overwritten above, so it can match on the value assigned moments
// earlier — confirm that is intended.
if d.pb.Uuid == domcfg.UUID {
if d.pb.Hostname != domcfg.Name {
fmt.Println("protobuf has: UUID and Name:", d.pb.Uuid, d.pb.Hostname)
fmt.Println("libvirt has: UUID and Name:", domcfg.UUID, domcfg.Name)
fmt.Println("FOUND UUID WITH MIS-MATCHED NAME", domcfg.Name, domcfg.UUID)
return d, errors.New("UUID with mis-matched names")
}
}
}
return found, nil
}
// updateDroplet copies the settings found in the libvirt XML (memory,
// arch/machine, cpus, spice port, networks, disks) into the droplet's
// protobuf, collecting a change event for every field that differs.
//
// On success, if anything changed, all collected events are appended to
// me.events and me.changed is set. Returns an error when the domain is not
// kvm or when any of the sub-updates (memory/network/disk) fail.
func updateDroplet(d *DropletT, domcfg *libvirtxml.Domain) error {
var alle []*pb.Event
if d == nil {
return errors.New("d == nil")
}
if domcfg == nil {
return errors.New("domcfg == nil")
}
e, err := updateMemory(d, domcfg)
if err != nil {
log.Info("updateMemory() failed")
return err
}
if e != nil {
alle = append(alle, e)
}
// update arch & machine
if (domcfg.OS != nil) && (domcfg.OS.Type != nil) {
// OS Type: &{Arch:x86_64 Machine:pc-i440fx-5.2 Type:hvm}
t := domcfg.OS.Type
if d.pb.QemuArch != t.Arch {
e := NewChangeEvent(d.pb, "Droplet.QemuArch", d.pb.QemuArch, t.Arch)
alle = append(alle, e)
d.pb.QemuArch = t.Arch
}
if d.pb.QemuMachine != t.Machine {
e := NewChangeEvent(d.pb, "Droplet.QemuMachine", d.pb.QemuMachine, t.Machine)
alle = append(alle, e)
d.pb.QemuMachine = t.Machine
}
}
// check cpus
// NOTE(review): domcfg.VCPU is dereferenced without a nil check — a libvirt
// XML file with no <vcpu> element would panic here; confirm inputs.
if d.pb.Cpus != int64(domcfg.VCPU.Value) {
// fmt.Printf("cpus changed. VCPU = %+v\n", domcfg.VCPU)
fmt.Printf("cpus changed. from %d to %d\n", d.pb.Cpus, domcfg.VCPU.Value)
alle = append(alle, NewChangeEvent(d.pb, "Droplet.Cpus", d.pb.Cpus, domcfg.VCPU.Value))
d.pb.Cpus = int64(domcfg.VCPU.Value)
}
// update spice port
if domcfg.Devices.Graphics != nil {
for _, g := range domcfg.Devices.Graphics {
if g.Spice == nil {
continue
}
var s *libvirtxml.DomainGraphicSpice
s = g.Spice
// fmt.Printf("Spice: %d %+v %s\n", i, s, s.AutoPort)
if s.AutoPort == "yes" {
// autoport: nothing fixed to record in the protobuf
} else {
if d.pb.SpicePort != int64(s.Port) {
// record the explicitly configured spice port
// NOTE(review): SpicePort is overwritten BEFORE the change event is
// built, so the event's "old" value is already the new port — the
// event creation likely belongs above this assignment.
d.pb.SpicePort = int64(s.Port)
fmt.Printf("Spice Port set to = %d\n", s.Port)
alle = append(alle, NewChangeEvent(d.pb, "Droplet.SpicePort", d.pb.SpicePort, s.Port))
}
}
}
}
// check type
// only kvm domains are supported; anything else aborts the import
if domcfg.Type != "kvm" {
fmt.Printf("not kvm. Virt type == %s\n", domcfg.Type)
return errors.New("not kvm")
}
nete, err := updateNetwork(d, domcfg)
if err != nil {
log.Info("updateNetwork() failed", err)
return errors.New("updateNetwork() failed")
}
for _, e := range nete {
alle = append(alle, e)
}
if !updateDisk(d, domcfg) {
return errors.New("updateDisk() failed")
}
if alle == nil {
// nothing differed between the XML and the protobuf
log.Info("libvirt xml import worked. nothing changed", domcfg.Name)
return nil
}
log.Info("libvirt xml import worked. droplet changed", domcfg.Name)
// log.Info("all change events", alle)
me.changed = true
// append each change event
for _, e := range alle {
me.events.Events = append(me.events.Events, e)
}
return nil
}
// updateMemory syncs the droplet's memory size from the libvirt XML.
// It converts the XML's KiB/MiB/GiB unit to bytes, applies it via
// d.SetMemory(), and returns the resulting change event (nil when the
// value did not change or the XML defines no memory element).
//
// NOTE(review): the original header comment claimed "returns false if
// something went wrong", which described a bool signature this function
// no longer has — it returns (*pb.Event, error).
func updateMemory(d *DropletT, domcfg *libvirtxml.Domain) (*pb.Event, error) {
if (d == nil) || (domcfg == nil) {
return nil, errors.New("domcfg == nil")
}
if domcfg.Memory == nil {
// nothing to do. libvirt xml file didn't define memory size
return nil, nil
}
// convert the XML value to bytes; unknown units are rejected
var m int64 = 0
switch domcfg.Memory.Unit {
case "KiB":
m = int64(domcfg.Memory.Value * 1024)
case "MiB":
m = int64(domcfg.Memory.Value * 1024 * 1024)
case "GiB":
m = int64(domcfg.Memory.Value * 1024 * 1024 * 1024)
default:
fmt.Println("Unknown Memory Unit", domcfg.Memory.Unit)
return nil, errors.New("Unknown Memory Unit " + domcfg.Memory.Unit)
}
// SetMemory returns a non-nil event only when the value actually changed
e := d.SetMemory(m)
if e != nil {
fmt.Printf("Memory changed %s to %d %s\n", pb.HumanFormatBytes(d.pb.Memory), domcfg.Memory.Value, domcfg.Memory.Unit)
d.pb.Memory = m
// me.changed = true
}
return e, nil
}
// updateNetwork syncs the droplet's network interfaces from the libvirt XML.
// It builds a mac-address -> bridge-name map from the XML interfaces, then
// reconciles it against d.pb.Networks: known macs may have their bridge name
// checked (changes are rejected unless --xml-ignore-net), and unknown macs
// are appended as new networks (bridge defaults to "worldbr" when blank).
//
// Returns the change events for any networks that were added. Interfaces
// with neither a usable source bridge nor a mac address abort the import
// with an error, dumping the offending XML for inspection.
func updateNetwork(d *DropletT, domcfg *libvirtxml.Domain) ([]*pb.Event, error) {
var allEvents []*pb.Event
if (d == nil) || (domcfg == nil) {
return nil, errors.New("domcfg == nil")
}
// mac address & bridge name
var macs map[string]string
macs = make(map[string]string)
// Iterate over the network interfaces and print the MAC addresses
for _, iface := range domcfg.Devices.Interfaces {
var hwaddr string
var brname string
// fmt.Printf("iface: %+v\n", iface)
// fmt.Printf("MAC: %+v\n", iface.MAC)
// fmt.Printf("Source: %+v\n", iface.Source)
// fmt.Printf("Bridge: %+v\n", iface.Source.Bridge)
// fmt.Printf("Model: %+v\n", iface.Model)
if iface.MAC != nil {
// iface.MAC.Address = "aa:bb:aa:bb:aa:ff"
// log.Info("Interface:", iface.Target, "MAC Address:", iface.MAC.Address)
// fmt.Printf("source: %+v\n", iface.Source)
hwaddr = iface.MAC.Address
}
if iface.Source == nil {
// fmt.Printf("non-standard network: %+v\n", iface)
// dump the raw XML so the operator can see what was rejected
updatedXML, _ := xml.MarshalIndent(domcfg.Devices.Interfaces, "", " ")
log.Info("Non-Standard Network XML Start")
fmt.Println(string(updatedXML))
log.Info("Non-Standard Network XML End")
return nil, errors.New("non-standard network. source == nil")
}
if iface.Source.Bridge == nil {
if hwaddr == "" {
fmt.Printf("non-standard network: %+v\n", iface)
updatedXML, _ := xml.MarshalIndent(domcfg.Devices.Interfaces, "", " ")
log.Info("Non-Standard Network XML Start")
fmt.Println(string(updatedXML))
log.Info("Non-Standard Network XML End")
return nil, errors.New("bridge is nil and no mac address")
}
brname = ""
} else {
if iface.Source.Bridge.Bridge == "" {
if hwaddr == "" {
fmt.Printf("non-standard network: %+v\n", iface)
fmt.Printf("iface.Mac: %+v\n", iface)
updatedXML, _ := xml.MarshalIndent(domcfg.Devices.Interfaces, "", " ")
log.Info("Non-Standard Network XML Start")
fmt.Println(string(updatedXML))
log.Info("Non-Standard Network XML End")
return nil, errors.New("bridge is blank and no mac address")
}
// NOTE(review): this assignment sits INSIDE the `Bridge == ""` branch,
// so it always assigns the empty string, and a NON-empty bridge name
// from the XML is never captured (brname stays "" and falls back to
// the stored name or "worldbr" below). The assignment probably belongs
// after this if-block — confirm against the repo history.
brname = iface.Source.Bridge.Bridge
}
}
// log.Info("network has bridge:", iface.Source.Bridge.Bridge)
if hwaddr == "" {
// placeholder key; real mac generation is not implemented here yet
hwaddr = "generate " + domcfg.Name
log.Info("need to generate mac addr for bridge:", brname)
// return nil, errors.New("need to generate mac addr for bridge: " + brname)
}
macs[hwaddr] = brname
}
// reconcile the XML's mac->bridge map against the stored networks
for mac, brname := range macs {
var found bool = false
// log.Info("XML has mac address:", mac, brname)
for _, eth := range d.pb.Networks {
if eth.Mac == mac {
// log.Info("OKAY. FOUND ETH:", eth.Mac, eth.Name, brname)
found = true
if brname == "" {
// if new bridge name is blank, keep the old one
brname = eth.Name
}
if eth.Name != brname {
if argv.IgnoreBr {
log.Info("network was:", eth.Mac, eth.Name)
log.Info("network now:", eth.Mac, brname)
log.Info("ignoring network change (--xml-ignore-net)")
} else {
return nil, errors.New("bridge name changed")
}
}
}
}
if !found {
// reject macs already used by another droplet
if checkUniqueMac(mac) {
} else {
log.Info("droplet", d.pb.Hostname, "duplicate mac address", mac)
return nil, errors.New("duplicate mac address")
}
var eth *pb.Network
eth = new(pb.Network)
eth.Mac = mac
if brname == "" {
brname = "worldbr"
}
eth.Name = brname
d.pb.Networks = append(d.pb.Networks, eth)
allEvents = append(allEvents, NewChangeEvent(d.pb, "Droplet NewNetwork", "", mac+" "+brname))
}
}
log.Verbose("mac addrs:", macs)
return allEvents, nil
}
/* from vm3-with-nvme-1.5GB-sec.xml
<disk type='block' device='disk'>
<driver name='qemu' type='raw'/>
<source dev='/dev/nvme4n1'/>
<backingStore/>
<target dev='vdb' bus='virtio'/>
<alias name='virtio-disk1'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
</disk>
*/
// updateDisk syncs the droplet's disk list from the libvirt XML.
// Only file-backed disks (disk.Source.File) are considered; block-device
// disks like the nvme example in the comment above are skipped with a
// message. Filenames not already present in d.pb.Disks are appended and
// me.changed is set.
//
// Returns false if something went wrong (currently only on nil arguments).
//
// NOTE(review): disk.Source is dereferenced without a nil check — a disk
// element with no <source> would panic here; confirm inputs. Also, new
// disks set me.changed but emit no change event, unlike updateNetwork.
func updateDisk(d *DropletT, domcfg *libvirtxml.Domain) bool {
if (d == nil) || (domcfg == nil) {
return false
}
for _, disk := range domcfg.Devices.Disks {
var t *libvirtxml.DomainDiskSourceFile
t = disk.Source.File
if t == nil {
// not a file-backed disk (e.g. a raw block device) — skip it
fmt.Println("disk.Source.File == nil")
continue
}
filename := t.File
if filename == "" {
fmt.Println("No disk source file found.")
continue
}
// does the protobuf already know this filename?
// (the inner `disk` deliberately shadows the outer loop variable)
var found bool = false
for _, disk := range d.pb.Disks {
if disk.Filename == filename {
log.Verbose("OKAY. FOUND filename", filename)
found = true
}
}
if !found {
var disk *pb.Disk
disk = new(pb.Disk)
disk.Filename = filename
d.pb.Disks = append(d.pb.Disks, disk)
log.Info("New filename", filename)
me.changed = true
}
}
return true
}

89
argv.go
View File

@ -1,6 +1,11 @@
package main package main
import "go.wit.com/log" import (
"fmt"
"os"
"go.wit.com/log"
)
/* /*
this parses the command line arguements this parses the command line arguements
@ -11,45 +16,58 @@ import "go.wit.com/log"
var argv args var argv args
type args struct { type args struct {
Xml []string `arg:"--libvirt" help:"import qemu xml files: --libvirt /etc/libvirt/qemu/*.xml"` List *ListCmd `arg:"subcommand:list" help:"list things"`
IgnoreCpu bool `arg:"--xml-ignore-cpu" default:"true" help:"ignore non-standard libvirt xml cpus"` Droplet *DropletCmd `arg:"subcommand:droplet" help:"send events to a droplet"`
IgnoreBr bool `arg:"--xml-ignore-net" default:"true" help:"ignore network bridge name changes"` Config string `arg:"env:VIRTIGO_HOME" help:"defaults to ~/.config/virtigo/"`
Save bool `arg:"--save" default:"false" help:"save protobuf config after import"` Server string `arg:"env:VIRTIGO_SERVER" help:"what virtigo cluster to connect to"`
Config string `arg:"env:VIRTIGO_HOME" help:"defaults to ~/.config/virtigo/"` Localhost bool `arg:"--localhost" help:"use the local libvirt"`
Port int `arg:"--port" default:"8080" help:"allow droplet events via http"` Daemon bool `arg:"--daemon" help:"run as a daemon"`
Daemon bool `arg:"--daemon" help:"run in daemon mode"` Verbose bool `arg:"--verbose" help:"talk more"`
Port int `arg:"--port" default:"8080" help:"allow droplet events via http"`
Xml []string `arg:"--libvirt" help:"import qemu xml files: --libvirt /etc/libvirt/qemu/*.xml"`
Admin bool `arg:"--admin" help:"enter admin mode"`
Bash bool `arg:"--bash" help:"generate bash completion"`
BashAuto []string `arg:"--auto-complete" help:"todo: move this to go-arg"`
} }
// Uptime bool `arg:"--uptime" default:"true" help:"allow uptime checks for things like Kuma"` type EmptyCmd struct {
// Hosts []string `arg:"--hosts" help:"hosts to connect to"` }
type testCmd string
type ListCmd struct {
Droplets *EmptyCmd `arg:"subcommand:droplets" help:"list droplets"`
Hypervisors *EmptyCmd `arg:"subcommand:hypervisors" help:"list hypervisors"`
On bool `arg:"--on" help:"only show things that are on"`
}
type DropletCmd struct {
Start *EmptyCmd `arg:"subcommand:start" help:"start droplet"`
Stop *EmptyCmd `arg:"subcommand:stop" help:"stop droplet"`
Show *EmptyCmd `arg:"subcommand:show" help:"show droplet"`
Console *EmptyCmd `arg:"subcommand:console" help:"open serial console"`
VNC *EmptyCmd `arg:"subcommand:vnc" help:"open VNC console"`
Spice *EmptyCmd `arg:"subcommand:spice" help:"open spiceconsole"`
Name string `arg:"--name" help:"what droplet to start"`
}
func (a args) Description() string { func (a args) Description() string {
return ` return `
virtigo will help control your cluster virtigo: control your cluster
This maintains a master list of all your vm's (aka 'droplets') This maintains a master list of all your vm's (aka 'droplets')
in your homelab cloud. You can import libvirt xml files. in your homelab cloud. You can import libvirt xml files.
This app talks to your hypervisors via the virtigod daemon. This app talks to your hypervisors via the virtigod daemon.
Import your existing libvirt xml files with:
virtigo --libvirt /etc/libvirt/qemu/*.xml --save
This runs a http server so you can control your virtual machines.
For example to start a vm called 'www.wit.com' your cluster 'foo.bar.com':
curl http://foo.bar.com/start?www.wit.com
` `
} }
func (args) Version() string { func (args) Version() string {
return "virtigo " + Version return ARGNAME + " " + VERSION + " Built on " + BUILDTIME
} }
var INFO *log.LogFlag var INFO *log.LogFlag
var POLL *log.LogFlag var POLL *log.LogFlag
var WARN *log.LogFlag var WARN *log.LogFlag
var SPEW *log.LogFlag
var EVENT *log.LogFlag var EVENT *log.LogFlag
func init() { func init() {
@ -58,6 +76,31 @@ func init() {
INFO = log.NewFlag("INFO", false, full, short, "general virtigo") INFO = log.NewFlag("INFO", false, full, short, "general virtigo")
POLL = log.NewFlag("POLL", false, full, short, "virtigo polling") POLL = log.NewFlag("POLL", false, full, short, "virtigo polling")
SPEW = log.NewFlag("SPEW", true, full, short, "bad things") WARN = log.NewFlag("WARN", true, full, short, "bad things")
EVENT = log.NewFlag("EVENT", true, full, short, "hypeprvisor/droplet events") EVENT = log.NewFlag("EVENT", true, full, short, "hypeprvisor/droplet events")
} }
// DoAutoComplete handles shell (bash) autocomplete requests: it
// prints the completion words matching the current partial command
// line to stdout and then exits the process.
func (a args) DoAutoComplete(argv []string) {
	// bug fix: guard against an empty argument list before indexing
	// argv[0]; fall back to listing the top-level subcommands
	if len(argv) == 0 {
		fmt.Println("--bash list droplet")
		os.Exit(0)
	}
	switch argv[0] {
	case "list":
		fmt.Println("droplets hypervisors")
	case "droplet":
		fmt.Println("start stop")
	case "devel":
		fmt.Println("--force")
	case "master":
		fmt.Println("")
	case "verify":
		fmt.Println("user devel master")
	default:
		if argv[0] == ARGNAME {
			// list the subcommands here
			fmt.Println("--bash list droplet")
		}
	}
	os.Exit(0)
}

5
build Executable file
View File

@ -0,0 +1,5 @@
#!/bin/bash -x
# build helper: stages the virtigoctl binary into the package tree.
# NOTE(review): the old comment called this "the systemd control file",
# but the script only copies a binary -- confirm intent.
# assumes ../virtigoctl/virtigoctl has already been built -- TODO confirm
mkdir -p files/usr/bin/
cp ../virtigoctl/virtigoctl files/usr/bin/

View File

@ -1,99 +0,0 @@
package main
import (
// "reflect"
"errors"
"fmt"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/wrapperspb"
pb "go.wit.com/lib/protobuf/virtbuf"
"go.wit.com/log"
)
// convertToAnypb wraps a plain Go value (int64, int, string, or bool)
// into a protobuf anypb.Any using the matching wrapperspb wrapper
// type. Returns nil (after logging) for any other type, or if the
// anypb conversion itself fails.
func convertToAnypb(x any) *anypb.Any {
	switch v := x.(type) {
	case int64:
		a, _ := anypb.New(wrapperspb.Int64(v))
		return a
	case int:
		// bug fix: the old code asserted x.(int64) here, which panics
		// when x is a plain int; convert the value instead
		a, _ := anypb.New(wrapperspb.Int64(int64(v)))
		return a
	case string:
		a, _ := anypb.New(wrapperspb.String(v))
		return a
	case bool:
		a, _ := anypb.New(wrapperspb.Bool(v))
		return a
	default:
		log.Error(errors.New("convertToAnypb() unknown type"), "v =", v, "x =", x)
		return nil
	}
}
// convertToString renders a plain Go value (int64, int, uint, string,
// or bool) as its decimal/text form. Unknown types log an error and
// return "".
func convertToString(x any) string {
	switch v := x.(type) {
	case int64:
		return fmt.Sprintf("%d", v)
	case int:
		// bug fix: the old code asserted x.(int64) here, which panics
		// when x is a plain int; use the type-switched value instead
		return fmt.Sprintf("%d", v)
	case uint:
		return fmt.Sprintf("%d", v)
	case string:
		return v
	case bool:
		if v {
			return "true"
		}
		return "false"
	default:
		// also fixes the "convertToSTring" misspelling in diagnostics
		log.Info("convertToString() unknown type", v)
		log.Error(errors.New("convertToString() unknown type"), "v =", v, "x =", x)
		return ""
	}
}
// NewChangeEvent builds a *pb.Event recording that field fname on
// droplet d changed from origval to newval. Both values are stored in
// their string form via convertToString().
func NewChangeEvent(d *pb.Droplet, fname string, origval any, newval any) *pb.Event {
	e := new(pb.Event)
	e.Droplet = d.Hostname
	e.FieldName = fname
	e.OrigVal = convertToString(origval)
	e.NewVal = convertToString(newval)
	// storing the value via convertToAnypb() into e.NewAny would also
	// work, but it's a bit overkill for this use
	return e
}
// SetMemory returns a change event that updates the droplet's memory
// to b bytes, or nil when the value is already b.
func (d *DropletT) SetMemory(b int64) *pb.Event {
	oldm := pb.HumanFormatBytes(d.pb.Memory)
	newm := pb.HumanFormatBytes(b)
	if d.pb.Memory == b {
		// unchanged: no event is generated
		return nil
	}
	log.Info("droplet", d.pb.Hostname, "memory change from", oldm, "to", newm)
	return NewChangeEvent(d.pb, "Droplet.Memory", d.pb.Memory, b)
}
// SetCpus currently only logs the requested cpu count; the droplet
// protobuf itself is not modified yet.
func (d *DropletT) SetCpus(b int64) {
	log.Info("Set the number of cpus for the droplet", b)
}

170
config.go
View File

@ -1,170 +0,0 @@
package main
/*
All the information is defined by protobuf files
The config files written out by default into
~/.config/virtigo/
protobuf definitions are by nature non-relational
so each protobuf is written out as a seperate file.
This seems like the simpilist way to handle this.
*/
import (
"errors"
"fmt"
"os"
"path/filepath"
"time"
pb "go.wit.com/lib/protobuf/virtbuf"
"go.wit.com/log"
)
var ErrorNoFile error = errors.New("missing file")
var ErrorParseJSON error = errors.New("invalid json")
var ErrorParseXML error = errors.New("invalid xml")
// something is wrong somewhere and sometimes the
// protobuf json files get written out with garbage
// cfgfile loads the cluster config, trying virtigo.json first and
// falling back to virtigo.json.last. A parse failure of the primary
// file and a completely unreadable config are both fatal.
//
// NOTE(review): the ErrorParseJSON / ErrorNoFile comparisons only
// work if readConfigFile() returns those sentinels -- verify.
func cfgfile() {
	err := readConfigFile("virtigo.json")
	if err == nil {
		return
	}
	if err == ErrorParseJSON {
		// garbage json: don't silently fall back; make the user look
		os.Exit(-1)
	}
	err = readConfigFile("virtigo.json.last")
	if err == nil {
		// bug fix: the old code logged "read json failed" and exited
		// here even though the fallback file loaded successfully
		return
	}
	log.Info("read json failed", err)
	if err == ErrorNoFile {
		log.Info("no config file created yet", err)
	}
	// bug fix: previously an unknown error fell through and returned
	// normally with no config loaded; treat every failure as fatal
	os.Exit(-1)
}
// readConfigFile loads filename from $VIRTIGO_HOME into me.cluster
// and registers an in-memory HyperT / DropletT wrapper for every
// hypervisor and droplet not already known.
//
// Returns ErrorNoFile when the file does not exist and ErrorParseJSON
// when it exists but cannot be unmarshaled, so the caller can decide
// whether to fall back to a backup copy.
func readConfigFile(filename string) error {
	me.cluster = new(pb.Cluster)
	fullname := filepath.Join(os.Getenv("VIRTIGO_HOME"), filename)
	pfile, err := os.ReadFile(fullname)
	if err != nil {
		log.Info("open config file :", err)
		if os.IsNotExist(err) {
			// map to the sentinel the callers compare against
			return ErrorNoFile
		}
		return err
	}
	if err := me.cluster.UnmarshalJSON(pfile); err != nil {
		// bug fix: report the parse failure to the caller instead of
		// calling os.Exit() here; the caller decides how fatal it is
		log.Info("read json failed", err)
		return ErrorParseJSON
	}
	// initialize each hypervisor not seen before
	for _, pbh := range me.cluster.Hypervisors {
		h := findHypervisor(pbh.Hostname)
		if h != nil {
			continue
		}
		// a new hypervisor from the config file
		h = new(HyperT)
		h.pb = pbh
		h.lastpoll = time.Now()
		me.hypers = append(me.hypers, h)
		log.Log(EVENT, "config new hypervisors", h.pb.Hostname)
	}
	var total int
	// initialize values for each droplet not seen before
	for _, pbd := range me.cluster.Droplets {
		d := findDroplet(pbd.Hostname)
		if d != nil {
			continue
		}
		// a new droplet from the config file
		d = new(DropletT)
		d.pb = pbd
		me.droplets = append(me.droplets, d)
		log.Log(EVENT, "config new droplet", d.pb.Hostname, d.pb.StartState, d.pb.PreferredHypervisor)
		total += 1
	}
	log.Log(EVENT, "Total Droplet count:", total)
	return nil
}
// writeConfigFile persists the cluster state under $VIRTIGO_HOME:
// first a timestamped virtigo.json.new.* copy, then virtigo.json
// itself (rotating the previous file to virtigo.json.old), and
// finally the events protobuf in JSON and TEXT form. Any failure
// exits the process.
func writeConfigFile() {
	// Get the current time
	now := time.Now()
	// Format the time to match your desired format: YYYY.MM.DD.HHMMSS
	timestamp := now.Format("2006.01.02.150405")
	// write a timestamped copy first so a crash mid-rotate still
	// leaves a complete snapshot on disk
	filename := "virtigo.json.new." + timestamp
	if !writeConfigFileTmp(filename) {
		log.Println("config file write error")
		os.Exit(-1)
	}
	// rotate: virtigo.json -> virtigo.json.old
	origname := filepath.Join(os.Getenv("VIRTIGO_HOME"), "virtigo.json")
	newname := filepath.Join(os.Getenv("VIRTIGO_HOME"), "virtigo.json.old")
	err := os.Rename(origname, newname)
	if err != nil {
		log.Printf("rename fail: %s", err)
		os.Exit(-1)
	}
	if !writeConfigFileTmp("virtigo.json") {
		log.Println("config file write error")
		os.Exit(-1)
	}
	// NOTE(review): these exit when WriteConfig*() returns true --
	// presumably true signals failure; confirm against the virtbuf API
	if me.events.WriteConfigJSON() {
		os.Exit(-1)
	}
	if me.events.WriteConfigTEXT() {
		os.Exit(-1)
	}
	/*
		if me.cluster.Droplets.WriteConfigJSON() {
			os.Exit(-1)
		}
		if me.cluster.Droplets.WriteConfigTEXT() {
			os.Exit(-1)
		}
	*/
}
// writeConfigFileTmp writes the cluster protobuf as JSON to
// $VIRTIGO_HOME/<filename>. Returns false on any error.
func writeConfigFileTmp(filename string) bool {
	fullname := filepath.Join(os.Getenv("VIRTIGO_HOME"), filename)
	// bug fix: O_TRUNC added -- without it, rewriting a shorter JSON
	// left trailing bytes of the old file behind, producing exactly
	// the kind of garbage config this package complains about
	cfgfile, err := os.OpenFile(fullname, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		log.Info("open config file :", err)
		return false
	}
	// bug fix: defer Close() only after the error check; the old code
	// deferred Close() on a nil *os.File when OpenFile failed
	defer cfgfile.Close()
	json := me.cluster.FormatJSON()
	fmt.Fprintln(cfgfile, json)
	log.Info("Write:", fullname, "OK")
	return true
}
// writeConfigFileDroplets writes the cluster in its human-readable
// TEXT form to $VIRTIGO_HOME/droplets.text. Errors are logged and
// the write is skipped.
func writeConfigFileDroplets() {
	fullname := filepath.Join(os.Getenv("VIRTIGO_HOME"), "droplets.text")
	// bug fix: O_TRUNC added so a shorter rewrite doesn't leave
	// trailing bytes from the previous contents
	cfgfile, err := os.OpenFile(fullname, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		log.Info("open config file :", err)
		return
	}
	// bug fix: defer Close() only after the error check; the old code
	// deferred Close() on a nil *os.File when OpenFile failed
	defer cfgfile.Close()
	// text := me.cluster.Droplets.FormatTEXT()
	text := me.cluster.FormatTEXT()
	fmt.Fprintln(cfgfile, text)
	log.Info("Write:", fullname, "OK")
}

15
control Normal file
View File

@ -0,0 +1,15 @@
Source: virtigo
Build-Depends: golang
Package: virtigo
Maintainer: Jeff Carr <jcarr@wit.com>
Architecture: amd64
Recommends: virtigod
Depends: gus, remmina, remmina-plugin-spice
URL: https://go.wit.com/apps/virtigo
Description: control your virtual machines in your cluster
 lets you start, stop, etc. virtual machines
converts libvirt xml into protobuf definitions
 communicates with hypervisors using protobufs
uses virsh to start & stop
maintains unique mac address table
 serves cluster status on :8080 for uptime checks (e.g. Kuma)

416
doAdminGui.go Normal file
View File

@ -0,0 +1,416 @@
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
// Use of this source code is governed by the GPL 3.0
package main
// An app to submit patches for the 30 GO GUI repos
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"os/user"
"time"
"go.wit.com/gui"
"go.wit.com/lib/gadgets"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/log"
)
// refresh the windows & tables the user has open
// refresh re-fetches this cluster's state from the virtigo daemon:
// uptime text, droplets, hypervisors and events, each via a POST to
// the corresponding /…PB endpoint. Fetch failures for one section are
// logged and the others are still attempted; unmarshal failures abort
// with the error. Requires admin.url to be set.
func (admin *adminT) refresh() error {
	if argv.Verbose {
		log.Info("virtigo scan here")
	}
	if admin.url == nil {
		log.Info("admin url == nil")
		return fmt.Errorf("admin url == nil")
	}
	// placeholder request body; the endpoints only need a POST
	msg := []byte(`{"message": "Hello"}`)
	// display the uptime
	if data, err := postData(admin.url.String()+"/uptime", msg); err != nil {
		log.Info("/uptime Error:", err)
	} else {
		log.Info("Response:", string(data))
		admin.uptime.SetText(string(data))
	}
	// update the droplet list
	if data, err := postData(admin.url.String()+"/DropletsPB", msg); err != nil {
		log.Info("/DropletsPB Error:", err)
	} else {
		fmt.Println("DropletsPB Response len:", len(data))
		admin.cluster.Droplets = new(virtpb.Droplets)
		if err := admin.cluster.Droplets.Unmarshal(data); err != nil {
			fmt.Println("droplets marshal failed", err)
			return err
		}
		fmt.Println("Droplet len=", admin.cluster.Droplets.Len())
	}
	// update the hypervisor list
	if data, err := postData(admin.url.String()+"/HypervisorsPB", msg); err != nil {
		log.Info("Error:", err)
	} else {
		fmt.Println("HypervisorsPB Response len:", len(data))
		admin.cluster.Hypervisors = new(virtpb.Hypervisors)
		if err := admin.cluster.Hypervisors.Unmarshal(data); err != nil {
			fmt.Println("hypervisors marshal failed", err)
			return err
		}
		fmt.Println("Hypervisors len=", admin.cluster.Hypervisors.Len())
	}
	// update the events list
	if data, err := postData(admin.url.String()+"/EventsPB", msg); err != nil {
		log.Info("Error:", err)
	} else {
		fmt.Println("EventsPB Response len:", len(data))
		admin.cluster.Events = new(virtpb.Events)
		if err := admin.cluster.Events.Unmarshal(data); err != nil {
			fmt.Println("events marshal failed", err)
			return err
		}
		fmt.Println("Events len=", admin.cluster.Events.Len())
	}
	return nil
}
var client *http.Client
// doLocalhostAdminGui builds the admin controls (uptime label plus
// hypervisor/droplet/event table buttons and a refresh button) into
// the existing main window group me.gwin and returns the adminT that
// backs them. The table windows open lazily on button press.
func doLocalhostAdminGui() *adminT {
	admin := new(adminT)
	admin.uptime = me.gwin.Group.NewLabel("uptime")
	grid := me.gwin.Group.RawGrid()
	grid.NewButton("show hypervisors", func() {
		// refresh() must have populated the protobufs first
		if admin.cluster.Hypervisors == nil {
			log.Info("hypervisors not initialized")
			return
		}
		log.Info("Hypervisors len=", admin.cluster.Hypervisors.Len())
		admin.hwin = newHypervisorsWindow()
		admin.hwin.doStdHypervisors(admin.cluster.Hypervisors)
		admin.hwin.win.Custom = func() {
			log.Info("hiding table window")
		}
	})
	grid.NewButton("droplets", func() {
		if admin.cluster.Droplets == nil {
			log.Info("droplets not initialized")
			return
		}
		admin.dwin = newDropletsWindow(admin)
		admin.dwin.win.Custom = func() {
			log.Info("hiding droplet table window")
		}
		// show only the droplets that are currently ON
		var found *virtpb.Droplets
		found = virtpb.NewDroplets()
		all := admin.cluster.Droplets.All()
		for all.Scan() {
			vm := all.Next()
			if vm.Current.State != virtpb.DropletState_ON {
				continue
			}
			found.Append(vm)
		}
		admin.dwin.doActiveDroplets(found)
	})
	grid.NewButton("events", func() {
		if admin.cluster.Events == nil {
			log.Info("events are not initialized")
			return
		}
		log.Info("Events len=", admin.cluster.Events.Len())
		admin.ewin = newEventsWindow()
		admin.ewin.doStdEvents(admin.cluster.Events)
		admin.ewin.win.Custom = func() {
			log.Info("hiding table window")
		}
	})
	grid.NextRow()
	grid.NewButton("refresh", func() {
		admin.refresh()
	})
	return admin
}
// doAdminGui opens the main admin window, creates one adminT group
// per configured cluster, and then blocks forever as the GUI's idle
// loop. Closing the window exits the process.
func doAdminGui() {
	// Initialize a persistent client with a custom Transport
	client = &http.Client{
		Transport: &http.Transport{
			DisableKeepAlives: false, // Ensure Keep-Alive is enabled
		},
		Timeout: 10 * time.Second, // Set a reasonable timeout
	}
	me.gwin = gadgets.NewGenericWindow("Virtigo: (run your cluster)", "")
	me.gwin.Custom = func() {
		log.Warn("Main window close")
		os.Exit(0)
	}
	// one adminT per cluster from the config file
	me.cmap = make(map[*virtpb.Cluster]*adminT)
	for c := range me.clusters.IterAll() {
		a := new(adminT)
		me.cmap[c] = a
		log.Info("found in the config file", c.URL[0])
		a.makeClusterGroup(c)
	}
	// sit here forever refreshing the GUI
	for {
		// admin.refresh()
		log.Info("todo: refresh() protobufs here")
		time.Sleep(90 * time.Second)
	}
}
// doAdminGui (method form) opens the admin window for this adminT,
// wires up the hypervisor/droplet/event table buttons plus refresh
// and a GUI-close test button, builds a group per configured cluster,
// and then loops forever refreshing every 90 seconds. Never returns.
// NOTE(review): this largely duplicates doAdminGui() and
// doLocalhostAdminGui() -- candidates for consolidation.
func (admin *adminT) doAdminGui() {
	// Initialize a persistent client with a custom Transport
	client = &http.Client{
		Transport: &http.Transport{
			DisableKeepAlives: false, // Ensure Keep-Alive is enabled
		},
		Timeout: 10 * time.Second, // Set a reasonable timeout
	}
	win := gadgets.NewGenericWindow("Virtigo: (run your cluster)", "localhost")
	win.Custom = func() {
		log.Warn("Main window close")
		os.Exit(0)
	}
	me.gwin = win
	admin.uptime = win.Group.NewLabel("uptime")
	grid := win.Group.RawGrid()
	grid.NewButton("show hypervisors", func() {
		if admin.cluster.Hypervisors == nil {
			log.Info("hypervisors not initialized")
			return
		}
		log.Info("Hypervisors len=", admin.cluster.Hypervisors.Len())
		admin.hwin = newHypervisorsWindow()
		admin.hwin.doStdHypervisors(admin.cluster.Hypervisors)
		admin.hwin.win.Custom = func() {
			log.Info("hiding table window")
		}
	})
	grid.NewButton("droplets", func() {
		if admin.cluster.Droplets == nil {
			log.Info("droplets not initialized")
			return
		}
		admin.dwin = newDropletsWindow(admin)
		admin.dwin.win.Custom = func() {
			log.Info("hiding droplet table window")
		}
		// show only the droplets that are currently ON
		var found *virtpb.Droplets
		found = virtpb.NewDroplets()
		all := admin.cluster.Droplets.All()
		for all.Scan() {
			vm := all.Next()
			if vm.Current.State != virtpb.DropletState_ON {
				continue
			}
			found.Append(vm)
		}
		admin.dwin.doActiveDroplets(found)
	})
	grid.NewButton("events", func() {
		if admin.cluster.Events == nil {
			log.Info("events are not initialized")
			return
		}
		log.Info("Events len=", admin.cluster.Events.Len())
		admin.ewin = newEventsWindow()
		admin.ewin.doStdEvents(admin.cluster.Events)
		admin.ewin.win.Custom = func() {
			log.Info("hiding table window")
		}
	})
	grid.NextRow()
	grid.NewButton("refresh", func() {
		admin.refresh()
	})
	grid.NewButton("test gui close", func() {
		gui.StandardExit()
		// okExit("admin close")
	})
	// one adminT group per cluster from the config file
	me.cmap = make(map[*virtpb.Cluster]*adminT)
	for c := range me.clusters.IterAll() {
		a := new(adminT)
		me.cmap[c] = a
		log.Info("found in the config file", c.URL[0])
		a.makeClusterGroup(c)
	}
	// sit here forever refreshing the GUI
	for {
		admin.refresh()
		time.Sleep(90 * time.Second)
	}
}
// makeClusterGroup adds a GUI group for cluster c to the bottom of
// the main window: an uptime label and hypervisor/droplet/event table
// buttons plus refresh. It parses c.URL[0] into admin.url (fatal on
// error), does an initial refresh(), and only offers the "save
// cluster.pb" button when that refresh succeeded.
func (admin *adminT) makeClusterGroup(c *virtpb.Cluster) {
	var err error
	admin.url, err = url.Parse(c.URL[0])
	if err != nil {
		badExit(err)
	}
	if admin.cluster == nil {
		// local shadow of the remote cluster; filled in by refresh()
		admin.cluster = new(virtpb.Cluster)
		admin.cluster.Name = c.Name
		admin.cluster.Uuid = c.Uuid
	}
	// fall back to the hostname when the cluster has no name
	name := c.GetName()
	if name == "" {
		name = admin.url.Hostname()
	}
	group := me.gwin.Bottom.NewGroup(name)
	admin.uptime = group.NewLabel("uptime")
	grid := group.RawGrid()
	grid.NewButton("show hypervisors", func() {
		if admin.cluster.Hypervisors == nil {
			log.Info("hypervisors not initialized")
			return
		}
		log.Info("Hypervisors len=", admin.cluster.Hypervisors.Len())
		admin.hwin = newHypervisorsWindow()
		admin.hwin.doStdHypervisors(admin.cluster.Hypervisors)
		admin.hwin.win.Custom = func() {
			log.Info("hiding table window")
		}
	})
	grid.NewButton("droplets", func() {
		if admin.cluster.Droplets == nil {
			log.Info("droplets not initialized")
			return
		}
		admin.dwin = newDropletsWindow(admin)
		admin.dwin.win.Custom = func() {
			log.Info("hiding droplet table window")
		}
		// show only the droplets that are currently ON
		var found *virtpb.Droplets
		found = virtpb.NewDroplets()
		all := admin.cluster.Droplets.All()
		for all.Scan() {
			vm := all.Next()
			if vm.Current.State != virtpb.DropletState_ON {
				continue
			}
			found.Append(vm)
		}
		admin.dwin.doActiveDroplets(found)
	})
	grid.NewButton("events", func() {
		if admin.cluster.Events == nil {
			log.Info("events are not initialized")
			return
		}
		log.Info("Events len=", admin.cluster.Events.Len())
		admin.ewin = newEventsWindow()
		admin.ewin.doStdEvents(admin.cluster.Events)
		admin.ewin.win.Custom = func() {
			log.Info("hiding table window")
		}
	})
	grid.NewButton("refresh", func() {
		admin.refresh()
	})
	if err := admin.refresh(); err != nil {
		return
	}
	grid.NewButton("save cluster.pb", func() {
		admin.cluster.ConfigSave()
	})
}
// postData POSTs data to url using the shared keep-alive client and
// returns the full response body. The current OS username is sent in
// an "author" header when it can be determined.
func postData(url string, data []byte) ([]byte, error) {
	req, err := http.NewRequest(http.MethodPost, url, bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}
	// bug fix: the old code ignored user.Current()'s error and then
	// dereferenced the (possibly nil) *user.User, which panics
	if usr, err := user.Current(); err == nil {
		req.Header.Set("author", usr.Username)
	}
	req.Header.Set("Connection", "keep-alive") // Ensure keep-alive is used
	resp, err := client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("failed to read response: %w", err)
	}
	return body, nil
}
// postEvent marshals event e, POSTs it to this cluster's /event
// endpoint, and unmarshals the daemon's reply back into a result
// event. Returns an error when the round trip fails or when the
// result carries a non-empty Error field.
func (admin *adminT) postEvent(e *virtpb.Event) error {
	var result *virtpb.Event
	result = new(virtpb.Event)
	msg, err := e.Marshal()
	if err != nil {
		log.Info("postEvent() marshal() failed", err, e)
		return err
	}
	url := admin.url.String() + "/event"
	// send the event and decode the daemon's result event
	if data, err := postData(url, msg); err != nil {
		log.Info("postEvent() /event Error:", err)
		return err
	} else {
		if err := result.Unmarshal(data); err != nil {
			log.Println("postEvent() result marshal failed", err, "len(dat) =", len(data))
			log.Println("postEvent() data =", string(data))
			return err
		} else {
			log.Println("postEvent() result marshal worked on len(dat) =", len(data))
			log.Println("postEvent() result =", result.FormatTEXT())
		}
	}
	// the daemon reports handler-level failures in result.Error
	if result.Error != "" {
		return fmt.Errorf("%s", result.Error)
	}
	log.Printf("Event worked to %s uuid=%s\n", url, result.DropletUuid)
	return nil
}

154
doDaemon.go Normal file
View File

@ -0,0 +1,154 @@
// Copyright 2024 WIT.COM Inc Licensed GPL 3.0
package main
import (
"fmt"
"time"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/lib/virtigolib"
"go.wit.com/log"
)
// doDaemon runs virtigo in daemon mode: it initializes timing
// defaults, loads the cluster protobuf config, validates droplets and
// disk filenames, optionally imports libvirt XML files given via
// --libvirt (saving and returning if anything changed), then starts a
// watchdog goroutine per hypervisor and finally the HTTP server.
func doDaemon() error {
	// set defaults
	me.unstable = time.Now() // initialize the grid as unstable
	me.changed = false
	me.hmap = make(map[*virtpb.Hypervisor]*HyperT)
	// how long a droplet can be missing until it's declared dead
	me.unstableTimeout = 17 * time.Second
	me.missingDropletTimeout = time.Minute // not sure the difference between these values
	// how often to poll the hypervisors
	me.hyperPollDelay = 5 * time.Second
	// how long the cluster must be stable before new droplets can be started
	me.clusterStableDuration = 37 * time.Second
	me.cluster = virtpb.InitCluster()
	if err := me.cluster.ConfigLoad(); err != nil {
		log.Info("config load error", err)
		log.Info("")
		log.Info("You have never run this before")
		log.Info("init example cloud here")
		log.Sleep(2)
		return err
	}
	// mark every configured droplet OFF until a hypervisor reports it
	loop := me.cluster.DropletsAll() // get the list of droplets
	for loop.Scan() {
		d := loop.Next()
		if d == nil {
			fmt.Println("d == nil")
			return fmt.Errorf("d == nil")
		}
		fmt.Println("Droplet UUID:", d.Uuid)
		if d.Current == nil {
			d.Current = new(virtpb.Current)
		}
		d.SetState(virtpb.DropletState_OFF)
		log.Info("droplet", d.Hostname)
	}
	// NOTE(review): hard-coded lookup test -- looks like leftover
	// debugging; confirm whether it can be removed
	hmm := "pihole.wit.com"
	d := me.cluster.FindDropletByName(hmm)
	if d == nil {
		log.Info("did not find found droplet", hmm)
	} else {
		log.Info("found droplet", d.Hostname, d)
	}
	var newEvents []*virtpb.Event
	// sanity check the cluster & droplets
	if _, _, err := ValidateDroplets(); err != nil {
		log.Info("todo: add flag to ignore. for now, fix problems in the config file.")
		return err
	}
	newe, err := ValidateDiskFilenames()
	if err != nil {
		log.Info(err)
		return err
	}
	// collect events generated by the filename validation
	for _, e := range newe {
		newEvents = append(newEvents, e)
	}
	ValidateUniqueFilenames()
	// import any libvirt XML files passed on the command line
	for _, filename := range argv.Xml {
		domcfg, err := virtigolib.ReadXml(filename)
		if err != nil {
			// parsing the libvirt xml file failed
			log.Info("error:", filename, err)
			log.Info("readXml() error", filename)
			log.Info("readXml() error", err)
			log.Info("libvirt XML will have to be fixed by hand")
			return err
		}
		// this is a new droplet. add it to the cluster
		log.Info("Add XML Droplet here", domcfg.Name)
		_, newe, err := virtigolib.AddDomainDroplet(me.cluster, domcfg)
		if err != nil {
			log.Info("addDomainDroplet() error", filename)
			log.Info("addDomainDroplet() error", err)
			log.Info("libvirt XML will have to be fixed by hand")
			return err
		}
		for _, e := range newe {
			newEvents = append(newEvents, e)
		}
	}
	// any accumulated event means the config must be re-saved
	for i, e := range newEvents {
		log.Info(i, "Event:", e.Droplet, e.FieldName, "orig:", e.OrigVal, "new:", e.NewVal)
		me.changed = true
	}
	if me.changed {
		if err := me.cluster.ConfigSave(); err != nil {
			log.Info("configsave error", err)
			return err
		}
		log.Info("XML changes saved in protobuf config")
		// import-only run: exit after saving
		return nil
	}
	if len(argv.Xml) != 0 {
		log.Info("No XML changes found")
		return fmt.Errorf("No XML changes found")
	}
	// initialize each hypervisor
	for _, pbh := range me.cluster.H.Hypervisors {
		// this is a new unknown droplet (not in the config file)
		var h *HyperT
		h = new(HyperT)
		h.pb = pbh
		h.lastDroplets = make(map[string]time.Time)
		h.lastpoll = time.Now()
		me.hmap[pbh] = h
		me.hypers = append(me.hypers, h)
		log.Log(EVENT, "config new hypervisors", h.pb.Hostname)
	}
	// start the watchdog polling for each hypervisor
	for _, h := range me.hypers {
		log.Info("starting polling on", h.pb.Hostname)
		// start a watchdog on each hypervisor
		go h.NewWatchdog()
	}
	// NOTE(review): another hard-coded lookup -- appears to be
	// leftover debugging; confirm
	var cloud *virtigolib.CloudManager
	cloud = virtigolib.NewCloud()
	found, _ := cloud.FindDropletByName("www.wit.com")
	if found == nil {
		log.Info("d == nil")
	} else {
		log.Info("d == ", found)
	}
	// blocks serving HTTP events
	startHTTP()
	return nil
}

361
doDroplet.go Normal file
View File

@ -0,0 +1,361 @@
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
// Use of this source code is governed by the GPL 3.0
package main
// An app to submit patches for the 30 GO GUI repos
import (
"fmt"
"math/rand"
"net/http"
"net/url"
"path/filepath"
"strings"
"time"
"github.com/google/uuid"
"go.wit.com/lib/gui/shell"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/log"
)
// doDroplet implements the "droplet" CLI subcommand. It loads the
// cluster list from the config, fetches each cluster's droplets over
// HTTP, finds the droplet named by --name, and performs the requested
// action (show or start). Returns a short status string, or an error
// when the droplet is not found or the action fails.
func doDroplet() (string, error) {
	err := me.clusters.ConfigLoad()
	if err != nil {
		return "", err
	}
	// placeholder request body; the endpoints only need a POST
	msg := []byte(`{"message": "Hello"}`)
	// Initialize a persistent client with a custom Transport
	client = &http.Client{
		Transport: &http.Transport{
			DisableKeepAlives: false, // Ensure Keep-Alive is enabled
		},
		Timeout: 10 * time.Second, // Set a reasonable timeout
	}
	me.cmap = make(map[*virtpb.Cluster]*adminT)
	for c := range me.clusters.IterAll() {
		var err error
		admin := new(adminT)
		if admin.cluster == nil {
			admin.cluster = new(virtpb.Cluster)
		}
		me.cmap[c] = admin
		log.Info("found in the config file", c.URL[0])
		// a.makeClusterGroup(c)
		admin.url, err = url.Parse(c.URL[0])
		if err != nil {
			return "", err
		}
		// update the droplet list; skip this cluster on failure
		if data, err := postData(admin.url.String()+"/DropletsPB", msg); err != nil {
			log.Info("/DropletsPB Error:", err)
			continue
		} else {
			admin.cluster.Droplets = new(virtpb.Droplets)
			if err := admin.cluster.Droplets.Unmarshal(data); err != nil {
				log.Printf("DropletsPB Response len:%d\n", len(data))
				log.Println("droplets marshal failed", err)
				continue
			}
		}
		log.Printf("Cluster Name: %s\n", c.Name)
		log.Printf("Number of Droplets: %d\n", admin.cluster.Droplets.Len())
		if argv.Droplet.Name == "" {
			return "", fmt.Errorf("--name droplet name was empty")
		}
		// scan this cluster for the named droplet
		var found *virtpb.Droplets
		found = virtpb.NewDroplets()
		all := admin.cluster.Droplets.All()
		for all.Scan() {
			vm := all.Next()
			if argv.Droplet.Name == vm.Hostname {
				if argv.Droplet.Show != nil {
					// dump the droplet protobuf as text
					log.Info(vm.SprintHeader())
					txt := vm.FormatTEXT()
					log.Info(txt)
					return "droplet status", nil
				}
				if argv.Droplet.Start != nil {
					// send a POWERON event to this cluster
					log.Info("should start droplet here")
					log.Info(vm.SprintHeader())
					e := new(virtpb.Event)
					e.Etype = virtpb.EventType_POWERON
					e.DropletUuid = vm.Uuid
					if err := admin.postEvent(e); err != nil {
						return "droplet start err", err
					}
					return "droplet start", nil
				}
				return "droplet found", fmt.Errorf("do what to the droplet?")
			}
			found.Append(vm)
		}
		log.Println("On Droplet count=", found.Len())
	}
	return "", fmt.Errorf("droplet %s not found", argv.Droplet.Name)
}
// doEvent handles one incoming event on the local cluster (POWERON,
// EDIT or ADD) and returns a result event whose State is DONE or
// FAIL; failures carry details in Error.
func doEvent(e *virtpb.Event) *virtpb.Event {
	result := new(virtpb.Event)
	if e.Etype == virtpb.EventType_POWERON {
		log.Println("power on droplet on local cluster here", e.DropletUuid)
		result.State = virtpb.Event_DONE
		rs, err := Start(e.DropletUuid)
		log.Println("Start() returned", rs)
		log.Println("Start() returned err", err)
		if err != nil {
			result.Error = fmt.Sprintf("%v", err)
		}
		return result
	}
	if e.Etype == virtpb.EventType_EDIT {
		log.Println("edit event", e.DropletUuid)
		result.State = virtpb.Event_DONE
		if e.Droplet != nil {
			return updateDroplet(e.Droplet)
		}
		log.Println("unknown edit event")
		result.State = virtpb.Event_FAIL
		return result
	}
	if e.Etype == virtpb.EventType_ADD {
		// bug fix: check for a nil droplet BEFORE dereferencing it;
		// the old code called e.Droplet.FormatTEXT() first, which
		// panicked on a malformed ADD event
		if e.Droplet == nil {
			result.State = virtpb.Event_FAIL
			return result
		}
		log.Println("START ADD droplet event", e.Droplet.FormatTEXT())
		result.DropletName = e.Droplet.Hostname
		result.Error = e.Droplet.FormatTEXT() // feedback to the other side for debugging
		// attempt to create the new droplet
		if err := createDroplet(e.Droplet, result); err != nil {
			result.Error += fmt.Sprintf("createDroplet() err: %v", err)
			result.State = virtpb.Event_FAIL
			return result
		}
		log.Println("create droplet worked", e.Droplet.FormatTEXT())
		result.State = virtpb.Event_DONE
		return result
	}
	log.Println("unknown event", e)
	result.Etype = e.Etype
	result.State = virtpb.Event_FAIL
	return result
}
// updateDroplet applies an edit event to an existing droplet, looked
// up by uuid. Hostname, Cpus and Memory may be updated; the cluster
// config is saved only when something actually changed. Memory values
// of 64MB or less are rejected as implausible.
func updateDroplet(newd *virtpb.Droplet) *virtpb.Event {
	result := new(virtpb.Event)
	if newd == nil {
		result.Error = "updateDroplet() d == nil"
		result.State = virtpb.Event_FAIL
		return result
	}
	d := me.cluster.FindDropletByUuid(newd.Uuid)
	if d == nil {
		result.Error = "updateDroplet() could not find uuid"
		result.State = virtpb.Event_FAIL
		return result
	}
	log.Println("found droplet to update:", newd.Uuid, newd.Hostname, newd.Cpus, newd.Memory)
	dirty := false
	if newd.Hostname != "" && newd.Hostname != d.Hostname {
		d.Hostname = newd.Hostname
		dirty = true
	}
	if newd.Cpus > 0 && newd.Cpus != d.Cpus {
		d.Cpus = newd.Cpus
		dirty = true
	}
	// arbitrary floor: don't make vm's with less than 64 MB of RAM --
	// big enough that most things will load with some stdout
	if newd.Memory > (64*1024*1024) && newd.Memory != d.Memory {
		d.Memory = newd.Memory
		dirty = true
	}
	if !dirty {
		log.Println("nothing changed in", newd.Uuid, newd.Hostname)
		result.State = virtpb.Event_DONE
		return result
	}
	if err := me.cluster.ConfigSave(); err != nil {
		log.Info("configsave error", err)
		result.Error = fmt.Sprintf("%v", err)
		result.State = virtpb.Event_FAIL
		return result
	}
	result.State = virtpb.Event_DONE
	return result
}
// createDroplet validates and adds a brand new droplet to the local
// cluster: it assigns a uuid if missing, rejects duplicate uuids and
// hostnames, creates the network interface and disk image, appends
// the droplet to the cluster, and saves the config. result is the
// event being built by the caller (currently unused here).
func createDroplet(newd *virtpb.Droplet, result *virtpb.Event) error {
	if newd == nil {
		return fmt.Errorf("droplet protobuf == nil")
	}
	if newd.Uuid == "" {
		newd.Uuid = uuid.New().String()
	}
	d := me.cluster.FindDropletByUuid(newd.Uuid)
	if d != nil {
		return fmt.Errorf("droplet uuid already used")
	}
	log.Println("found droplet to update:", newd.Uuid, newd.Hostname, newd.Cpus, newd.Memory)
	if newd.Hostname == "" {
		return fmt.Errorf("Hostname can not be blank")
	}
	d = me.cluster.FindDropletByName(newd.Hostname)
	if d != nil {
		return fmt.Errorf("hostname already defined")
	}
	// by default, on locally imported domains, set the preferred hypervisor!
	// NOTE(review): "farm03" is hard-coded -- presumably site-specific;
	// should come from config
	newd.LocalOnly = "yes on: " + "farm03"
	newd.PreferredHypervisor = "farm03"
	// new droplets start out powered off
	newd.StartState = virtpb.DropletState_OFF
	newd.Current = new(virtpb.Current)
	newd.Current.State = virtpb.DropletState_OFF
	// create the network
	if err := createNetwork(newd); err != nil {
		return err
	}
	// create the disks
	if err := createDisks(newd); err != nil {
		return err
	}
	// append the protobuf and save it
	me.cluster.AddDroplet(newd)
	if err := me.cluster.ConfigSave(); err != nil {
		log.Info("configsave error", err)
		return fmt.Errorf("ConfigSave() error: %v", err)
	}
	return nil
}
// findDisks is meant to verify disks already attached to the droplet.
// TODO: implement; for now it only logs and reports success.
func findDisks(d *virtpb.Droplet) error {
	log.Info("need to do this")
	return nil
}
// createDisks gives the droplet its first disk: a qcow2 image named
// after the hostname, cloned with dd from a fixed base image on
// /home/nfs2. If the droplet already has disks it delegates to
// findDisks() instead. Errors if the path, base image, or a clash
// with an existing image prevents the copy.
// NOTE(review): base path and base image are hard-coded --
// presumably site-specific; should come from config.
func createDisks(d *virtpb.Droplet) error {
	if d.Disks != nil {
		return findDisks(d)
	}
	newdisk := new(virtpb.Disk)
	newdisk.Filename = d.Hostname + ".qcow2"
	newdisk.Filepath = "/home/nfs2"
	d.Disks = append(d.Disks, newdisk)
	basefile := "/home/nfs2/base2025.wit-5.qcow2"
	newfile := filepath.Join(newdisk.Filepath, newdisk.Filename)
	if !shell.Exists(newdisk.Filepath) {
		return fmt.Errorf("disk image path missing: %s", newdisk.Filepath)
	}
	if !shell.Exists(basefile) {
		return fmt.Errorf("basefile %s missing", basefile)
	}
	// refuse to clobber an existing image
	if shell.Exists(newfile) {
		return fmt.Errorf("disk image already exists: %s", newfile)
	}
	// clone the base image; dsync keeps the NFS write honest
	cmd := []string{"dd", "bs=100M", "status=progress", "oflag=dsync", "if=" + basefile, "of=" + newfile}
	result := shell.RunRealtime(cmd)
	if result.Exit != 0 {
		return fmt.Errorf("dd to %s failed %d\n%s\n%s", newfile, result.Exit, strings.Join(result.Stdout, "\n"), strings.Join(result.Stderr, "\n"))
	}
	return nil
}
// createNetwork gives the droplet its first network interface: a
// freshly allocated unique MAC on bridge "worldbr". It is a no-op
// when the droplet already has at least one network.
func createNetwork(d *virtpb.Droplet) error {
	// single guard replaces the old redundant nil-then-len checks
	// (NOTE(review): a non-nil empty slice now gets a network
	// created; previously it was skipped -- believed intended)
	if len(d.Networks) > 0 {
		// network already done
		return nil
	}
	mac := getNewMac()
	// bug fix: getNewMac() returns "" when the existing config has
	// duplicate MACs; the old code attached the empty MAC anyway
	if mac == "" {
		return fmt.Errorf("could not allocate a unique mac address")
	}
	n := new(virtpb.Network)
	n.Mac = mac
	n.Name = "worldbr"
	d.Networks = append(d.Networks, n)
	return nil
}
// getNewMac builds the set of MAC addresses already assigned across
// every droplet's interfaces and asks generateMAC() for an unused
// one. Returns "" when the existing config itself contains a
// duplicate MAC (which must be fixed first).
func getNewMac() string {
	// mac address -> owning hostname, used to detect duplicates
	used := make(map[string]string)
	iter := me.cluster.DropletsAll() // get the list of droplets
	for iter.Scan() {
		d := iter.Next()
		for _, n := range d.Networks {
			if _, ok := used[n.Mac]; ok {
				// same MAC appears twice in the config
				log.Info("duplicate MAC", n.Mac, used[n.Mac])
				log.Info("duplicate MAC", n.Mac, d.Hostname)
				return ""
			}
			used[n.Mac] = d.Hostname
		}
	}
	return generateMAC(used)
}
// generateMAC keeps producing random MAC addresses with the fixed
// 22:22:22 prefix (locally administered, unicast) until it finds one
// not present in macs.
func generateMAC(macs map[string]string) string {
	prefix := []byte{0x22, 0x22, 0x22}
	for {
		// three random bytes for the device part of the address
		suffix := make([]byte, 3)
		if _, err := rand.Read(suffix); err != nil {
			log.Fatalf("Failed to generate random bytes: %v", err)
		}
		mac := fmt.Sprintf("%02x:%02x:%02x:%02x:%02x:%02x",
			prefix[0], prefix[1], prefix[2],
			suffix[0], suffix[1], suffix[2])
		if _, taken := macs[mac]; taken {
			log.Println("MAC already defined:", mac)
			continue
		}
		log.Println("Using new MAC:", mac)
		return mac
	}
}

205
doGui.go Normal file
View File

@ -0,0 +1,205 @@
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
// Use of this source code is governed by the GPL 3.0
package main
// An app to submit patches for the 30 GO GUI repos
import (
"fmt"
"os"
"strings"
"time"
"go.wit.com/lib/gadgets"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/log"
)
// debug is a placeholder background loop: it just sleeps forever.
// Kept as a hook for future periodic debugging output.
func debug() {
	const interval = 90 * time.Second
	for {
		time.Sleep(interval)
		// log.Info("TODO: use this?")
	}
}
// doGui builds the main virtigo window. Closing it exits the program.
func doGui() {
	win := gadgets.NewGenericWindow("Virtigo: (inventory your cluster)", "Local Cluster Settings")
	// closing the main window terminates the whole process
	win.Custom = func() {
		log.Warn("Main window close")
		os.Exit(0)
	}
	drawWindow(win)
}
// drawWindow populates the main window: a grid of buttons that open (or
// re-show) the hypervisor, droplet, and event table windows, plus a
// middle grid holding the status/uptime labels that updateUptimeGui()
// later writes to.
func drawWindow(win *gadgets.GenericWindow) {
	grid := win.Group.RawGrid()
	// hypervisor table window: created on first click, refreshed after
	var newHyperWin *stdHypervisorTableWin
	grid.NewButton("show hypervisors", func() {
		if newHyperWin != nil {
			log.Info("redraw hypervisors")
			newHyperWin.doNewStdHypervisors(me.cluster.H)
			return
		}
		log.Info("Hypervisors len=", me.cluster.H.Len())
		newHyperWin = newHypervisorsWindow()
		newHyperWin.doNewStdHypervisors(me.cluster.H)
		newHyperWin.win.Custom = func() {
			log.Info("hiding table window")
		}
	})
	// droplet table window: built once with only the droplets that are
	// currently ON, toggled on later clicks
	var dropWin *gadgets.GenericWindow
	grid.NewButton("droplets", func() {
		if dropWin != nil {
			dropWin.Toggle()
			return
		}
		d := me.cluster.GetDropletsPB()
		var found *virtpb.Droplets
		found = virtpb.NewDroplets()
		all := d.All()
		for all.Scan() {
			vm := all.Next()
			// only droplets in state ON are shown in this window
			if vm.Current.State != virtpb.DropletState_ON {
				continue
			}
			found.Append(vm)
		}
		dropWin, _ = makeDropletsWindow(found)
		dropWin.Win.Custom = func() {
			log.Info("hiding droplet table window")
		}
	})
	// event table window: created on first click, refreshed with the
	// latest events afterwards
	var ewin *stdEventTableWin
	grid.NewButton("events", func() {
		if ewin != nil {
			log.Info("update events here")
			e := me.cluster.GetEventsPB()
			log.Info("Events len=", e.Len())
			ewin.doStdEvents(e)
			return
		}
		ewin = newEventsWindow()
		ewin.win.Custom = func() {
			log.Info("hiding table window")
		}
		e := me.cluster.GetEventsPB()
		log.Info("Events len=", e.Len())
		ewin.doStdEvents(e)
	})
	grid.NextRow()
	// not implemented yet
	grid.NewButton("ConfigSave()", func() {
		log.Info("todo: make code for this")
	})
	// NOTE(review): despite the label, this currently just opens a
	// droplet table window with all droplets — no create form yet
	var testWin *gadgets.GenericWindow
	grid.NewButton("create droplet", func() {
		if testWin != nil {
			testWin.Toggle()
			return
		}
		d := me.cluster.GetDropletsPB()
		testWin, _ = makeDropletsWindow(d)
	})
	grid.NewButton("uptime", func() {
		updateUptimeGui("kuma uptime should update this")
	})
	grid.NextRow()
	// middle grid: labels later written by updateUptimeGui()
	grid = win.Middle.RawGrid()
	me.status = grid.NewLabel("cur status")
	grid.NextRow()
	me.lastuptime = grid.NewLabel("last uptime")
	grid.NextRow()
}
// updateUptimeGui shows the latest uptime string in the main window and
// timestamps when it arrived. Safe to call before the GUI exists.
func updateUptimeGui(uptime string) {
	if me.status == nil {
		// gui is not initialized
		return
	}
	me.status.SetLabel(uptime)
	if me.lastuptime == nil {
		// status and lastuptime are created together in drawWindow(),
		// but guard anyway rather than risk a nil dereference
		return
	}
	// Go's reference time layout is "2006-01-02 15:04:05"; the old
	// format used "03" (12-hour clock) in the seconds position, so the
	// seconds field showed the hour instead of the seconds.
	datestamp := time.Now().Format("2006-01-02 15:04:05")
	me.lastuptime.SetLabel("last uptime at " + datestamp)
}
// makeDropletsWindow builds a window showing the supplied droplets in a
// table (hostname, location, memory, cpus, spice port, age, state and
// MAC addresses) plus a couple of stub buttons. Returns the window and
// the table so callers can refresh or hide them.
func makeDropletsWindow(pb *virtpb.Droplets) (*gadgets.GenericWindow, *virtpb.DropletsTable) {
	win := gadgets.NewGenericWindow("Droplets registered with Virtigo", "Buttons of things")
	t := pb.NewTable("testDroptable")
	t.NewUuid()
	grid := win.Group.RawGrid()
	grid.NewButton("Create", func() {
		log.Info("todo: open create window here")
	})
	grid.NewButton("Show All", func() {
		log.Info("todo")
	})
	/*
		grid.NewButton("Update", func() {
			t.Update()
		})
	*/
	tbox := win.Bottom.Box()
	t.SetParent(tbox)
	t.AddHostname()
	t.AddStringFunc("location", func(d *virtpb.Droplet) string {
		// d.Current can be nil for droplets never seen on a hypervisor
		// (doList() checks for this); guard against a nil dereference
		if d.Current == nil {
			return ""
		}
		return d.Current.Hypervisor
	})
	t.AddMemory()
	t.AddCpus()
	t.AddSpicePort()
	t.AddTimeFunc("age", func(d *virtpb.Droplet) time.Time {
		if d.Current == nil {
			return time.Time{}
		}
		age := d.Current.OnSince.AsTime()
		log.Info("age", d.Hostname, virtpb.FormatDuration(time.Since(age)))
		return age
	})
	t.AddStringFunc("State", func(d *virtpb.Droplet) string {
		if d.Current == nil {
			return "UNKNOWN"
		}
		if d.Current.State == virtpb.DropletState_ON {
			return "ON"
		}
		if d.Current.State == virtpb.DropletState_OFF {
			return "OFF"
		}
		return "UNKNOWN"
	})
	t.AddStringFunc("mac addr", func(d *virtpb.Droplet) string {
		// one MAC per line when the droplet has several interfaces
		var macs []string
		for _, n := range d.Networks {
			macs = append(macs, n.Mac)
		}
		tmp := strings.Join(macs, "\n")
		return strings.TrimSpace(tmp)
	})
	t.ShowTable()
	return win, t
}
// makeEventsWindow builds a window containing a table of cluster events
// (droplet name and hypervisor per row) and a label with the count.
func makeEventsWindow(pb *virtpb.Events) *gadgets.GenericWindow {
	win := gadgets.NewGenericWindow("Cluster Events", "Buttons of things")

	grid := win.Group.RawGrid()
	grid.NewButton("List", func() {
		log.Info("list...")
	})
	grid.NewLabel(fmt.Sprintf("num of events = %d", pb.Len()))

	table := pb.NewTable("test 2")
	table.NewUuid()
	table.SetParent(win.Bottom.Box()) // a vertical box (like a stack of books)
	table.AddDropletName()
	table.AddHypervisor()
	table.ShowTable()
	return win
}

72
doList.go Normal file
View File

@ -0,0 +1,72 @@
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
// Use of this source code is governed by the GPL 3.0
package main
// Command-line droplet listing for virtigo; the previous comment about
// "patches for the 30 GO GUI repos" was copy-pasted from another project.
import (
"net/http"
"net/url"
"time"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/log"
)
// doList queries every cluster from the config file over HTTP and
// prints a summary of its droplets.
//
// For each configured cluster URL it POSTs a small JSON message to
// /DropletsPB, unmarshals the protobuf reply into that cluster's admin
// record, then prints each droplet (skipping OFF droplets when
// argv.List.On is set).
func doList() {
	msg := []byte(`{"message": "Hello"}`)

	// one persistent keep-alive client shared by every request
	client = &http.Client{
		Transport: &http.Transport{
			DisableKeepAlives: false, // Ensure Keep-Alive is enabled
		},
		Timeout: 10 * time.Second, // Set a reasonable timeout
	}

	me.cmap = make(map[*virtpb.Cluster]*adminT)
	for c := range me.clusters.IterAll() {
		admin := new(adminT)
		admin.cluster = new(virtpb.Cluster)
		me.cmap[c] = admin
		log.Info("found in the config file", c.URL[0])
		// a.makeClusterGroup(c)
		u, err := url.Parse(c.URL[0])
		if err != nil {
			badExit(err)
		}
		admin.url = u

		// update the droplet list
		data, err := postData(admin.url.String()+"/DropletsPB", msg)
		if err != nil {
			log.Info("/DropletsPB Error:", err)
			continue
		}
		admin.cluster.Droplets = new(virtpb.Droplets)
		if err := admin.cluster.Droplets.Unmarshal(data); err != nil {
			log.Printf("DropletsPB Response len:%d\n", len(data))
			log.Println("droplets marshal failed", err)
			continue
		}

		log.Printf("Cluster Name: %s\n", c.Name)
		log.Printf("Number of Droplets: %d\n", admin.cluster.Droplets.Len())

		// filter (optionally to ON droplets only) and print
		found := virtpb.NewDroplets()
		all := admin.cluster.Droplets.All()
		for all.Scan() {
			vm := all.Next()
			if vm.Current == nil {
				continue
			}
			if argv.List.On && (vm.Current.State == virtpb.DropletState_OFF) {
				continue
			}
			found.Append(vm)
			log.Info(vm.SprintHeader())
		}
		log.Println("On Droplet count=", found.Len())
	}
}

98
dump.go Normal file
View File

@ -0,0 +1,98 @@
package main
import (
"fmt"
"net/http"
"time"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/log"
)
/*
debugging code to see the state of the
cluster via http
*/
// dumpCluster writes the cluster's uuid->hostname and mac->uuid maps to
// the http response, followed by any validation error.
func dumpCluster(w http.ResponseWriter) {
	umap, macs, err := ValidateDroplets()
	for uuid, hostname := range umap {
		fmt.Fprintln(w, "uuid:", uuid, "hostname:", hostname)
	}
	for mac, uuid := range macs {
		fmt.Fprintln(w, "mac:", mac, "uuid", uuid, "hostname:", umap[uuid])
	}
	if err == nil {
		return
	}
	fmt.Fprintln(w, "ValidateDroplets() failed:", err)
}
// list running droplets and droplets that should be running
//
// dumpDroplets writes one line per droplet to w. When full is true,
// droplets that fall through every other case also get their disk
// filenames printed.
func dumpDroplets(w http.ResponseWriter, full bool) {
	loop := me.cluster.DropletsAll() // get the list of droplets
	for loop.Scan() {
		d := loop.Next()
		// this line in golang could replace 80 lines of COBOL
		header := d.SprintDumpHeader() + " "
		// check if this is a locally defined libvirt domain that needs to be imported
		if d.LocalOnly != "" {
			header += "(local)"
		}
		header += d.Hostname
		// d.Current can be nil for droplets that were never polled
		// (doList() checks for this); the old code dereferenced it
		// unconditionally
		if d.Current != nil && d.Current.State == virtpb.DropletState_ON {
			// everything is as it should be with this vm
			fmt.Fprintln(w, header)
			continue
		}
		if d.StartState == virtpb.DropletState_ON {
			// this is supposed to be ON and needs to be turned on
			fmt.Fprintln(w, header, "(should be on). todo: start() here")
			continue
		}
		if d.LocalOnly != "" {
			// a locally defined libvirt domain awaiting import
			fmt.Fprintln(w, header, "this libvirt/domain/xml needs to be imported")
			continue
		}
		if full {
			var filenames string
			for _, disk := range d.Disks {
				filenames += disk.Filename + " "
			}
			// this needs to be turned on
			fmt.Fprintln(w, header, filenames)
		}
	}
}
// status of the hypervisors
//
// dumpHypervisors writes one line per hypervisor (kill count, last poll
// age) to w, logs the droplets each hypervisor last reported, and
// prints totals at the end.
func dumpHypervisors(w http.ResponseWriter) {
	var totalDroplets int
	var totalUnknownDroplets int
	for _, h := range me.hypers {
		dur := time.Since(h.lastpoll)
		tmp := virtpb.FormatDuration(dur)
		fmt.Fprintln(w, h.pb.Hostname, "killcount =", h.killcount, "lastpoll:", tmp)
		for name := range h.lastDroplets {
			totalDroplets += 1
			d := me.cluster.FindDropletByName(name)
			// the old code called d.SprintDumpHeader() and d.Hostname
			// before/after the nil check, crashing on unknown droplets;
			// check first, then dereference
			if d == nil {
				totalUnknownDroplets += 1
				continue
			}
			header := d.SprintDumpHeader() + " "
			log.Info("\t", header, d.Hostname)
		}
	}
	if totalUnknownDroplets == 0 {
		fmt.Fprintln(w, "\tTotal Droplets", totalDroplets)
	} else {
		fmt.Fprintln(w, "\tTotal Droplets", totalDroplets, "total libvirt only droplets =", totalUnknownDroplets)
	}
}

117
event.go
View File

@ -2,22 +2,18 @@ package main
import ( import (
"fmt" "fmt"
"math/rand"
"time" "time"
"go.wit.com/lib/gui/shell" "go.wit.com/lib/gui/shell"
pb "go.wit.com/lib/protobuf/virtbuf" "go.wit.com/lib/protobuf/virtpb"
"go.wit.com/log" "go.wit.com/log"
) )
func (d *DropletT) Start() { // restarts the virtigod daemon on a hypervisor via http
log.Info("a new virtual machine is running") func (h *HyperT) RestartVirtigod() {
}
func (h *HyperT) RestartDaemon() {
url := "http://" + h.pb.Hostname + ":2520/kill" url := "http://" + h.pb.Hostname + ":2520/kill"
s := shell.Wget(url) s := shell.Wget(url)
log.Info("EVENT RestartDaemon", url, s) log.Info("EVENT RestartVirtigod", url, s)
h.lastpoll = time.Now() h.lastpoll = time.Now()
h.killcount += 1 h.killcount += 1
@ -29,103 +25,34 @@ func (h *HyperT) RestartDaemon() {
me.unstable = time.Now() me.unstable = time.Now()
} }
var stableTimeout time.Duration = 43 * time.Second // this must be bool in string because accumulated output is sometimes
// written to STDOUT, sometimes to http
// checks if the cluster is ready and stable func (h *HyperT) start(d *virtpb.Droplet) (bool, string) {
func clusterReady() (bool, string) { ready, result := me.cluster.DropletReady(d)
last := time.Since(me.unstable)
if last > stableTimeout {
// the cluster has not been stable for 133 seconds
log.Warn("clusterReady() is stable for ", shell.FormatDuration(stableTimeout), " secs")
return true, fmt.Sprintln("clusterReady() is stable ", shell.FormatDuration(stableTimeout), " secs")
}
log.Warn("clusterReady() is unstable for", shell.FormatDuration(last))
return false, "clusterReady() is unstable for " + shell.FormatDuration(last)
}
func (d *DropletT) dropletReady() (bool, string) {
if d.CurrentState == pb.DropletState_ON {
return false, "EVENT start droplet is already ON"
}
if d.starts > 2 {
// reason := "EVENT start droplet has already been started " + d.starts + " times"
return false, fmt.Sprintln("EVENT start droplet has already been started ", d.starts, " times")
}
return true, ""
}
func (h *HyperT) Start(d *DropletT) (bool, string) {
ready, result := clusterReady()
if !ready {
return false, result
}
ready, result = d.dropletReady()
if !ready { if !ready {
return false, result return false, result
} }
url := "http://" + h.pb.Hostname + ":2520/start?start=" + d.pb.Hostname url := "http://" + h.pb.Hostname + ":2520/start?start=" + d.Hostname
s := shell.Wget(url) var msg string
var data []byte
msg = d.FormatJSON()
data = []byte(msg) // Convert the string to []byte
req, err := httpPost(url, data)
if err != nil {
return false, fmt.Sprintln("error:", err)
}
log.Info("http post url:", url)
log.Info("http post data:", msg)
result = "EVENT start droplet url: " + url + "\n" result = "EVENT start droplet url: " + url + "\n"
result += "EVENT start droplet response: " + s.String() result += "EVENT start droplet response: " + string(req)
// increment the counter for a start attempt working // increment the counter for a start attempt working
d.starts += 1 d.Current.StartAttempts += 1
// mark the cluster as unstable so droplet starts can be throttled // mark the cluster as unstable so droplet starts can be throttled
me.unstable = time.Now() me.unstable = time.Now()
return true, result return true, result
} }
func Start(name string) (bool, string) {
var result string
d := findDroplet(name)
if d == nil {
result += "can't start unknown droplet"
return false, result
}
if d.CurrentState == pb.DropletState_ON {
return false, "EVENT start droplet is already ON"
}
dur := time.Since(me.unstable) // how long has the cluster been stable?
result = fmt.Sprintln("should start droplet", name, "here. grid stable for:", shell.FormatDuration(dur))
if dur < 17*time.Second {
result += "grid is still too unstable"
return false, result
}
// make the list of hypervisors that are active and can start new droplets
var pool []*HyperT
for _, h := range me.hypers {
result += fmt.Sprintln("could start droplet on", name, "on", h.pb.Hostname, h.pb.Active)
if d.pb.PreferredHypervisor == h.pb.Hostname {
// the config file says this droplet should run on this hypervisor
a, b := h.Start(d)
return a, result + b
}
if h.pb.Active != true {
continue
}
pool = append(pool, h)
}
// left here as an example of how to actually do random numbers
// it's complete mathematical chaos. Randomness is simple when
// human interaction occurs -- which is exactly what happens most
// of the time. most random shit is bullshit. all you really need
// is exactly this to make sure the random functions work as they
// should. Probably, just use this everywhere in all cases. --jcarr
rand.Seed(time.Now().UnixNano())
a := 0
b := len(pool)
n := a + rand.Intn(b-a)
result += fmt.Sprintln("pool has", len(pool), "members", "rand =", n)
h := pool[n]
startbool, startresult := h.Start(d)
return startbool, result + startresult
}

37
exit.go Normal file
View File

@ -0,0 +1,37 @@
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
// Use of this source code is governed by the GPL 3.0
package main
import (
"os"
"go.wit.com/gui"
"go.wit.com/log"
)
// okExit logs an optional note, shuts the GUI toolkit down cleanly,
// and terminates the process with exit status 0.
func okExit(note string) {
	if note != "" {
		log.Info(ARGNAME, "exit:", note, "ok")
	}
	gui.StandardExit()
	os.Exit(0)
}
// badExit logs the fatal error, shuts the GUI toolkit down cleanly,
// and terminates the process with a non-zero exit status.
func badExit(err error) {
	log.Info(ARGNAME, "failed: ", err)
	gui.StandardExit()
	os.Exit(-1)
}
// exit logs an optional note, shuts the GUI toolkit down cleanly, then
// terminates: status 0 when err is nil, otherwise logs the error and
// exits non-zero.
func exit(note string, err error) {
	if note != "" {
		log.Info(ARGNAME, "exit:", note, "ok")
	}
	gui.StandardExit()
	if err != nil {
		log.Info(ARGNAME, "failed: ", err)
		os.Exit(-1)
	}
	os.Exit(0)
}

263
http.go
View File

@ -2,12 +2,13 @@ package main
import ( import (
"fmt" "fmt"
"io/ioutil"
"net/http" "net/http"
"os"
"strings" "strings"
"time"
"go.wit.com/lib/gui/shell" "go.wit.com/lib/protobuf/virtpb"
pb "go.wit.com/lib/protobuf/virtbuf" "go.wit.com/lib/virtigolib"
"go.wit.com/log" "go.wit.com/log"
) )
@ -18,107 +19,199 @@ func cleanURL(url string) string {
} }
func okHandler(w http.ResponseWriter, r *http.Request) { func okHandler(w http.ResponseWriter, r *http.Request) {
var tmp string var route string
tmp = cleanURL(r.URL.Path) route = cleanURL(r.URL.Path)
// log.HttpMode(w)
// defer log.HttpMode(nil)
// is the cluster running what it should? msg, err := ioutil.ReadAll(r.Body) // Read the body as []byte
if tmp == "/droplets" { if err != nil {
for _, d := range me.droplets { log.Info("ReadAll() error =", err)
if d.pb.StartState != pb.DropletState_ON { return
continue }
} if route == "/uptime" {
dur := time.Since(d.lastpoll) // Calculate the elapsed time ok, s := uptimeCheck()
var hname string fmt.Fprintln(w, s)
if d.h == nil { // log.Info(s)
hname = "" updateUptimeGui(s)
} else { if ok {
hname = d.h.pb.Hostname // log.Info("Handling URL:", route, "cluster is ok")
} } else {
if d.CurrentState != pb.DropletState_ON { log.Info("Handling URL:", route, "cluster is not right yet")
fmt.Fprintln(w, "BAD STATE ", d.pb.Hostname, hname, "(", d.pb.StartState, "vs", d.CurrentState, ")", shell.FormatDuration(dur))
} else {
dur := time.Since(d.lastpoll) // Calculate the elapsed time
fmt.Fprintln(w, "GOOD STATE ON", d.pb.Hostname, hname, shell.FormatDuration(dur))
}
} }
return return
} }
if tmp == "/favicon.ico" { if route == "/create" {
var d *virtpb.Droplet
d = new(virtpb.Droplet)
if err := d.Unmarshal(msg); err != nil {
log.Info("proto.Unmarshal() failed on wire message len", len(msg))
log.Info("error =", err)
return
}
log.Info("proto.Unmarshal() worked on msg len", len(msg), "hostname =", d.Hostname)
found := me.cluster.FindDropletByName(d.Hostname)
if found != nil {
log.Info("already have hostname ", d.Hostname)
return
}
log.Info("new hostname ", d.Hostname)
if !me.cluster.AddDroplet(d) {
log.Info("new hostname added ok ", d.Hostname)
} else {
log.Info("hostname add failed for ", d.Hostname)
}
if err := me.cluster.ConfigSave(); err != nil {
log.Info("configsave error", err)
os.Exit(-1)
}
log.Info("config file saved")
return
}
if route == "/event" {
var e *virtpb.Event
e = new(virtpb.Event)
if err := e.Unmarshal(msg); err != nil {
log.Info("proto.Unmarshal() failed on wire message len", len(msg))
log.Info("error =", err)
return
}
log.Info("/event proto.Unmarshal() worked on msg len", len(msg), "hostname =", e.DropletUuid)
result := doEvent(e)
data, err := result.Marshal()
if err != nil {
log.Info("/event marshal failed", err, "len(data) =", len(data))
fmt.Fprintln(w, "/event failed", err)
return
}
w.Write(data)
// fmt.Fprintln("droplet marshal failed", err)
return
}
if route == "/import" {
log.Info("virtigo import starts here")
result, err := importDomain(w, r)
if err != nil {
log.Info("virtigo import failed")
log.Info(result)
return
}
log.Info("virtigo import worked")
return
}
// toggle poll logging
if route == "/poll" {
if POLL.Enabled() {
log.Info("POLL is true")
POLL.SetBool(false)
} else {
log.Info("POLL is false")
POLL.SetBool(true)
}
return
}
if route == "/dumpcluster" {
dumpCluster(w)
return
}
if route == "/dumpdroplet" {
me.cluster.DumpDroplet(w, r)
return
}
if route == "/dumpdroplets" {
dumpDroplets(w, false)
return
}
if route == "/DropletsPB" {
pb := me.cluster.GetDropletsPB()
data, err := pb.Marshal()
if err != nil {
log.Info("droplet marshal failed", err)
fmt.Fprintln(w, "droplet marshal failed", err)
return
}
w.Write(data)
// fmt.Fprintln("droplet marshal failed", err)
return
}
if route == "/HypervisorsPB" {
pb := me.cluster.GetHypervisorsPB()
data, err := pb.Marshal()
if err != nil {
log.Info("hypervisors marshal failed", err)
fmt.Fprintln(w, "hypervisors marshal failed", err)
return
}
w.Write(data)
// fmt.Fprintln("droplet marshal failed", err)
return
}
if route == "/EventsPB" {
pb := me.cluster.GetEventsPB()
data, err := pb.Marshal()
if err != nil {
log.Info("events marshal failed", err)
fmt.Fprintln(w, "events marshal failed", err)
return
}
w.Write(data)
// fmt.Fprintln("droplet marshal failed", err)
return
}
if route == "/dumpdropletsfull" {
dumpDroplets(w, true)
return
}
if route == "/dumphypervisors" {
dumpHypervisors(w)
return
}
if route == "/dumplibvirtxml" {
virtigolib.DumpLibvirtxmlDomainNames()
return
}
if route == "/quit" {
log.Warn("writing out config file and exiting virtigo")
if err := me.cluster.ConfigSave(); err != nil {
log.Info("configsave error", err)
} else {
os.Exit(-1)
}
return
}
if route == "/favicon.ico" {
// w.Header().Set("Content-Type", "image/svg+xml") // w.Header().Set("Content-Type", "image/svg+xml")
w.Header().Set("Content-Type", "image/png") w.Header().Set("Content-Type", "image/png")
writeFile(w, "ipv6.png") writeFile(w, "ipv6.png")
return return
} }
if tmp == "/goReference.svg" { if route == "/goReference.svg" {
w.Header().Set("Content-Type", "image/svg+xml") w.Header().Set("Content-Type", "image/svg+xml")
writeFile(w, "goReference.svg") writeFile(w, "goReference.svg")
return return
} }
if tmp == "/writeconfig" { log.Warn("BAD URL =", route)
writeConfigFile()
writeConfigFileDroplets()
fmt.Fprintln(w, "OK")
return
}
if tmp == "/dumplibvirtxml" {
dumpLibvirtxmlDomainNames()
return
}
if tmp == "/uptime" {
b, s := clusterHealthy()
if b {
log.Info("Handling URL:", tmp, "cluster is ok", s)
fmt.Fprintln(w, s)
} else {
log.Info("Handling URL:", tmp, "cluster is not right yet", s)
fmt.Fprintln(w, s)
}
for _, h := range me.hypers {
url := "http://" + h.pb.Hostname + ":2520/kill"
dur := time.Since(h.lastpoll) // Calculate the elapsed time
if dur > 90*time.Second {
h.RestartDaemon()
continue
}
if h.killcount != 0 {
log.Info("KILL count =", h.killcount, "FOR", h.pb.Hostname, dur, "curl", url)
}
if h.killcount > 10 {
log.Info("KILL count is greater than 10 for host", h.pb.Hostname, dur, "curl", url)
}
// l := shell.FormatDuration(dur)
// log.Warn("HOST =", h.pb.Hostname, "Last poll =", l)
//if d.pb.StartState != "ON" {
// continue
//}
// dur := time.Since(d.lastpoll) // Calculate the elapsed time
}
return
}
if tmp == "/start" {
start := r.URL.Query().Get("start")
// log.Warn("Handling URL:", tmp, "start droplet", start)
b, result := Start(start)
log.Warn("Start returned =", b, "result =", result)
fmt.Fprintln(w, "Start() returned", b)
fmt.Fprintln(w, "result:", result)
return
}
log.Warn("BAD URL =", tmp)
fmt.Fprintln(w, "BAD URL", tmp)
// badurl(w, r.URL.String())
// fmt.Fprintln(w, "BAD", tmp)
} }
// write a file out to the http socket
func writeFile(w http.ResponseWriter, filename string) { func writeFile(w http.ResponseWriter, filename string) {
// fmt.Fprintln(w, "GOT TEST?")
fullname := "resources/" + filename fullname := "resources/" + filename
pfile, err := resources.ReadFile(fullname) pfile, err := resources.ReadFile(fullname)
if err != nil { if err != nil {

187
importDomain.go Normal file
View File

@ -0,0 +1,187 @@
package main
import (
"errors"
"fmt"
"net/http"
"os"
"time"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/lib/virtigolib"
"go.wit.com/log"
// "libvirt.org/go/libvirt"
"libvirt.org/go/libvirtxml"
)
// attempts to import the *libvirt.Domain directly from the hypervisor
//
// importDomain looks up the (LocalOnly) droplet record named in the
// request, exports the libvirt XML from the hypervisor it was last seen
// on, merges that XML into the droplet protobuf, records any
// non-standard XML, stores the resulting events and saves the config.
// Output is written both to the http response and into the returned
// string (callers sometimes print it to stdout instead).
func importDomain(w http.ResponseWriter, r *http.Request) (string, error) {
	var result string
	domainName := r.URL.Query().Get("domainName")
	force := r.URL.Query().Get("force")
	if domainName == "" {
		result = "importDomain() failed. name is blank " + r.URL.Path
		log.Warn(result)
		fmt.Fprintln(w, result)
		return "", errors.New(result)
	}
	// a LocalOnly record should already have been created by hypervisor.Poll()
	d := me.cluster.FindDropletByName(domainName)
	if d == nil {
		result = "libvirt domain " + domainName + " could not be found on any hypervisor\n"
		log.Info(result)
		fmt.Fprintln(w, result)
		return result, errors.New(result)
	}
	// if it's not local only, don't attempt this for now
	if d.LocalOnly == "" {
		if force == "true" {
			result = "LocalOnly is blank. force=true. PROCEEDING WITH DANGER\n"
			log.Warn(result)
			fmt.Fprint(w, result)
		} else {
			result = "LocalOnly is blank. SKIP. merge not supported yet. force=" + force
			log.Log(WARN, result)
			fmt.Fprintln(w, result)
			return result, errors.New(result)
		}
	}
	/*
		// it probably doesn't really matter what the state it
		if d.Current.State != pb.DropletState_OFF {
			result := "error: libvirt domain " + name + " is not off"
			log.Info(result)
			fmt.Fprintln(w, result)
			return result, errors.New(result)
		}
	*/
	// get the hypervisor record for what it's worth
	h := findHypervisorByName(d.Current.Hypervisor)
	if h == nil {
		result = "unknown hypervisor = " + d.Current.Hypervisor
		log.Log(WARN, result)
		fmt.Fprintln(w, result)
		return result, errors.New(result)
	}
	// exports and builds a libvirt.Domain from the hypervisor
	domcfg, err := ExportLibvirtDomain(h.pb, domainName)
	if err != nil {
		result = fmt.Sprint("ExportLibvirtDomain() failed", err)
		log.Warn(result)
		fmt.Fprintln(w, result)
		return "", err
	}
	// merges and updates the droplet protobuf based on the libvirt XML
	events, err := virtigolib.MergelibvirtDomain(d, domcfg)
	if err != nil {
		result = fmt.Sprint("MerglibvirtDomain() failed for", d.Hostname, err)
		log.Warn(result)
		fmt.Fprintln(w, result)
		return "", errors.New(result)
	}
	// check what was non-standard and make a note of it. Save it in the protobuf
	s, err := virtigolib.DumpNonStandardXML(domcfg)
	if err != nil {
		// (the old code first assigned result = s + "\n" here and then
		// immediately overwrote it; that dead store has been removed)
		result = fmt.Sprintln("DumpNonStandardXML() on", domcfg.Name, "failed for", err)
		log.Info(result)
		return "", err
	}
	result += s
	// everything worked. add the events
	for _, e := range events {
		me.cluster.AddEvent(e)
	}
	result += fmt.Sprintln("importDomain() worked")
	// remove the LocalOnly flag
	d.LocalOnly = ""
	// probably be safe and don't let this move around the cluster
	d.PreferredHypervisor = d.Current.Hypervisor
	log.Log(WARN, result)
	fmt.Fprintln(w, result)
	log.Warn("Everything worked. Saving config files")
	if err := me.cluster.ConfigSave(); err != nil {
		log.Warn("configsave error", err)
		os.Exit(-1)
	}
	return result, nil
}
// this must be bool in string because accumulated output is sometimes
// written to STDOUT, sometimes to http
//
// importDomain asks hypervisor h's virtigod daemon (port 2520) to
// import the libvirt domain for droplet d, then records a start attempt
// and marks the cluster unstable so further starts are throttled.
func (h *HyperT) importDomain(d *virtpb.Droplet) (bool, string) {
	ready, result := me.cluster.DropletReady(d)
	if !ready {
		return false, result
	}
	url := "http://" + h.pb.Hostname + ":2520/import?domain=" + d.Hostname
	payload := d.FormatJSON()
	req, err := httpPost(url, []byte(payload))
	if err != nil {
		return false, fmt.Sprintln("error:", err)
	}
	log.Info("http post url:", url)
	log.Info("http post data:", payload)
	result = "EVENT import droplet url: " + url + "\n"
	result += "EVENT import droplet response: " + string(req)
	// increment the counter for a start attempt working
	d.Current.StartAttempts += 1
	// mark the cluster as unstable so droplet starts can be throttled
	me.unstable = time.Now()
	return true, result
}
// ExportLibvirtDomain fetches the libvirt XML for domainName from the
// given hypervisor and parses it into a libvirtxml.Domain.
func ExportLibvirtDomain(h *virtpb.Hypervisor, domainName string) (*libvirtxml.Domain, error) {
	// attempt to get the domain record from virtigo
	raw, err := postImportDomain(h.Hostname, domainName)
	if err != nil {
		log.Warn(err)
		return nil, err
	}
	// convert the xml into a libvirt object
	dom := new(libvirtxml.Domain)
	if err := dom.Unmarshal(string(raw)); err != nil {
		log.Warn("Unmarshal failed", domainName, err)
		return nil, err
	}
	return dom, nil
}
// postImportDomain POSTs an import request for the named libvirt domain
// to the hypervisor's virtigod daemon (port 2520) and returns the raw
// response body (the domain XML).
func postImportDomain(hypervisor string, domain string) ([]byte, error) {
	url := "http://" + hypervisor + ":2520/import?domain=" + domain
	// the body is informational; the daemon keys off the query string
	return httpPost(url, []byte("import "+domain))
}

View File

@ -1,900 +0,0 @@
// Copyright 2024 WIT.COM Inc Licensed GPL 3.0
package main
import (
"encoding/xml"
"fmt"
"os"
"path/filepath"
"reflect"
"strings"
"go.wit.com/log"
"libvirt.org/go/libvirtxml"
)
// makeStandardXml assembles a fresh libvirt domain definition for the
// droplet by layering the canned resource templates on top of each
// other, finishing with the droplet's own hostname template.
func makeStandardXml(d *DropletT) *libvirtxml.Domain {
	log.Info("create new xml file for:", d.pb.Hostname)
	domcfg := &libvirtxml.Domain{}
	sections := []string{"standard.x86", "memory", "network", "spice", "qcow", d.pb.Hostname}
	for _, section := range sections {
		addDefaults(domcfg, section)
	}
	return domcfg
}
// writeoutXml marshals the domain definition to XML and writes it to
// /tmp/<filename>.xml. Returns false on any failure.
func writeoutXml(domcfg *libvirtxml.Domain, filename string) bool {
	xmldoc, err := domcfg.Marshal()
	if err != nil {
		fmt.Println("can't make xml file error:\n", err)
		return false
	}
	outfile := "/tmp/" + filename + ".xml"
	// the old code discarded the OpenFile error, which would write to a
	// nil *os.File when /tmp is unwritable; check it instead
	regfile, err := os.OpenFile(outfile, os.O_RDWR|os.O_CREATE, 0666)
	if err != nil {
		fmt.Println("can't make xml file error:\n", err)
		return false
	}
	defer regfile.Close()
	fmt.Fprintln(regfile, xmldoc)
	log.Info("File is in", outfile)
	return true
}
// setDiskFilename points every disk in the domain at the given backing
// file by replacing each disk's file source.
func setDiskFilename(domcfg *libvirtxml.Domain, filename string) {
	for idx, disk := range domcfg.Devices.Disks {
		// build a fresh file source for this disk
		src := &libvirtxml.DomainDiskSourceFile{
			File: filename, // Set the file name here
		}
		domcfg.Devices.Disks[idx].Source.File = src
		// disk.Source points at the same struct just updated above
		fmt.Printf("Disk Device %s\n", disk.Source.File)
	}
}
// addDefaults merges the canned template resources/xml/<filename>.xml
// from the embedded resources into the domain definition d.
func addDefaults(d *libvirtxml.Domain, filename string) {
	fullname := "resources/xml/" + filename + ".xml"
	pfile, err := resources.ReadFile(fullname)
	if err != nil {
		log.Println("ERROR:", err)
		return
	}
	if err := d.Unmarshal(string(pfile)); err != nil {
		// it is Unmarshal that fails here; the old message said
		// "Marshal" and dropped the error value
		log.Info("Unmarshal failed on file", filename, err)
		return
	}
}
// readXml loads a libvirt domain definition from an XML file.
// The file's basename (minus ".xml") must match the domain name inside
// the XML; on a mismatch the process exits.
func readXml(filename string) (*libvirtxml.Domain, error) {
	log.Verbose("parse xml file:", filename)
	hostname := strings.TrimSuffix(filepath.Base(filename), ".xml")
	pfile, err := os.ReadFile(filename)
	if err != nil {
		log.Println("ERROR:", err)
		return nil, err
	}
	domcfg := &libvirtxml.Domain{}
	if err := domcfg.Unmarshal(string(pfile)); err != nil {
		// it is Unmarshal that fails here; the old message said "Marshal"
		log.Info("Unmarshal failed on file", filename, err)
		return nil, ErrorParseXML
	}
	if domcfg.Name != hostname {
		log.Info("ERROR: filename:", filename)
		log.Info("ERROR: domcfg.Name != name", domcfg.Name, hostname)
		log.Info("ERROR: xml filenames must match the xml name")
		os.Exit(-1)
	}
	return domcfg, nil
}
// mergeXml overlays the settings from the given XML file onto the
// droplet's existing libvirt definition (d.xml).
func (d *DropletT) mergeXml(filename string) error {
	log.Info("merge xml file:", filename)
	pfile, err := os.ReadFile(filename)
	if err != nil {
		log.Println("ERROR:", err)
		return ErrorNoFile
	}
	if err := d.xml.Unmarshal(string(pfile)); err != nil {
		// it is Unmarshal that fails here; the old message said "Marshal"
		log.Info("Unmarshal failed on file", filename, err)
		return ErrorParseXML
	}
	return nil
}
// setSimpleDisk throws away any existing disks on the domain and
// attaches a single qcow2 virtio disk backed by filename.
func setSimpleDisk(domcfg *libvirtxml.Domain, filename string) {
	disk := libvirtxml.DomainDisk{
		Device: "disk",
		Driver: &libvirtxml.DomainDiskDriver{
			Name: "qemu",
			Type: "qcow2",
		},
		Source: &libvirtxml.DomainDiskSource{
			File: &libvirtxml.DomainDiskSourceFile{
				File: filename,
			},
		},
		Target: &libvirtxml.DomainDiskTarget{
			Dev: "vda",
			Bus: "virtio",
		},
	}
	// replace the whole disk list with just this one disk
	domcfg.Devices.Disks = []libvirtxml.DomainDisk{disk}
}
// getMacs collects the MAC address of every network interface defined
// in the domain, printing each one as it goes.
func getMacs(domcfg *libvirtxml.Domain) []string {
	var macs []string
	for _, iface := range domcfg.Devices.Interfaces {
		if iface.MAC == nil {
			fmt.Printf("Interface: %s, MAC Address: not available\n", iface.Target.Dev)
			continue
		}
		// iface.MAC.Address = "aa:bb:aa:bb:aa:ff"
		fmt.Printf("MAC Address: %+v\n", iface.MAC)
		macs = append(macs, iface.MAC.Address)
	}
	return macs
}
// removes all the ethernet interfaces
func clearEthernet(domcfg *libvirtxml.Domain) {
	// drop the existing interface list (the old comment here said
	// "disks", but this clears the network interfaces)
	domcfg.Devices.Interfaces = nil
}
// add a new ethernet interface with mac assigned to bridge name
//
// addEthernet appends a new interface with the given MAC, targeted at
// bridge brname, to the domain's device list.
func addEthernet(domcfg *libvirtxml.Domain, mac string, brname string) {
	// (the old body declared an unused local type and carried
	// copy-pasted "disk" comments; both removed)
	newNet := libvirtxml.DomainInterface{
		MAC: &libvirtxml.DomainInterfaceMAC{
			Address: mac,
		},
		Target: &libvirtxml.DomainInterfaceTarget{
			Dev: brname,
		},
	}
	// add the new interface to the domain configuration
	domcfg.Devices.Interfaces = append(domcfg.Devices.Interfaces, newNet)
}
// setRandomMacs overwrites the MAC address of every interface in the
// domain.
//
// NOTE(review): despite the name, this assigns the same hard-coded
// address "aa:bb:cc:dd:ee:ff" to every interface — nothing here is
// random or unique. Looks like placeholder code; confirm before using.
func setRandomMacs(domcfg *libvirtxml.Domain) {
	for i, x := range domcfg.Devices.Interfaces {
		// build a replacement MAC record for this interface
		newMac := &libvirtxml.DomainInterfaceMAC{
			Address: "aa:bb:cc:dd:ee:ff", // make sure this is unique
		}
		// install it on the interface (x is a copy; index into the slice)
		domcfg.Devices.Interfaces[i].MAC = newMac
		// fmt.Printf("Disk Source %s\n", name)
		// fmt.Printf("mac addr %+v\n", x.MAC)
		fmt.Printf("mac addr %s\n", x.MAC.Address)
	}
}
// go through the libvirt xml object and dump out everything
// that is "standard". This is just a way to double check that
// there might be something interesting in a VM
// 'standard' here means what I think is standard
//
// dumpNonStandardXML blanks out, in place, every part of domcfg that this
// code recognizes as "standard" libvirt boilerplate, printing anything it
// does not recognize. Whatever XML survives the scrubbing is returned via
// finalEmptyCheck() so the caller can decide what to do with it.
//
// NOTE(review): this assumes domcfg.Devices is non-nil (libvirt domain XML
// normally always has a <devices> element) — confirm against readXml().
func dumpNonStandardXML(domcfg *libvirtxml.Domain) (string, error) {
	// dump type: "kvm" is the expected virtualization type
	if domcfg.Type == "kvm" {
		domcfg.Type = ""
	} else {
		fmt.Printf("type: %+v\n", domcfg.Type)
	}
	// dump normal OS settings
	// bugfix: the old code printed domcfg.OS.Type on the non-standard path
	// even when domcfg.OS was nil, which panicked. A missing <os> element
	// now simply has nothing to scrub or report.
	if domcfg.OS != nil {
		var standardOS bool = false
		if domcfg.OS.Type != nil {
			// OS Type: &{Arch:x86_64 Machine:pc-i440fx-5.2 Type:hvm}
			t := domcfg.OS.Type
			if t.Arch == "x86_64" || t.Machine == "pc-i440fx-5.2" {
				standardOS = true
			}
		}
		if standardOS {
			domcfg.OS = nil
		} else {
			fmt.Printf("OS: %+v\n", domcfg.OS)
			fmt.Printf("OS Type: %+v\n", domcfg.OS.Type)
		}
	}
	// ignore XMLName and IOThreads probably
	// skip is hard coded in isDomainEmpty() function
	// fmt.Printf("XMLName: %+v\n", domcfg.XMLName)
	// fmt.Printf("IOThreads: %+v\n", domcfg.IOThreads)
	// dump all the clock stuff if it's standard
	// bugfix: guard against a missing <clock> element (nil pointer)
	if domcfg.Clock != nil {
		var normalclock bool = true
		if domcfg.Clock.Offset != "utc" {
			normalclock = false
		}
		for i, t := range domcfg.Clock.Timer {
			// each timer must match the values virt-manager emits by default
			switch t.Name {
			case "rtc":
				if t.TickPolicy != "catchup" {
					fmt.Printf("Clock Name: %+v , %+v\n", i, t)
					normalclock = false
				}
			case "pit":
				if t.TickPolicy != "delay" {
					fmt.Printf("Clock Name: %+v , %+v\n", i, t)
					normalclock = false
				}
			case "hpet":
				if t.Present != "no" {
					fmt.Printf("Clock Name: %+v , %+v\n", i, t)
					normalclock = false
				}
			default:
				fmt.Printf("Clock Name: %+v , %+v\n", i, t)
				normalclock = false
			}
		}
		if normalclock {
			domcfg.Clock = nil
		} else {
			fmt.Printf("Clock was 'nonstandard' %+v\n", domcfg.Clock.Timer)
		}
	}
	// probably just dump Features for now
	// bugfix: guard against a missing <features> element (nil pointer)
	if domcfg.Features != nil {
		// ignore if ACPI is set or not
		var featurematch bool = true
		if domcfg.Features.ACPI != nil {
			domcfg.Features.ACPI = nil
		} else {
			featurematch = false
		}
		// ignore if APIC is set or not
		if domcfg.Features.APIC != nil {
			domcfg.Features.APIC = nil
		} else {
			featurematch = false
		}
		// what is VMPort anyway?
		if domcfg.Features.VMPort != nil {
			if domcfg.Features.VMPort.State == "off" {
				domcfg.Features.VMPort = nil
			} else {
				// bugfix: a VMPort in any state other than "off" is
				// non-standard; the old code still erased Features here,
				// silently losing that information
				featurematch = false
			}
		} else {
			featurematch = false
		}
		// if all three of those matched, just erase Features entirely
		if featurematch {
			domcfg.Features = nil
		}
	}
	// these should always just be strings?
	domcfg.Name = ""
	domcfg.UUID = ""
	// todo: actually check these for anything different
	domcfg.Memory = nil
	domcfg.CurrentMemory = nil
	domcfg.VCPU = nil
	// is this always "host-passthrough" and "host-model"?
	// only Fabrice knows :)
	if domcfg.CPU != nil {
		switch domcfg.CPU.Mode {
		case "host-passthrough", "host-model":
			domcfg.CPU = nil
		case "custom":
			// dump the custom CPU definition; only discard it when the
			// user explicitly asked for that with --xml-ignore-cpu
			updatedXML, _ := xml.MarshalIndent(domcfg.CPU, "", " ")
			log.Info("Ignoring custom CPU Start")
			fmt.Println(string(updatedXML))
			log.Info("Ignoring custom CPU End (--xml-ignore-cpu=true)")
			if argv.IgnoreCpu {
				domcfg.CPU = nil
			}
		default:
			fmt.Printf("unknown CPU: %+v\n", domcfg.CPU)
			fmt.Printf("unknown CPU Model: %+v\n", domcfg.CPU.Model)
			fmt.Printf("unknown CPU Mode: %+v\n", domcfg.CPU.Mode)
			updatedXML, _ := xml.MarshalIndent(domcfg.CPU, "", " ")
			log.Info("Non-Standard XML Start")
			fmt.Println(string(updatedXML))
			log.Info("Non-Standard XML End")
		}
	}
	// apparmor seclabels are considered dom0 configuration; anything else
	// is reported and kept
	var secnormal bool = true
	if len(domcfg.SecLabel) != 0 {
		for _, sec := range domcfg.SecLabel {
			if sec.Model == "apparmor" {
				// this should be configured in dom0
			} else {
				fmt.Printf("? SecLabel: %+v\n", sec)
				fmt.Printf("? SecLabel.Model: %+v\n", sec.Model)
				secnormal = false
			}
		}
	}
	if secnormal {
		domcfg.SecLabel = nil
	}
	// ignore Metadata
	// this is probably something about what kind of OS you might be running
	// todo: get this directly from the disk image
	if domcfg.Metadata != nil {
		fmt.Printf("Not saving Domain.Metadata: %+v\n", domcfg.Metadata)
		domcfg.Metadata = nil
	}
	// ignore Resource when it is the default "/machine" partition
	if domcfg.Resource != nil {
		if domcfg.Resource.Partition == "/machine" {
			domcfg.Resource = nil
		} else {
			fmt.Printf("non-standard Domain.Resource: %+v\n", domcfg.Resource)
		}
	}
	// this will move elsewhere in the protobuf someday
	// ignore all these for now
	if domcfg.OnPoweroff != "" { // normally "destroy"
		domcfg.OnPoweroff = ""
	}
	if domcfg.OnCrash != "" { // normally "restart", often "destroy"
		domcfg.OnCrash = ""
	}
	if domcfg.OnReboot != "" { // normally "restart"
		domcfg.OnReboot = ""
	}
	// same with PM. move to protobuf
	domcfg.PM = nil
	// only keep non-qemu stuff
	var qemu bool = true
	for _, disk := range domcfg.Devices.Disks {
		// bugfix: Driver and Source.File can legitimately be nil (for
		// example block-device backed disks); the old code panicked on them.
		// A disk without a qemu driver is treated as non-standard.
		if disk.Driver == nil || disk.Driver.Name != "qemu" {
			var drv string
			if disk.Driver != nil {
				drv = disk.Driver.Name
			}
			var src string
			if disk.Source != nil && disk.Source.File != nil {
				src = disk.Source.File.File
			}
			fmt.Printf("- Disk: %s, Device: %s, Source: %s\n", disk.Device, drv, src)
			fmt.Printf("FOUND NON QEMU DISK\n")
			fmt.Printf("FOUND NON QEMU DISKS\n")
			qemu = false
		}
	}
	if qemu {
		domcfg.Devices.Disks = nil
	}
	// network interfaces get processed elsewhere
	domcfg.Devices.Interfaces = nil
	// look for strange stuff here
	var normalPCI bool = true
	var keepPCI []libvirtxml.DomainController
	for _, controller := range domcfg.Devices.Controllers {
		switch controller.Type {
		case "usb":
			switch controller.Model {
			case "ich9-ehci1":
			case "piix3-uhci":
			case "qemu-xhci":
			case "ich9-uhci1":
			case "ich9-uhci2":
			case "ich9-uhci3":
				// all known-standard USB controller models
			default:
				keepPCI = append(keepPCI, controller)
				normalPCI = false
				// NOTE(review): controller.Index may be nil here — confirm
				fmt.Printf("USB: %s, %d\n", controller.Model, *controller.Index)
				// bugfix: mirror the nil check the "pci" case already had;
				// the old code dereferenced controller.Address unconditionally
				if controller.Address == nil {
					fmt.Printf("USB: controller.Address = nil\n")
				} else {
					pci := controller.Address.PCI
					fmt.Printf("USB: Domain: %+v Slot %d Function %d\n", *pci.Domain, *pci.Slot, *pci.Function)
				}
			}
		case "ide":
			// fmt.Printf("IGNORE IDE\n")
		case "virtio-serial":
			// fmt.Printf("IGNORE virtio-serial\n")
		case "sata":
			// fmt.Printf("SATA: %+v\n", controller)
		case "scsi":
			switch controller.Model {
			case "virtio-scsi":
			case "lsilogic":
				// known-standard SCSI controller models
			default:
				keepPCI = append(keepPCI, controller)
				normalPCI = false
			}
		case "pci":
			// these are the strings I've found so far
			switch controller.Model {
			case "pci-root":
			case "pcie-root":
			case "pcie-root-port":
			case "pcie-to-pci-bridge":
			default:
				fmt.Printf("PCI: %s, %d\n", controller.Model, *controller.Index)
				if controller.Address == nil {
					fmt.Printf("PCI: controller.Address = nil\n")
				} else {
					pci := controller.Address.PCI
					fmt.Printf("PCI: Domain: %+v Slot %d Function %d\n", *pci.Domain, *pci.Slot, *pci.Function)
				}
				normalPCI = false
				keepPCI = append(keepPCI, controller)
			}
		default:
			fmt.Printf("? controllerType: %s: %+v\n", controller.Type, controller)
			normalPCI = false
			keepPCI = append(keepPCI, controller)
		}
	}
	if normalPCI {
		domcfg.Devices.Controllers = nil
	} else {
		// keep only the controllers that were not recognized
		domcfg.Devices.Controllers = keepPCI
	}
	// ignore serial and console
	domcfg.Devices.Serials = nil
	domcfg.Devices.Consoles = nil
	// ignore sound
	domcfg.Devices.Sounds = nil
	// ignore input
	domcfg.Devices.Inputs = nil
	// ignore MemoryBalloon. This is cool, but no mortal humans
	// are going to use it at this point. By that I mean me.
	// someday this will be in protobuf?
	domcfg.Devices.MemBalloon = nil
	if domcfg.Devices.Emulator == "/usr/bin/qemu-system-x86_64" {
		domcfg.Devices.Emulator = ""
	}
	// ignore Graphics == Spice when AutoPort = 'yes'
	var normalSpice bool = true
	if domcfg.Devices.Graphics != nil {
		for i, g := range domcfg.Devices.Graphics {
			if g.VNC != nil {
				// ignore vnc settings
				continue
			}
			if g.Spice != nil {
				// this is all moved to updateDroplet()
				// this is a spice definition, just ignore it
				// because port mappings and network access will be handled
				// somewhere else someday
				s := g.Spice
				if s.AutoPort == "yes" {
					// should ignore either way
				} else {
					// print out, but ignore the port number
					// fmt.Printf("Spice Port = %d\n", s.Port)
				}
				continue
			}
			// figure out what to do with non-spice stuff
			fmt.Printf("Unknown Graphics: %d %+v\n", i, g)
			normalSpice = false
		}
	}
	if normalSpice {
		domcfg.Devices.Graphics = nil
	}
	// blank out emulator. should be in dom0
	switch domcfg.Devices.Emulator {
	case "":
		domcfg.Devices.Emulator = ""
	case "/usr/bin/kvm":
		domcfg.Devices.Emulator = ""
	case "/usr/bin/kvm-spice":
		domcfg.Devices.Emulator = ""
	default:
		fmt.Printf("Unknown Emulator: %s\n", domcfg.Devices.Emulator)
	}
	// ignore Channels == SpiceVMC
	normalSpice = true
	if domcfg.Devices.Channels != nil {
		for _, c := range domcfg.Devices.Channels {
			// a channel with any Source is treated as standard here;
			// only a missing Source is reported
			if c.Source == nil {
				fmt.Printf("? Channels: %+v\n", c)
				normalSpice = false
			}
		}
	}
	if normalSpice {
		domcfg.Devices.Channels = nil
	}
	// this is probably for spice to have keyboard and mouse input
	normalSpice = true
	if domcfg.Devices.RedirDevs != nil {
		for _, c := range domcfg.Devices.RedirDevs {
			s := c.Source
			if s != nil {
				if s.SpiceVMC != nil {
					// this is the normal USB redirection (I guess)
				} else {
					normalSpice = false
				}
			} else {
				normalSpice = false
			}
		}
	}
	if normalSpice {
		domcfg.Devices.RedirDevs = nil
	}
	// only virtio RNGs are considered standard
	var normalRNGs bool = true
	if domcfg.Devices.RNGs != nil {
		for _, rng := range domcfg.Devices.RNGs {
			if rng.Model == "virtio" {
				// nothing to do for this
			} else {
				fmt.Printf("? RNGs: %+v\n", rng)
				normalRNGs = false
			}
		}
	}
	if normalRNGs {
		domcfg.Devices.RNGs = nil
	}
	// don't copy this over here yet.
	// probably most domU's don't really use/need it set to what is in the XML
	var normalVideo bool = true
	if domcfg.Devices.Videos != nil {
		for _, v := range domcfg.Devices.Videos {
			// NOTE(review): assumes v.Model is always populated — confirm
			switch v.Model.Type {
			case "qxl":
				if v.Model.VRam == 65536 {
					// standard qxl video
				} else {
					fmt.Printf("? Video: %+v\n", v)
					fmt.Printf("? Video Model: %+v\n", v.Model)
					normalVideo = false
				}
			case "cirrus":
			case "virtio":
				// this should always be standard
			default:
				fmt.Printf("? Video: %+v\n", v)
				fmt.Printf("? Video Model: %+v\n", v.Model)
				normalVideo = false
			}
		}
	}
	if normalVideo {
		domcfg.Devices.Videos = nil
	}
	return finalEmptyCheck(domcfg)
}
// this tries the final zero'ing out of the XML
// todo: if this fails, put the remaining XML in the protobuf file?
//
// finalEmptyCheck clears the Devices section if it is empty, then reports
// whatever is left of the domain XML. When any non-standard XML remains,
// me.changed is set so the caller knows the config needs saving.
func finalEmptyCheck(domcfg *libvirtxml.Domain) (string, error) {
	// dumpLibvirtxmlDomainNames()
	if !libvirtxmlDomainDevicesEmpty(*domcfg.Devices) {
		// devices still contain something: show the user everything
		return warnUserOfNonStandardXML(domcfg)
	}
	domcfg.Devices = nil
	if libvirtxmlDomainEmpty(*domcfg) {
		// nothing non-standard survived; marshaling nil yields ""
		return warnUserOfNonStandardXML(nil)
	}
	leftover, err := warnUserOfNonStandardXML(domcfg)
	if err != nil {
		fmt.Printf("todo: improve this libvirtXML parsing. %v\n", err)
		os.Exit(-1)
	}
	if leftover != "" {
		// something non-standard remains; flag the config as modified
		me.changed = true
	}
	return leftover, nil
}
// warnUserOfNonStandardXML marshals whatever remains of the domain config
// and, if anything is left, prints it for the user to clean up by hand.
// Returns the remaining XML as a string ("" when everything was standard).
func warnUserOfNonStandardXML(domcfg *libvirtxml.Domain) (string, error) {
	raw, err := xml.MarshalIndent(domcfg, "", " ")
	if err != nil {
		fmt.Printf("Failed to marshal updated XML: %v\n", err)
		return "", err
	}
	leftover := string(raw)
	if leftover == "" {
		// everything seems to have been parsed pretty standard
		return "", nil
	}
	log.Info("Non-Standard XML Start")
	fmt.Println(leftover)
	log.Info("Non-Standard XML End")
	log.Info("")
	log.Info("This XML must be removed by hand. Put this in the protobuf?")
	return leftover, nil
}
// dump out all the fields in libvirtxml.DomainDeviceList
//
// dumpLibvirtxmlDomainNames prints the field names of the three libvirtxml
// structs this file inspects, as a debugging aid. The three copy-pasted
// reflection loops were collapsed into one helper; output is unchanged.
func dumpLibvirtxmlDomainNames() {
	dumpStructFieldNames("Domain", reflect.TypeOf(libvirtxml.Domain{}))
	dumpStructFieldNames("DomainDeviceList", reflect.TypeOf(libvirtxml.DomainDeviceList{}))
	dumpStructFieldNames("DomainInterface", reflect.TypeOf(libvirtxml.DomainInterface{}))
}

// dumpStructFieldNames prints "Fields in libvirtxml.<label>:" followed by
// one "<label>: <FieldName>" line per struct field.
func dumpStructFieldNames(label string, t reflect.Type) {
	fmt.Printf("Fields in libvirtxml.%s:\n", label)
	for i := 0; i < t.NumField(); i++ {
		fmt.Println(label+":", t.Field(i).Name)
	}
}
// dump out all the fields in libvirtxml.DomainDeviceList
//
// libvirtxmlDomainDevicesEmpty reports whether every field of the device
// list is empty (zero string, zero-length slice/array, nil pointer),
// printing each field it finds non-empty.
func libvirtxmlDomainDevicesEmpty(mydom libvirtxml.DomainDeviceList) bool {
	var empty bool = true
	// Get the reflection object of the variable
	v := reflect.ValueOf(mydom)
	// Ensure that we are working with a struct
	if v.Kind() == reflect.Struct {
		// Loop through each field in the struct
		for i := 0; i < v.NumField(); i++ {
			// Get field name
			field := v.Type().Field(i).Name
			// Get field value
			value := v.Field(i)
			if !value.IsValid() {
				fmt.Printf("Field: %s is nil or invalid\n", field)
				continue
			}
			// Check if the field is a string, array, or slice
			switch value.Kind() {
			case reflect.String:
				if value.String() != "" {
					fmt.Printf("Field: %s is a String with value: %s\n", field, value.String())
					empty = false
				}
			case reflect.Slice:
				if value.Len() != 0 {
					fmt.Printf("Field: %s is a Slice with length: %d\n", field, value.Len())
					empty = false
				}
			case reflect.Array:
				if value.Len() != 0 {
					fmt.Printf("Field: %s is an Array with length: %d\n", field, value.Len())
					empty = false
				}
			case reflect.Ptr:
				// bugfix: the old code re-tested !value.IsValid() here, which
				// is always false (invalid values continue above), so non-nil
				// pointer fields were silently treated as empty. Test IsNil()
				// instead, matching libvirtxmlDomainEmpty().
				if !value.IsNil() {
					fmt.Println("Field ptr: value:", value)
					fmt.Printf("Field ptr: %s is of type: %s\n", field, value.Kind())
					empty = false
				}
			default:
				fmt.Printf("Field: %s is of type: %s\n", field, value.Kind())
				empty = false
			}
		}
	} else {
		fmt.Println("Provided variable is not a struct.")
	}
	return empty
}
// dump out all the fields in libvirtxml.DomainDeviceList
//
// libvirtxmlDomainEmpty reports whether every field of the domain is empty
// (zero string, zero-length slice/array, zero-valued struct, nil pointer),
// printing each field it finds non-empty. Uint fields are ignored on purpose.
func libvirtxmlDomainEmpty(mydom libvirtxml.Domain) bool {
	var empty bool = true
	// Get the reflection object of the variable
	v := reflect.ValueOf(mydom)
	// Ensure that we are working with a struct
	if v.Kind() == reflect.Struct {
		// Loop through each field in the struct
		for i := 0; i < v.NumField(); i++ {
			// Get field name
			field := v.Type().Field(i).Name
			// Get field value
			value := v.Field(i)
			if !value.IsValid() {
				fmt.Printf("Field: %s is invalid\n", field)
				continue
			}
			// processed as Domain.Metadata & Domain.Resource
			// if (field == "IOThreads") || (field == "XMLName") {
			//	fmt.Printf("Field: %s is: %s\n", field, value.String())
			//	continue
			// }
			// Check if the field is a string, array, or slice
			switch value.Kind() {
			case reflect.String:
				if value.String() != "" {
					fmt.Printf("Field: %s is a String with value: %s\n", field, value.String())
					empty = false
				}
			case reflect.Slice:
				if value.Len() != 0 {
					fmt.Printf("Field: %s is a Slice with length: %d\n", field, value.Len())
					empty = false
				}
			case reflect.Array:
				if value.Len() != 0 {
					fmt.Printf("Field: %s is an Array with length: %d\n", field, value.Len())
					empty = false
				}
			case reflect.Struct:
				if IsStructEmptyOrNil(value) {
					fmt.Printf("XML Field ignore empty Struct %s\n", field)
				} else {
					fmt.Printf("Field Struct is not empty %s is %+v\n", field, value)
					empty = false
				}
			case reflect.Uint:
				// probably ignore ints. when has that ever gone wrong?
			case reflect.Ptr:
				// cleanup: the old else-branch re-tested value.IsValid(),
				// which is always true here (invalid values continue above);
				// that dead code was removed. Behavior is unchanged.
				if !value.IsNil() {
					// there is something still here in the libvirt XML
					fmt.Printf("Field Valid? field %s is of type: %s\n", field, value.Kind())
					fmt.Println("Field Valid? ptr: value:", value)
					empty = false
				}
			default:
				fmt.Printf("Field: %s is of type: %s\n", field, value.Kind())
				empty = false
			}
		}
	} else {
		fmt.Println("Provided variable is not a struct.")
	}
	return empty
}
// IsStructEmptyOrNil checks if a struct or pointer to struct is empty, blank, or nil.
// It returns true for a nil interface, a nil pointer, or a struct (or pointed-to
// struct) whose exported fields all hold their zero values. Anything that is not
// a struct after dereferencing returns false. Unexported fields are skipped
// because reflection cannot read them.
func IsStructEmptyOrNil(value interface{}) bool {
	val := reflect.ValueOf(value)
	// bugfix: IsStructEmptyOrNil(nil) produced an invalid reflect.Value and
	// fell through to "return false" despite the "or nil" contract; a nil
	// interface is now reported as empty.
	if !val.IsValid() {
		return true
	}
	// If the value is a pointer, check if it's nil and dereference it if not
	if val.Kind() == reflect.Ptr {
		if val.IsNil() {
			return true
		}
		val = val.Elem()
	}
	// Ensure we're dealing with a struct after potential dereferencing
	if val.Kind() != reflect.Struct {
		return false // Not a struct
	}
	// Check each field in the struct for its zero value
	for i := 0; i < val.NumField(); i++ {
		field := val.Field(i)
		// Skip unexported fields as we can't access them
		if !field.CanInterface() {
			continue
		}
		if !reflect.DeepEqual(field.Interface(), reflect.Zero(field.Type()).Interface()) {
			return false // Found a non-zero field
		}
	}
	return true // All fields are zero values
}

176
main.go
View File

@ -4,133 +4,101 @@ package main
import ( import (
"embed" "embed"
"fmt" "net/url"
"os" "os"
"path/filepath" "path/filepath"
"time"
"github.com/google/uuid" "github.com/google/uuid"
"go.wit.com/dev/alexflint/arg" "go.wit.com/dev/alexflint/arg"
pb "go.wit.com/lib/protobuf/virtbuf" "go.wit.com/lib/gui/prep"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/log" "go.wit.com/log"
) )
var Version string // sent via -ldflags
var VERSION string
var BUILDTIME string
var ARGNAME string = "virtigo"
//go:embed resources/* //go:embed resources/*
var resources embed.FS var resources embed.FS
func main() { func main() {
me = new(virtigoT)
prep.Bash(ARGNAME, argv.DoAutoComplete) // this line should be: prep.Bash(argv)
me.myGui = prep.Gui() // prepares the GUI package for go-args
me.pp = arg.MustParse(&argv)
if me.pp == nil {
me.pp.WriteHelp(os.Stdout)
os.Exit(0)
}
if os.Getenv("VIRTIGO_HOME") == "" { if os.Getenv("VIRTIGO_HOME") == "" {
homeDir, _ := os.UserHomeDir() homeDir, _ := os.UserHomeDir()
fullpath := filepath.Join(homeDir, ".config/virtigo") fullpath := filepath.Join(homeDir, ".config/virtigo")
os.Setenv("VIRTIGO_HOME", fullpath) os.Setenv("VIRTIGO_HOME", fullpath)
} }
var pp *arg.Parser
pp = arg.MustParse(&argv)
if pp == nil { me.clusters = virtpb.NewClusters()
pp.WriteHelp(os.Stdout)
os.Exit(0) if argv.List != nil {
err := me.clusters.ConfigLoad()
if err != nil {
badExit(err)
}
doList()
okExit("virtigo list")
}
if argv.Droplet != nil {
exit(doDroplet())
}
me.myGui.Start() // loads the GUI toolkit
if argv.Admin {
err := me.clusters.ConfigLoad()
if err != nil {
badExit(err)
}
doAdminGui()
okExit("admin close")
}
if argv.Server != "" {
log.Info("start admin interface")
admin := new(adminT)
var err error
admin.url, err = url.Parse(argv.Server)
if err != nil {
badExit(err)
}
err = me.clusters.ConfigLoad()
if err != nil {
clusters := virtpb.NewClusters()
c := new(virtpb.Cluster)
c.Uuid = uuid.New().String()
c.URL = append(c.URL, argv.Server)
clusters.Append(c)
virtpb.ConfigWriteTEXT(clusters, "cluster.text")
badExit(err)
}
admin.doAdminGui()
okExit("admin close")
} }
if argv.Daemon { if argv.Daemon {
log.DaemonMode(true) if err := doDaemon(); err != nil {
} badExit(err)
// set defaults
me.unstable = time.Now() // initialize the grid as unstable
me.delay = 5 * time.Second // how often to poll the hypervisors
me.changed = false
me.events = new(pb.Events)
u := uuid.New()
me.events.Uuid = u.String()
me.events.Version = "dirty v1"
cfgfile()
// sanity check the droplets
checkDroplets(false)
for _, filename := range argv.Xml {
domcfg, err := readXml(filename)
if err != nil {
// parsing the libvirt xml file failed
log.Info("error:", filename, err)
log.Info("readXml() error", filename)
log.Info("readXml() error", err)
log.Info("libvirt XML will have to be fixed by hand")
os.Exit(-1)
} }
// this is a new droplet. add it to the cluster okExit("")
log.Info("Add XML Droplet here", domcfg.Name)
_, err = addDomainDroplet(domcfg)
if err != nil {
log.Info("addDomainDroplet() error", filename)
log.Info("addDomainDroplet() error", err)
log.Info("libvirt XML will have to be fixed by hand")
os.Exit(-1)
}
}
if len(argv.Xml) != 0 {
if me.changed {
if argv.Save {
writeConfigFile()
writeConfigFileDroplets()
log.Info("XML changes saved in protobuf config")
os.Exit(0)
} else {
log.Info("Not saving changes (use --save to save)")
os.Exit(0)
}
}
log.Info("No XML changes found")
os.Exit(0)
} }
// start the watchdog polling for each hypervisor doGui() // start making our forge GUI
for _, h := range me.hypers { startHTTP() // sit here forever
log.Info("starting polling on", h.pb.Hostname)
go h.NewWatchdog()
}
// sit here
startHTTP()
}
func makeDroplet(start string) {
d := findDroplet(start)
if d == nil {
log.Info("droplet is unknown:", start)
os.Exit(0)
}
log.Info("start droplet here:", d.pb.Hostname)
domcfg := makeStandardXml(d)
fmt.Printf("Virt type %s\n", domcfg.Type)
fmt.Printf("Virt name %s\n", domcfg.Name)
fmt.Printf("Virt UUID %s\n", domcfg.UUID)
fmt.Printf("Virt Memory %d %s\n", domcfg.Memory.Value, domcfg.Memory.Unit)
// test add some ethernet devices
macs := getMacs(domcfg)
fmt.Printf("Virt mac addr:%s\n", macs)
// clearEthernet(domcfg)
addEthernet(domcfg, "04:44:33:11:22:11", "worldbr")
addEthernet(domcfg, "04:44:33:33:44:55", "greenbr")
// add a check here to make these unique
// setRandomMacs(domcfg)
// print out the final mac addresses
macs = getMacs(domcfg)
fmt.Printf("Virt mac addr:%s\n", macs)
qcow := "/home/nfs/" + d.pb.Hostname + ".qcow2"
setSimpleDisk(domcfg, qcow)
writeoutXml(domcfg, "blahcarr")
os.Exit(0)
} }

84
me.go Normal file
View File

@ -0,0 +1,84 @@
package main
// RFC implementation
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"net"
"net/http"
"os/user"
"time"
)
// Function to create a persistent TCP connection
func createPersistentConnection(host string) (net.Conn, error) {
dialer := &net.Dialer{
Timeout: 10 * time.Second,
KeepAlive: 30 * time.Second,
}
conn, err := dialer.Dial("tcp", host)
if err != nil {
return nil, fmt.Errorf("failed to establish connection: %w", err)
}
return conn, nil
}
func mesocket() {
host := "example.com:80"
// Establish a persistent TCP connection
conn, err := createPersistentConnection(host)
if err != nil {
fmt.Println("Error creating connection:", err)
return
}
defer conn.Close()
// Custom transport that forces HTTP requests to use our existing connection
transport := &http.Transport{
DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
fmt.Println("Reusing existing TCP connection")
return conn, nil
},
DisableKeepAlives: false, // Ensure Keep-Alive is enabled
}
client := &http.Client{
Transport: transport,
Timeout: 10 * time.Second,
}
url := "http://example.com/endpoint"
data := []byte(`{"message": "Hello"}`)
// Create an HTTP request
req, err := http.NewRequest(http.MethodPost, url, bytes.NewBuffer(data))
if err != nil {
fmt.Println("Error creating request:", err)
return
}
usr, _ := user.Current()
req.Header.Set("author", usr.Username)
req.Header.Set("Connection", "keep-alive") // Keep connection alive
// Perform the HTTP request
resp, err := client.Do(req)
if err != nil {
fmt.Println("Error performing request:", err)
return
}
defer resp.Body.Close()
// Read and print the response
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
fmt.Println("Error reading response:", err)
return
}
fmt.Println("Response:", string(body))
}

220
poll.go
View File

@ -6,10 +6,20 @@ import (
"time" "time"
"go.wit.com/lib/gui/shell" "go.wit.com/lib/gui/shell"
pb "go.wit.com/lib/protobuf/virtbuf" "go.wit.com/lib/protobuf/virtpb"
"go.wit.com/log" "go.wit.com/log"
"google.golang.org/protobuf/types/known/timestamppb"
) )
func findHypervisorByName(name string) *HyperT {
for _, h := range me.hypers {
if h.pb.Hostname == name {
return h
}
}
return nil
}
func (h *HyperT) pollHypervisor() { func (h *HyperT) pollHypervisor() {
url := "http://" + h.pb.Hostname + ":2520/vms" url := "http://" + h.pb.Hostname + ":2520/vms"
log.Log(POLL, "wget url =", url) log.Log(POLL, "wget url =", url)
@ -17,6 +27,7 @@ func (h *HyperT) pollHypervisor() {
if s == nil { if s == nil {
return return
} }
var bytesSplice []byte var bytesSplice []byte
bytesSplice = s.Bytes() bytesSplice = s.Bytes()
// fmt.Fprintln(w, string(bytesSplice)) // fmt.Fprintln(w, string(bytesSplice))
@ -26,134 +37,184 @@ func (h *HyperT) pollHypervisor() {
} }
fields := strings.Fields(line) fields := strings.Fields(line)
if len(fields) < 2 { if len(fields) < 2 {
log.Log(WARN, "unknown:", h.pb.Hostname, fields)
continue continue
} }
state := fields[0] state := fields[0]
name := fields[1] name := fields[1]
if state == "ON" { d := me.cluster.FindDropletByName(name)
log.Log(POLL, h.pb.Hostname, "STATE:", state, "HOST:", name, "rest:", fields[2:]) if d == nil {
d := findDroplet(name) log.Log(WARN, name, "local defined domain")
if d == nil { log.Log(WARN, name, "local Adding new entry with AddDropletLocal()")
// this is a new unknown droplet (not in the config file) log.Log(WARN, name, "local Adding new entry with AddDropletLocal()")
d = new(DropletT) log.Log(WARN, name, "local Adding new entry with AddDropletLocal()")
d.pb.Hostname = name me.cluster.AddDropletLocal(name, h.pb.Hostname)
d.h = h continue
d.lastpoll = time.Now() }
d.CurrentState = pb.DropletState_ON start := d.SprintHeader()
me.droplets = append(me.droplets, d) h.lastDroplets[name] = time.Now()
log.Log(EVENT, name, "IS NEW. ADDED ON", h.pb.Hostname) if state == "OFF" {
if d.Current.Hypervisor == "" {
d.Current.Hypervisor = h.pb.Hostname
} }
log.Log(INFO, "ALREADY RECORDED", d.pb.Hostname) if d.LocalOnly == "" {
log.Log(WARN, start, "local domain is a duplicate (need to resolve this)", h.pb.Hostname)
continue
}
log.Log(WARN, start, "local domain ready to import from hypervisor")
continue
}
// update the status to ON and the last polled value if state == "ON" {
d.CurrentState = pb.DropletState_ON log.Log(POLL, start, "STATE:", state, "rest:", fields[2:])
d.lastpoll = time.Now()
if d.h == nil { // update the status to ON
d.SetState(virtpb.DropletState_ON)
// set the LastPoll time to now
now := time.Now()
d.Current.LastPoll = timestamppb.New(now)
if d.Current.Hypervisor == "" {
// this means the droplet was in the config file // this means the droplet was in the config file
// but this is the first time it's shown up as running // but this is the first time it's shown up as running
// this should mean a droplet is running where the config file says it probably should be running // this should mean a droplet is running where the config file says it probably should be running
if d.pb.PreferredHypervisor == h.pb.Hostname { if d.PreferredHypervisor == h.pb.Hostname {
log.Log(EVENT, "new droplet", d.pb.Hostname, "(matches config hypervisor", h.pb.Hostname+")") log.Log(EVENT, start, "poll shows new droplet", d.Hostname,
d.h = h "(matches config hypervisor", h.pb.Hostname+")")
d.Current.Hypervisor = h.pb.Hostname
continue continue
} }
log.Log(EVENT, "new droplet", d.pb.Hostname, "on", h.pb.Hostname, "(in config file without preferred hypervisor)") log.Log(EVENT, start, "poll shows new droplet (in config file without preferred hypervisor)")
d.h = h d.Current.Hypervisor = h.pb.Hostname
continue continue
} }
// this means the droplet is still where it was before // if this is blank, the droplet has probably never booted yet
if d.h.pb.Hostname != h.pb.Hostname { if d.Current.Hypervisor == "" {
log.Log(EVENT, "droplet", d.h.pb.Hostname, "moved to", h.pb.Hostname) d.Current.Hypervisor = h.pb.Hostname
continue continue
} }
d.h = h
// this means the droplet has moved
if d.Current.Hypervisor != h.pb.Hostname {
log.Log(EVENT, "droplet", d.Hostname, "moved to", h.pb.Hostname)
// record the droplet migrated (or booted somewhere else? recording this is a work in progress)
me.cluster.DropletMoved(d, h.pb)
continue
}
d.Current.Hypervisor = h.pb.Hostname
}
}
// these are the droplets that don't exist anymore on this hypervisor
// this should mean you ran shutdown within domU
for name, t := range h.lastDroplets {
dur := time.Since(t)
if dur > me.hyperPollDelay {
d := me.cluster.FindDropletByName(name)
header := d.SprintHeader()
if d == nil {
log.Info(header, "droplet has probably powered down", name, "but findDroplet returned nil")
// should delete this from h.lastDroplets
continue
}
if d.Current.State == virtpb.DropletState_OFF {
log.Info(header, "droplet timed out and is off. remove from h.lastDroplets[] slice")
delete(h.lastDroplets, name)
continue
}
// everthing below here is dumb and needs to be rethought
if d.Current.State != virtpb.DropletState_UNKNOWN {
d.SetState(virtpb.DropletState_UNKNOWN)
log.Info(header, "set state UNKNOWN here", name)
}
if d.Current.State == virtpb.DropletState_UNKNOWN {
if dur > time.Minute*2 {
// what this means is the droplet probably wasn't migrated or the migrate failed
// where should this be checked? the status needs to be changed to OFF
s := virtpb.FormatDuration(dur)
log.Info(header, "UNKNOWN state for more than 2 minutes (clearing out ?)", name, s)
// it might be safe to set the status to OFF here. not really. this poll needs
// to be moved somewhere else. there needs to be a new goroutine not tied to the
// hypervisor
d.SetState(virtpb.DropletState_OFF)
}
}
} }
continue
} }
h.lastpoll = time.Now() h.lastpoll = time.Now()
h.killcount = 0 // poll worked. reset killcount h.killcount = 0 // poll worked. reset killcount
} }
func findDroplet(name string) *DropletT {
for _, d := range me.droplets {
if d.pb.Hostname == name {
return d
}
}
return nil
}
func findHypervisor(name string) *HyperT {
for _, h := range me.hypers {
if h.pb.Hostname == name {
return h
}
}
return nil
}
// check the state of the cluster and return a string // check the state of the cluster and return a string
// that is intended to be sent to an uptime monitor like Kuma // that is intended to be sent to an uptime monitor like Kuma
func clusterHealthy() (bool, string) { func uptimeCheck() (bool, string) {
var good bool = true var good bool = true
var total int var total int
var working int var working int
var failed int var failed int
var missing int var missing []*virtpb.Droplet
var unknown int var unknown int
var unknownList []string var unknownList []string
for _, d := range me.droplets { loop := me.cluster.DropletsAll() // get the list of droplets
for loop.Scan() {
d := loop.Next()
total += 1 total += 1
if d.pb.StartState != pb.DropletState_ON { if d.StartState != virtpb.DropletState_ON {
continue
}
dur := time.Since(d.lastpoll) // Calculate the elapsed time
if d.CurrentState == pb.DropletState_UNKNOWN {
// log.Info("SKIP. hostname has not been polled yet", d.pb.Hostname, d.hname)
unknown += 1
unknownList = append(unknownList, d.pb.Hostname)
continue continue
} }
dur := time.Since(d.Current.LastPoll.AsTime()) // Calculate the elapsed time
var hname string var hname string
if d.h != nil { if d.Current.Hypervisor != "" {
hname = d.h.pb.Hostname hname = d.Current.Hypervisor
} }
if d.CurrentState != pb.DropletState_ON { switch d.Current.State {
log.Info("BAD STATE", d.pb.StartState, d.pb.Hostname, hname, "CurrentState =", d.CurrentState, shell.FormatDuration(dur)) case virtpb.DropletState_UNKNOWN:
good = false // log.Info("SKIP. hostname has not been polled yet", d.Hostname, d.hname)
failed += 1 unknown += 1
} else { unknownList = append(unknownList, d.Hostname)
dur := time.Since(d.lastpoll) // Calculate the elapsed time case virtpb.DropletState_ON:
if dur > time.Minute { if dur > me.missingDropletTimeout {
log.Info("GOOD STATE MISSING", d.pb.Hostname, hname, shell.FormatDuration(dur)) log.Info("GOOD STATE MISSING", d.Hostname, hname, virtpb.FormatDuration(dur))
good = false good = false
d.CurrentState = pb.DropletState_UNKNOWN d.SetState(virtpb.DropletState_UNKNOWN)
failed += 1 failed += 1
continue continue
} }
l := shell.FormatDuration(dur) l := virtpb.FormatDuration(dur)
if l == "" { if l == "" {
log.Info("DUR IS EMPTY", dur) log.Info("DUR IS EMPTY", dur)
missing += 1 missing = append(missing, d)
continue continue
} }
working += 1 working += 1
// log.Info("GOOD STATE ON", d.pb.Hostname, d.hname, "dur =", l) // log.Info("GOOD STATE ON", d.Hostname, d.hname, "dur =", l)
case virtpb.DropletState_OFF:
log.Info("OFF STATE", d.StartState, d.Hostname, hname, virtpb.FormatDuration(dur))
good = false
failed += 1
// missing = append(missing, d)
default:
log.Info("WTF STATE", d.StartState, d.Hostname, hname, "Current.State =", d.Current.State, virtpb.FormatDuration(dur))
good = false
failed += 1
missing = append(missing, d)
} }
} }
var summary string = "(" var summary string = "("
summary += fmt.Sprintf("total = %d ", total) summary += fmt.Sprintf("total = %d ", total)
summary += fmt.Sprintf("working = %d ", working) summary += fmt.Sprintf("working = %d ", working)
if missing > 0 { if len(missing) > 0 {
summary += fmt.Sprintf("missing = %d ", missing) summary += fmt.Sprintf("missing = %d ", len(missing))
} }
if unknown > 0 { if unknown > 0 {
summary += fmt.Sprintf("unknown = %d ", unknown, unknownList) summary += fmt.Sprintf("unknown = %d %+v", unknown, unknownList)
} }
if failed > 0 { if failed > 0 {
summary += fmt.Sprintf("failed = %d ", failed) summary += fmt.Sprintf("failed = %d ", failed)
@ -164,14 +225,19 @@ func clusterHealthy() (bool, string) {
summary += "(killcount=" + fmt.Sprintf("%d", me.killcount) + ")" summary += "(killcount=" + fmt.Sprintf("%d", me.killcount) + ")"
} }
last := time.Since(me.unstable) last := time.Since(me.unstable)
if last > 133*time.Second { s := strings.TrimSpace(virtpb.FormatDuration(last))
if last > me.clusterStableDuration {
// the cluster has not been stable for 10 seconds // the cluster has not been stable for 10 seconds
s := strings.TrimSpace(shell.FormatDuration(last))
summary += "(stable=" + s + ")" summary += "(stable=" + s + ")"
} else {
summary += "(unstable=" + s + ")"
}
for _, d := range missing {
summary += fmt.Sprint("\nmissing droplet: ", d.Hostname, " current state ", d.Current.State)
} }
if good { if good {
return good, "GOOD=true " + summary return good, "GOOD=true " + summary
} }
me.unstable = time.Now() // me.unstable = time.Now()
return good, "GOOD=false " + summary return good, "GOOD=false " + summary
} }

98
post.go Normal file
View File

@ -0,0 +1,98 @@
package main
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"os/user"
"go.wit.com/log"
)
// httpPost sends data as an HTTP POST to url, tagging the request with
// "author" (local username) and "hostname" headers so the server can
// identify the sender. Returns the response body.
func httpPost(url string, data []byte) ([]byte, error) {
	req, err := http.NewRequest(http.MethodPost, url, bytes.NewBuffer(data))
	if err != nil {
		// a malformed URL makes NewRequest fail; without this check the
		// Header.Set calls below would panic on a nil request
		log.Error(err)
		return nil, err
	}
	usr, _ := user.Current()
	if usr != nil {
		req.Header.Set("author", usr.Username)
	}
	hostname, _ := os.Hostname()
	req.Header.Set("hostname", hostname)

	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		log.Error(err)
		return nil, err
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Error(err)
		return body, err
	}
	return body, nil
}
// parseURL splits argv.Server into its hostname and port components.
// The host/hostname/port pieces are echoed to stdout for debugging.
// On a parse error, two empty strings are returned.
func parseURL() (string, string) {
	parsed, err := url.Parse(argv.Server)
	if err != nil {
		fmt.Println("Error parsing URL:", err)
		return "", ""
	}

	// Host includes the port when one is present
	fmt.Println("Host:", parsed.Host)

	// Hostname is the host with any port stripped
	hostname := parsed.Hostname()
	fmt.Println("Hostname:", hostname)

	port := parsed.Port()
	fmt.Println("Port:", port)

	return hostname, port
}
// gusPost asks the gus forwarder (port 2522 on the host taken from
// argv.Server via parseURL) to enable forwarding of 'port' to 'dest'.
// The request carries author/hostname headers plus the port/dest pair;
// the response body is returned to the caller.
func gusPost(port string, dest string) ([]byte, error) {
	gus, _ := parseURL()
	url := fmt.Sprintf("http://%s:%d/%s?port=%s&dest=%s", gus, 2522, "enable", port, dest)
	// placeholder payload; the server keys off the query params + headers
	data := []byte("hello world")

	req, err := http.NewRequest(http.MethodPost, url, bytes.NewBuffer(data))
	if err != nil {
		// guard against a nil request before touching req.Header
		log.Error(err)
		return nil, err
	}
	usr, _ := user.Current()
	if usr != nil {
		req.Header.Set("author", usr.Username)
	}
	hostname, _ := os.Hostname()
	req.Header.Set("hostname", hostname)
	req.Header.Set("port", port)
	req.Header.Set("dest", dest)
	// was "gusPust": fix the function name in the log line
	log.Printf("gusPost url(%s) port(%s) dest(%s) hostname(%s)\n", url, port, dest, hostname)

	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		log.Error(err)
		return nil, err
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Error(err)
		return body, err
	}
	return body, nil
}

View File

@ -0,0 +1,5 @@
dirs: "/var/lib/libvirt/images"
dirs: "/home/isos"
dirs: "/home/nfs"
dirs: "/home/ceph"
dirs: "/home"

View File

@ -0,0 +1,44 @@
droplets: {
hostname: "git.wit.org"
cpus: 16
memory: 103079215104
preferred_hypervisor: "farm04"
qemu_machine: "pc-q35-9.0"
networks: {
mac: "22:22:22:22:22:03"
name: ""
}
disks: {
filename: "git.wit.org.qcow2"
filepath: "/home/nfs3"
}
}
droplets: {
hostname: "go.wit.com"
cpus: 2
memory: 2147483648
preferred_hypervisor: "farm04"
qemu_machine: "pc-q35-9.0"
networks: {
mac: "22:22:22:22:22:05"
name: ""
}
disks: {
filename: "go.wit.com.qcow2"
filepath: "/home/nfs"
}
}
droplets: {
hostname: "wekan.foo.com"
cpus: 2
memory: 2147483648
qemu_machine: "pc-q35-9.1"
networks: {
mac: "22:22:22:22:22:08"
name: ""
}
disks: {
filename: "wekan.foo.com.qcow2"
filepath: "/home/nfs"
}
}

View File

@ -0,0 +1,10 @@
events: {
droplet: "www.foo.org"
start: {
seconds: 1729895589
nanos: 425114400
}
field_name: "Droplet.Memory"
orig_val: "1073741824"
new_val: "2147483648"
}

View File

@ -0,0 +1,17 @@
hypervisors: {
uuid: "11111111-2222-3333-4444-555555555555"
hostname: "hyper01"
active: true
cpus: 16
memory: 8796093022208
comment: "this is a fake hypervisor"
autoscan: true
}
hypervisors: {
hostname: "hyper02"
active: true
cpus: 16
memory: 8796093022208
comment: "this is a fake hypervisor"
autoscan: true
}

View File

@ -112,3 +112,21 @@ DomainInterface: ROM
DomainInterface: ACPI DomainInterface: ACPI
DomainInterface: Alias DomainInterface: Alias
DomainInterface: Address DomainInterface: Address
Fields in libvirtxml.DomainInterfaceSource
libvirtxml.DomainInterfaceSource User
libvirtxml.DomainInterfaceSource Ethernet
libvirtxml.DomainInterfaceSource VHostUser
libvirtxml.DomainInterfaceSource Server
libvirtxml.DomainInterfaceSource Client
libvirtxml.DomainInterfaceSource MCast
libvirtxml.DomainInterfaceSource Network
libvirtxml.DomainInterfaceSource Bridge
libvirtxml.DomainInterfaceSource Internal
libvirtxml.DomainInterfaceSource Direct
libvirtxml.DomainInterfaceSource Hostdev
libvirtxml.DomainInterfaceSource UDP
libvirtxml.DomainInterfaceSource VDPA
libvirtxml.DomainInterfaceSource Null
libvirtxml.DomainInterfaceSource VDS
Fields in libvirtxml.DomainInterfaceSourceBridge
libvirtxml.DomainInterfaceSourceBridge Bridge

View File

@ -1,191 +0,0 @@
<domain type='kvm'>
<name>go.wit.com</name>
<uuid>9e795cd7-7142-4757-bef2-f607b4f9944f</uuid>
<metadata>
<libosinfo:libosinfo xmlns:libosinfo="http://libosinfo.org/xmlns/libvirt/domain/1.0">
<libosinfo:os id="http://debian.org/debian/12"/>
</libosinfo:libosinfo>
</metadata>
<memory unit='KiB'>2097152</memory>
<currentMemory unit='KiB'>2097152</currentMemory>
<vcpu placement='static'>2</vcpu>
<resource>
<partition>/machine</partition>
</resource>
<os>
<type arch='x86_64' machine='pc-q35-9.0'>hvm</type>
<boot dev='hd'/>
</os>
<features>
<acpi/>
<apic/>
<vmport state='off'/>
</features>
<cpu mode='host-passthrough' check='none' migratable='on'/>
<clock offset='utc'>
<timer name='rtc' tickpolicy='catchup'/>
<timer name='pit' tickpolicy='delay'/>
<timer name='hpet' present='no'/>
</clock>
<on_poweroff>destroy</on_poweroff>
<on_reboot>restart</on_reboot>
<on_crash>destroy</on_crash>
<pm>
<suspend-to-mem enabled='no'/>
<suspend-to-disk enabled='no'/>
</pm>
<devices>
<emulator>/usr/bin/qemu-system-x86_64</emulator>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2' />
<source file='/home/go.wit.com.qcow2'/>
<backingStore/>
<target dev='sda' bus='sata'/>
<address type='drive' controller='0' bus='0' target='0' unit='0'/>
</disk>
<controller type='usb' index='0' model='qemu-xhci' ports='15'>
<address type='pci' domain='0x0000' bus='0x02' slot='0x00' function='0x0'/>
</controller>
<controller type='pci' index='0' model='pcie-root'/>
<controller type='pci' index='1' model='pcie-root-port'>
<model name='pcie-root-port'/>
<target chassis='1' port='0x10'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x0' multifunction='on'/>
</controller>
<controller type='pci' index='2' model='pcie-root-port'>
<model name='pcie-root-port'/>
<target chassis='2' port='0x11'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x1'/>
</controller>
<controller type='pci' index='3' model='pcie-root-port'>
<model name='pcie-root-port'/>
<target chassis='3' port='0x12'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x2'/>
</controller>
<controller type='pci' index='4' model='pcie-root-port'>
<model name='pcie-root-port'/>
<target chassis='4' port='0x13'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x3'/>
</controller>
<controller type='pci' index='5' model='pcie-root-port'>
<model name='pcie-root-port'/>
<target chassis='5' port='0x14'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x4'/>
</controller>
<controller type='pci' index='6' model='pcie-root-port'>
<model name='pcie-root-port'/>
<target chassis='6' port='0x15'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x5'/>
</controller>
<controller type='pci' index='7' model='pcie-root-port'>
<model name='pcie-root-port'/>
<target chassis='7' port='0x16'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x6'/>
</controller>
<controller type='pci' index='8' model='pcie-root-port'>
<model name='pcie-root-port'/>
<target chassis='8' port='0x17'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x02' function='0x7'/>
</controller>
<controller type='pci' index='9' model='pcie-root-port'>
<model name='pcie-root-port'/>
<target chassis='9' port='0x18'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0' multifunction='on'/>
</controller>
<controller type='pci' index='10' model='pcie-root-port'>
<model name='pcie-root-port'/>
<target chassis='10' port='0x19'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x1'/>
</controller>
<controller type='pci' index='11' model='pcie-root-port'>
<model name='pcie-root-port'/>
<target chassis='11' port='0x1a'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x2'/>
</controller>
<controller type='pci' index='12' model='pcie-root-port'>
<model name='pcie-root-port'/>
<target chassis='12' port='0x1b'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x3'/>
</controller>
<controller type='pci' index='13' model='pcie-root-port'>
<model name='pcie-root-port'/>
<target chassis='13' port='0x1c'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x4'/>
</controller>
<controller type='pci' index='14' model='pcie-root-port'>
<model name='pcie-root-port'/>
<target chassis='14' port='0x1d'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x5'/>
</controller>
<controller type='pci' index='15' model='pcie-root-port'>
<model name='pcie-root-port'/>
<target chassis='15' port='0x1e'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x6'/>
</controller>
<controller type='pci' index='16' model='pcie-to-pci-bridge'>
<model name='pcie-pci-bridge'/>
<address type='pci' domain='0x0000' bus='0x04' slot='0x00' function='0x0'/>
</controller>
<controller type='sata' index='0'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x1f' function='0x2'/>
</controller>
<controller type='virtio-serial' index='0'>
<address type='pci' domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
</controller>
<controller type='scsi' index='0' model='lsilogic'>
<address type='pci' domain='0x0000' bus='0x10' slot='0x01' function='0x0'/>
</controller>
<interface type='bridge'>
<mac address='22:22:22:22:22:22'/>
<source bridge='worldbr'/>
<model type='virtio'/>
<address type='pci' domain='0x0000' bus='0x01' slot='0x00' function='0x0'/>
</interface>
<serial type='pty'>
<target type='isa-serial' port='0'>
<model name='isa-serial'/>
</target>
</serial>
<console type='pty'>
<target type='serial' port='0'/>
</console>
<channel type='unix'>
<target type='virtio' name='org.qemu.guest_agent.0'/>
<address type='virtio-serial' controller='0' bus='0' port='1'/>
</channel>
<channel type='spicevmc'>
<target type='virtio' name='com.redhat.spice.0'/>
<address type='virtio-serial' controller='0' bus='0' port='2'/>
</channel>
<input type='tablet' bus='usb'>
<address type='usb' bus='0' port='1'/>
</input>
<input type='mouse' bus='ps2'/>
<graphics type='spice' autoport='yes'>
<listen type='address'/>
<image compression='off'/>
</graphics>
<sound model='ich9'>
<address type='pci' domain='0x0000' bus='0x00' slot='0x1b' function='0x0'/>
</sound>
<video>
<model type='virtio' heads='1' primary='yes'/>
<address type='pci' domain='0x0000' bus='0x00' slot='0x01' function='0x0'/>
</video>
<redirdev bus='usb' type='spicevmc'>
<address type='usb' bus='0' port='2'/>
</redirdev>
<redirdev bus='usb' type='spicevmc'>
<address type='usb' bus='0' port='3'/>
</redirdev>
<memballoon model='virtio'>
<address type='pci' domain='0x0000' bus='0x05' slot='0x00' function='0x0'/>
</memballoon>
<rng model='virtio'>
<backend model='random'>/dev/urandom</backend>
<address type='pci' domain='0x0000' bus='0x06' slot='0x00' function='0x0'/>
</rng>
</devices>
<seclabel type='dynamic' model='apparmor' relabel='yes'/>
</domain>

View File

@ -1,6 +0,0 @@
<domain type='kvm'>
<name>jcarr</name>
<uuid>c8684902-c405-4f31-b532-128c277056cc</uuid>
<memory unit='GiB'>4</memory>
<vcpu placement='static'>8</vcpu>
</domain>

114
start.go Normal file
View File

@ -0,0 +1,114 @@
package main
// validates the droplet information
// finds a hypervisor
// attempts to start the virtual machine
import (
"errors"
"fmt"
"math/rand"
"time"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/log"
)
// isClusterStable reports how long the grid has been stable and
// returns a non-nil error while it is still inside the unstable
// window. Callers use this to throttle droplet starts.
func isClusterStable() (string, error) {
	dur := time.Since(me.unstable)
	msg := fmt.Sprintln("trying to start droplet here. grid stable for: ", virtpb.FormatDuration(dur))
	if dur >= me.unstableTimeout {
		// stable long enough: starts are allowed
		return msg, nil
	}
	limit := virtpb.FormatDuration(me.unstableTimeout)
	warn := "grid is still too unstable (unstable timeout = " + limit + ")\n"
	return msg + warn, errors.New(warn)
}
// for now, because sometimes this should write to stdout and
// sometimes to http socket, it returns a string
// Start attempts to start the droplet with the given uuid on some
// hypervisor. It validates the droplet, honors ForceHypervisor and
// PreferredHypervisor settings, and otherwise picks a random member of
// the active-hypervisor pool. The returned string is human-readable
// progress output (written to stdout or an http socket by the caller).
func Start(id string) (string, error) {
	var result string

	// throttle: refuse to start droplets while the cluster is unstable
	if s, err := isClusterStable(); err != nil {
		result += s
		return result, err
	}

	// lookup the droplet by uuid
	d := me.cluster.FindDropletByUuid(id)
	if d == nil {
		result = "can't start unknown droplet: " + id
		return result, errors.New(result)
	}

	// validate the droplet (mac addrs, spice port, etc)
	if err := ValidateDroplet(d); err != nil {
		log.Info("ValidateDroplet() failed", err)
		result = "ValidateDroplet() failed droplet " + d.Hostname
		return result, err
	}

	if d.Current == nil {
		d.Current = new(virtpb.Current)
	}

	// is the droplet already on?
	if d.Current.State == virtpb.DropletState_ON {
		result = "EVENT start droplet " + d.Hostname + " is already ON"
		return result, errors.New(result)
	}

	// make the list of hypervisors that are active and can start new droplets
	var pool []*HyperT
	for _, h := range me.hypers {
		// this droplet is set to use this and only this hypervisor
		if d.ForceHypervisor == h.pb.Hostname {
			ok, b := h.start(d)
			if ok {
				return result + b, nil
			}
			return result + b, errors.New("start " + d.Hostname + " on hypervisor " + h.pb.Hostname)
		}
		// skip hypervisors marked inactive
		if !h.pb.Active {
			result += fmt.Sprintln("hypervisor is inactive:", d.Hostname, "for", h.pb.Hostname, h.pb.Active)
			continue
		}
		// the config file says this droplet should run on this hypervisor:
		// try it before falling back to the random pool below
		if d.PreferredHypervisor == h.pb.Hostname {
			ok, b := h.start(d)
			if ok {
				return result + b, nil
			}
			return result + b, errors.New("start " + d.Hostname + " on hypervisor " + h.pb.Hostname)
		}
		result += fmt.Sprintln("hypervisor ready:", d.Hostname, "for", h.pb.Hostname, h.pb.Active)
		pool = append(pool, h)
	}

	// guard: rand.Intn(0) panics, so bail out cleanly when no active
	// hypervisor made it into the pool
	if len(pool) == 0 {
		result += fmt.Sprintln("no active hypervisor available for", d.Hostname)
		return result, errors.New("no hypervisor available to start " + d.Hostname)
	}

	// pick a random member of the pool
	rand.Seed(time.Now().UnixNano())
	n := rand.Intn(len(pool))
	result += fmt.Sprintln("pool has", len(pool), "members", "rand =", n)
	h := pool[n]
	ok, output := h.start(d)
	if ok {
		return result + output, nil
	}
	return result + output, errors.New("start " + d.Hostname + " on hypervisor " + h.pb.Hostname)
}

View File

@ -1,13 +1,17 @@
package main package main
import ( import (
"net/url"
"time" "time"
pb "go.wit.com/lib/protobuf/virtbuf" "go.wit.com/dev/alexflint/arg"
"libvirt.org/go/libvirtxml" "go.wit.com/gui"
"go.wit.com/lib/gadgets"
"go.wit.com/lib/gui/prep"
"go.wit.com/lib/protobuf/virtpb"
) )
var me virtigoT var me *virtigoT
// disable the GUI // disable the GUI
func (b *virtigoT) Disable() { func (b *virtigoT) Disable() {
@ -21,31 +25,43 @@ func (b *virtigoT) Enable() {
// this app's variables // this app's variables
type virtigoT struct { type virtigoT struct {
cluster *pb.Cluster pp *arg.Parser // go-arg parser
events *pb.Events myGui *prep.GuiPrep // the gui toolkit handle
names []string e *virtpb.Events // virt protobuf events
hypers []*HyperT hmap map[*virtpb.Hypervisor]*HyperT // map to the local struct
droplets []*DropletT names []string // ?
delay time.Duration // how often to poll the hypervisors hypers []*HyperT // notsure
killcount int killcount int // how many times virtigo-d has had to been killed
unstable time.Time // the last time the cluster was incorrect unstable time.Time // the last time the cluster was incorrect
changed bool changed bool // have things changed?
hyperPollDelay time.Duration // how often to poll the hypervisors
unstableTimeout time.Duration // how long a droplet can be unstable until it's declared dead
clusterStableDuration time.Duration // how long the cluster must be stable before new droplets can be started
missingDropletTimeout time.Duration // how long a droplet can be missing for
status *gui.Node // the cluster status
lastuptime *gui.Node // the last time uptime was checked by Kuma
clusters *virtpb.Clusters // clusters protobuf
cmap map[*virtpb.Cluster]*adminT // map to local GUI objects and the protobuf
gwin *gadgets.GenericWindow // main window
cluster *virtpb.OldCluster // basic cluster settings
// admin *adminT // the admin struct
}
// cluster "admin" mode
type adminT struct {
cluster *virtpb.Cluster // the cluster protobuf
uptime *gui.Node // the uptime message
dwin *stdDropletTableWin // the droplet window
hwin *stdHypervisorTableWin // the hypervisor window
ewin *stdEventTableWin // the events window
url *url.URL // URL for the cloud
} }
// the stuff that is needed for a hypervisor // the stuff that is needed for a hypervisor
type HyperT struct { type HyperT struct {
pb *pb.Hypervisor // the Hypervisor protobuf pb *virtpb.Hypervisor // the Hypervisor protobuf
dog *time.Ticker // the watchdog timer itself dog *time.Ticker // the watchdog timer itself
lastpoll time.Time // the last time the hypervisor polled lastpoll time.Time // the last time the hypervisor polled
killcount int lastDroplets map[string]time.Time // the vm's in the last poll
} killcount int // how many times the daemon has been forcably killed
// the stuff that is needed for a hypervisor
type DropletT struct {
pb *pb.Droplet // the Droplet protobuf
xml *libvirtxml.Domain // a xml representation from libvirt
h *HyperT // the hypervisor it's currently running on
CurrentState pb.DropletState // what the state of the droplet is ACTUALLY IS
lastpoll time.Time // the last time the droplet was seen running
starts int // how many times a start event has been attempted
} }

View File

@ -14,16 +14,22 @@ package main
*/ */
import ( import (
"errors"
"os" "os"
"path/filepath"
"strings"
"github.com/google/uuid" "github.com/google/uuid"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/log" "go.wit.com/log"
) )
// will make sure the mac address is unique // will make sure the mac address is unique
func checkUniqueMac(mac string) bool { func ValidateUniqueMac(mac string) bool {
for _, d := range me.cluster.Droplets { loop := me.cluster.DropletsAll() // get the list of droplets
for loop.Scan() {
d := loop.Next()
for _, n := range d.Networks { for _, n := range d.Networks {
if n.Mac == mac { if n.Mac == mac {
log.Info("duplicate MAC", n.Mac, "in droplet", d.Hostname) log.Info("duplicate MAC", n.Mac, "in droplet", d.Hostname)
@ -34,7 +40,133 @@ func checkUniqueMac(mac string) bool {
return true return true
} }
func checkDroplets(dump bool) bool { // records all the known paths. this should go in the protobuf
// addClusterFilepath records dir in the cluster's list of known
// directories. If dir is new (and not "."), an Add event is created,
// the dir is appended, and the event is returned; otherwise nil.
func addClusterFilepath(dir string) *virtpb.Event {
	if dir == "." {
		// the current directory is never recorded
		return nil
	}
	for _, known := range me.cluster.Dirs {
		if known == dir {
			// already recorded
			return nil
		}
	}
	// new directory: make an Add event and remember it
	e := virtpb.NewAddEvent(nil, "Add Cluster Directory", dir)
	me.cluster.Dirs = append(me.cluster.Dirs, dir)
	return e
}
// lookupFilename returns the droplet that owns a disk whose base
// filename matches the given filename, or nil when no droplet does.
func lookupFilename(filename string) *virtpb.Droplet {
	want := filepath.Base(filename)
	all := me.cluster.DropletsAll() // iterate over every droplet
	for all.Scan() {
		d := all.Next()
		for _, disk := range d.Disks {
			if disk.Filename == want {
				return d
			}
		}
	}
	return nil
}
// ValidateUniqueFilenames checks that no two droplets share a disk
// image filename, recording each disk's filepath in the cluster
// directory list as a side effect. Returns true when every disk
// filename is unique.
func ValidateUniqueFilenames() bool {
	ok := true
	disks := make(map[string]string) // disk filename -> droplet hostname
	loop := me.cluster.DropletsAll() // get the list of droplets
	for loop.Scan() {
		d := loop.Next()
		for _, disk := range d.Disks {
			filename := disk.Filename
			addClusterFilepath(disk.Filepath)
			// BUGFIX: the original wrote `if _, ok := disks[filename]; ok`
			// which shadowed the outer ok, so `ok = false` inside the
			// branch was lost and the function always returned true.
			if owner, dup := disks[filename]; dup {
				log.Info("file", filename, "on droplet", owner)
				log.Info("file", filename, "on droplet", d.Hostname)
				log.Info("duplicate disk names (--xml-ignore-disk to ignore)")
				ok = false
			}
			disks[filename] = d.Hostname
		}
	}
	if ok {
		log.Println("validated okay: no duplicate disk images")
	}
	return ok
}
// ValidateDiskFilenames normalizes every droplet's disk records:
// filenames are reduced to their base name (the directory part moves
// into disk.Filepath), each directory is registered with the cluster,
// and a change Event is generated for every field it rewrites.
// It errors out on any disk that is not a .qcow2 or .img image.
// Returns the accumulated change events.
func ValidateDiskFilenames() ([]*virtpb.Event, error) {
	var alle []*virtpb.Event
	loop := me.cluster.DropletsAll() // get the list of droplets
	for loop.Scan() {
		d := loop.Next()
		// found tracks whether any disk follows the <hostname>.qcow2
		// (or .img) naming convention for this droplet
		var found bool = false
		for _, disk := range d.Disks {
			filename := disk.Filename
			filebase := filepath.Base(filename)
			dir := filepath.Dir(filename)
			addClusterFilepath(dir)
			if disk.Filename != filebase {
				// filename had a directory component: strip it and
				// record the change as an event
				e := d.NewChangeEvent("Disk.Filename", disk.Filename, filebase)
				alle = append(alle, e)
				disk.Filename = filebase
			}
			// make sure the filename is the hostname + .qcow2
			filetype := filepath.Ext(filebase)
			if filetype == ".img" {
				// raw images are tolerated as-is; skip the filepath fixup
				found = true
				continue
			}
			if filetype != ".qcow2" {
				log.Info("file type", filetype, "not supported for", filebase, "on", d.Hostname)
				return nil, errors.New("only supporting qcow2 images for now")
			}
			test := strings.TrimSuffix(filebase, filetype)
			if test == d.Hostname {
				found = true
			}
			// "." and "" mean the filename carried no usable directory;
			// leave disk.Filepath alone in that case
			if dir == "." {
				continue
			}
			if dir == "" {
				continue
			}
			if disk.Filepath != dir {
				// move the stripped directory into Filepath, with an event
				e := d.NewChangeEvent("Disk.Filepath", disk.Filepath, dir)
				alle = append(alle, e)
				disk.Filepath = dir
			}
		}
		if !found {
			log.Info("droplet", d.Hostname, d.Disks)
			log.Warn("droplet " + d.Hostname + " has nonstandard disk names")
		}
	}
	return alle, nil
}
// consistency check. run on a regular basis
//
// runs on startup. dies if there are duplicates
// the config file must then be edited by hand for now
func ValidateDroplets() (map[string]string, map[string]string, error) {
// uuid map to check for duplicates // uuid map to check for duplicates
var umap map[string]string var umap map[string]string
umap = make(map[string]string) umap = make(map[string]string)
@ -43,7 +175,9 @@ func checkDroplets(dump bool) bool {
var macs map[string]string var macs map[string]string
macs = make(map[string]string) macs = make(map[string]string)
for _, d := range me.cluster.Droplets { loop := me.cluster.DropletsAll() // get the list of droplets
for loop.Scan() {
d := loop.Next()
// Generate a new UUID // Generate a new UUID
if d.Uuid == "" { if d.Uuid == "" {
u := uuid.New() u := uuid.New()
@ -55,7 +189,14 @@ func checkDroplets(dump bool) bool {
// UUID already exists // UUID already exists
log.Info("duplicate UUID", d.Uuid, umap[d.Uuid]) log.Info("duplicate UUID", d.Uuid, umap[d.Uuid])
log.Info("duplicate UUID", d.Uuid, d.Hostname) log.Info("duplicate UUID", d.Uuid, d.Hostname)
os.Exit(-1) if d.Archive == nil {
d.Archive = new(virtpb.Archive)
log.Info("d.Archive was nil for", d.Hostname)
// os.Exit(-1)
}
d.Archive.Reason = virtpb.DropletArchive_DUP
continue
// return umap, macs, errors.New("duplicate UUID: " + d.Uuid)
} }
umap[d.Uuid] = d.Hostname umap[d.Uuid] = d.Hostname
@ -65,7 +206,7 @@ func checkDroplets(dump bool) bool {
// UUID already exists // UUID already exists
log.Info("duplicate MAC", n.Mac, macs[n.Mac], umap[macs[n.Mac]]) log.Info("duplicate MAC", n.Mac, macs[n.Mac], umap[macs[n.Mac]])
log.Info("duplicate MAC", n.Mac, d.Hostname) log.Info("duplicate MAC", n.Mac, d.Hostname)
os.Exit(-1) return umap, macs, errors.New("duplicate MAC: " + n.Mac)
} }
macs[n.Mac] = d.Uuid macs[n.Mac] = d.Uuid
} }
@ -73,15 +214,159 @@ func checkDroplets(dump bool) bool {
log.Println("validated okay: no duplicate MAC addr") log.Println("validated okay: no duplicate MAC addr")
log.Println("validated okay: no duplicate UUID") log.Println("validated okay: no duplicate UUID")
if dump { return umap, macs, nil
for u, hostname := range umap { }
log.Println("uuid:", u, "hostname:", hostname)
}
for mac, uuid := range macs { func searchForDuplicateUUIDs() {
log.Println("mac:", mac, "uuid", uuid, "hostname:", umap[uuid]) // var broken int
}
/*
// remove from the slice
func deleteDroplet(bad int) {
var all *virtpb.Droplets
all = me.cluster.DeleteDroplet(b *db.Droplet)
fmt.Println("deleting", bad, all.Droplets[bad].Hostname)
// Check if the index is within bounds
if bad >= 0 && bad < len(all.Droplets) {
// Remove element at targetIndex
all.Droplets = append(all.Droplets[:bad], all.Droplets[bad+1:]...)
}
}
*/
// checks a droplet right before a start event
// verify ethernet mac address
// verify uuid (but probably can ignore this since it's not used)
// check qemu domain id
// check spice and vnc ports
// check filenames
func ValidateDroplet(check *virtpb.Droplet) error {
// check for duplicate uuid's
loop := me.cluster.DropletsAll() // get the list of droplets
for loop.Scan() {
d := loop.Next()
if check == d {
continue
}
if d.Uuid == check.Uuid {
// UUID already exists
log.Info("duplicate UUID", d.Uuid, d.Hostname)
log.Info("duplicate UUID", d.Uuid, check.Hostname)
// d.Archive = new(virtpb.DropletArchive)
if d.Archive == nil {
log.Info("d.Archive == nil")
os.Exit(-1)
}
d.Archive.Reason = virtpb.DropletArchive_DUP
// return errors.New("duplicate UUID: " + d.Uuid)
} }
} }
return false // check for duplicate mac addresses
for _, checkn := range check.Networks {
log.Info("found mac = ", checkn.Mac, check.Hostname)
if checkn.Mac == "" {
checkn.Mac = getNewMac()
if err := me.cluster.ConfigSave(); err != nil {
log.Info("configsave error", err)
os.Exit(-1)
}
}
loop := me.cluster.DropletsAll() // get the list of droplets
for loop.Scan() {
d := loop.Next()
if check == d {
continue
}
for _, n := range d.Networks {
if checkn.Mac == n.Mac {
// MAC already exists
log.Info("duplicate MAC", n.Mac, d.Hostname)
log.Info("duplicate MAC", n.Mac, check.Hostname)
return errors.New("duplicate MAC: " + n.Mac)
}
}
}
}
if err := setUniqueSpicePort(check); err != nil {
return err
}
return nil
}
func setUniqueSpicePort(check *virtpb.Droplet) error {
var ports map[int64]*virtpb.Droplet
ports = make(map[int64]*virtpb.Droplet)
// check spice ports
// checkn.SpicePort = getUniqueSpicePort()
loop := me.cluster.DropletsAll() // get the list of droplets
for loop.Scan() {
d := loop.Next()
if d.SpicePort == 0 {
continue
}
if dup, ok := ports[d.SpicePort]; ok {
// dup := ports[d.SpicePort]
log.Warn("duplicate ports", d.SpicePort, d.Hostname, d.Current.State)
if d.Current.State != virtpb.DropletState_ON {
// hack for now. should be safe to erase this
d.SpicePort = 0
log.Warn("erasing port for non-ON droplet", d.SpicePort, d.Hostname, d.Current.State)
}
log.Warn("duplicate ports", dup.SpicePort, dup.Hostname, dup.Current.State)
if dup.Current.State != virtpb.DropletState_ON {
// hack for now. should be safe to erase this
dup.SpicePort = 0
log.Warn("erasing port for non-ON droplet", dup.SpicePort, dup.Hostname, dup.Current.State)
}
// todo: fix this somewhow
return errors.New("duplicate ports")
}
ports[d.SpicePort] = d
}
for p, d := range ports {
log.Info("found spice port", p, "on", d.Hostname)
}
var start int64
start = 5910
for {
if start == 6000 {
// x11 might use this on dom0's running a desktop
// maybe qemu uses it iternally
start += 1
continue
}
if _, ok := ports[start]; ok {
d := ports[start]
log.Info("already using port", start, "on", d.Hostname)
if d == check {
log.Info("this is good because it's me!", check.Hostname, d.Hostname)
return nil
}
start += 1
continue
}
// generate change port event
log.Info("going to try port", start, "on", check.Hostname)
e := check.NewChangeEvent("SpicePort", check.SpicePort, start)
me.cluster.AddEvent(e)
// set port to start
check.SpicePort = start
// write out config file
if err := me.cluster.ConfigSave(); err != nil {
log.Info("config save error inside here is bad", err)
return err
}
return nil
}
} }

View File

@ -15,7 +15,7 @@ func TimeFunction(f func()) time.Duration {
} }
func (h *HyperT) NewWatchdog() { func (h *HyperT) NewWatchdog() {
h.dog = time.NewTicker(me.delay) h.dog = time.NewTicker(me.hyperPollDelay)
defer h.dog.Stop() defer h.dog.Stop()
done := make(chan bool) done := make(chan bool)
/* /*

33
windowCreate.go Normal file
View File

@ -0,0 +1,33 @@
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
// Use of this source code is governed by the GPL 3.0
package main
// An app to submit patches for the 30 GO GUI repos
import (
"go.wit.com/lib/gadgets"
"go.wit.com/log"
)
// createWindow builds the "Create Droplet" settings window and
// returns it. (The local is renamed so it no longer shadows the
// function name.)
func createWindow() *gadgets.GenericWindow {
	win := gadgets.NewGenericWindow("Create Droplet", "settings")
	win.Custom = func() {
		log.Warn("create window close")
	}

	grid := win.Group.RawGrid()
	gadgets.NewBasicEntry(grid, "memory")
	grid.NextRow()

	grid.NewLabel("name")
	grid.NewTextbox("something")
	grid.NextRow()

	grid.NewButton("Start", func() {
		log.Info("make a box")
	})
	return win
}

121
windowDropletCreate.go Normal file
View File

@ -0,0 +1,121 @@
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
// Use of this source code is governed by the GPL 3.0
package main
// An app to submit patches for the 30 GO GUI repos
import (
"fmt"
"strconv"
"go.wit.com/gui"
"go.wit.com/lib/gadgets"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/log"
)
// createDropletWindow builds a window for defining a new droplet
// (hostname, memory, cpus) and posting an ADD event to the cluster.
// The Create button is enabled whenever any field changes.
func (admin *adminT) createDropletWindow() *gadgets.GenericWindow {
	d := new(virtpb.Droplet)
	win := gadgets.NewGenericWindow("Create Droplet "+d.Hostname, "settings")
	win.Custom = func() {
		log.Warn("edit window close")
	}
	grid := win.Group.RawGrid()

	var save *gui.Node // assigned below; the field callbacks only fire after it exists

	grid.NewLabel("name")
	name := grid.NewTextbox("new2.wit.com")
	d.Hostname = "new2.wit.com"
	name.SetText(d.Hostname)
	name.Custom = func() {
		if d.Hostname == name.String() {
			return
		}
		d.Hostname = name.String()
		log.Info("changed droplet name to", d.Hostname)
		save.Enable()
	}
	grid.NextRow()

	mem := gadgets.NewBasicEntry(grid, "memory (GB)")
	mem.SetText("16")
	// BUGFIX: was 16 * 1024 * 2024 * 1024 — "2024" is a typo for 1024,
	// which made the stored byte count disagree with the GB shown
	d.Memory = int64(16 * 1024 * 1024 * 1024)
	grid.NextRow()
	mem.Custom = func() {
		newmem, err := strconv.Atoi(mem.String())
		if err != nil {
			log.Info("mem value error", mem.String(), err)
			mem.SetText(fmt.Sprintf("%d", d.Memory/(1024*1024*1024)))
			return
		}
		if newmem < 1 {
			log.Info("mem can not be < 1")
			mem.SetText(fmt.Sprintf("%d", d.Memory/(1024*1024*1024)))
			return
		}
		// GB -> bytes (BUGFIX: was newmem * (1024 * 2024 * 1024))
		d.Memory = int64(newmem) * 1024 * 1024 * 1024
		log.Info("changed mem value. new val =", d.Memory)
		save.Enable()
	}
	grid.NextRow() // each entry is on it's own row

	cpus := gadgets.NewBasicEntry(grid, "cpus")
	cpus.SetText("4")
	d.Cpus = int64(4)
	cpus.Custom = func() {
		newcpu, err := strconv.Atoi(cpus.String())
		if err != nil {
			log.Info("cpus value error", cpus.String(), err)
			cpus.SetText(fmt.Sprintf("%d", d.Cpus))
			return
		}
		if newcpu < 1 {
			log.Info("cpus can not be < 1")
			cpus.SetText(fmt.Sprintf("%d", d.Cpus))
			return
		}
		d.Cpus = int64(newcpu)
		log.Info("changed cpus value. new val =", d.Cpus)
		save.Enable()
	}
	grid.NextRow() // each entry is on it's own row

	save = grid.NewButton("Create", func() {
		log.Info("save droplet changes here")
		e := new(virtpb.Event)
		e.Etype = virtpb.EventType_ADD
		e.Droplet = d
		if err := admin.postEvent(e); err != nil {
			log.Info("event edit err", err)
		} else {
			log.Info("admin.postEvent() worked (?)")
		}
	})
	// save.Disable()
	return win
}

125
windowDropletEdit.go Normal file
View File

@ -0,0 +1,125 @@
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
// Use of this source code is governed by the GPL 3.0
package main
// GUI window for editing a virtigo droplet (hostname, memory, cpus)
import (
"fmt"
"strconv"
"go.wit.com/gui"
"go.wit.com/lib/gadgets"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/log"
)
// editDropletWindow opens a settings window for an existing droplet.
// Hostname, memory and cpu edits are applied to d immediately and the
// save button posts an EDIT event to the daemon via admin.postEvent().
// Returns the window so the caller can manage its lifetime.
func (admin *adminT) editDropletWindow(d *virtpb.Droplet) *gadgets.GenericWindow {
	win := gadgets.NewGenericWindow("Edit Droplet "+d.Hostname, "settings")
	win.Custom = func() {
		log.Warn("edit window close")
	}
	grid := win.Group.RawGrid()

	// 'save' starts disabled and is enabled by any edit below
	var save *gui.Node

	grid.NewLabel("name")
	name := grid.NewTextbox("something")
	name.SetText(d.Hostname)
	name.Custom = func() {
		if d.Hostname == name.String() {
			return
		}
		d.Hostname = name.String()
		log.Info("changed droplet name to", d.Hostname)
		save.Enable()
	}
	grid.NextRow()

	// memory is displayed in GB but stored on the protobuf in bytes
	mem := gadgets.NewBasicEntry(grid, "memory (GB)")
	mem.SetText(fmt.Sprintf("%d", d.Memory/(1024*1024*1024)))
	grid.NextRow()
	mem.Custom = func() {
		newmem, err := strconv.Atoi(mem.String())
		if err != nil {
			// not a number: restore the current value
			log.Info("mem value error", mem.String(), err)
			mem.SetText(fmt.Sprintf("%d", d.Memory/(1024*1024*1024)))
			return
		}
		if newmem < 1 {
			log.Info("mem can not be < 1")
			mem.SetText(fmt.Sprintf("%d", d.Memory/(1024*1024*1024)))
			return
		}
		// fix: was newmem * (1024 * 2024 * 1024) — the 2024 typo stored
		// the wrong byte count (inconsistent with the 1024*1024*1024
		// divisor above); also convert to int64 before multiplying so
		// the product cannot overflow int on 32-bit builds
		d.Memory = int64(newmem) * (1024 * 1024 * 1024)
		log.Info("changed mem value. new val =", d.Memory)
		save.Enable()
	}
	cpus := gadgets.NewBasicEntry(grid, "cpus")
	cpus.SetText(fmt.Sprintf("%d", d.Cpus))
	grid.NextRow()
	cpus.Custom = func() {
		newcpu, err := strconv.Atoi(cpus.String())
		if err != nil {
			log.Info("cpus value error", cpus.String(), err)
			cpus.SetText(fmt.Sprintf("%d", d.Cpus))
			return
		}
		if newcpu < 1 {
			log.Info("cpus can not be < 1")
			cpus.SetText(fmt.Sprintf("%d", d.Cpus))
			return
		}
		d.Cpus = int64(newcpu)
		log.Info("changed cpus value. new val =", d.Cpus)
		save.Enable()
	}

	// hypervisor selection. NOTE(review): the list is hardcoded here —
	// presumably it should come from the cluster protobuf; confirm
	grid.NewLabel("hypervisor")
	hyper := grid.NewDropdown()
	hyper.AddText("farm03")
	hyper.AddText("farm04")
	hyper.AddText("farm05")
	if d.Current != nil {
		hyper.SetText(d.Current.Hypervisor)
	} else {
		// droplet has no current state; default to the first entry
		hyper.SetText("farm03")
	}
	grid.NextRow()

	grid.NewButton("Start", func() {
		log.Info("make a box")
	})
	save = grid.NewButton("save", func() {
		log.Info("save droplet changes here")
		e := new(virtpb.Event)
		e.Etype = virtpb.EventType_EDIT
		e.Droplet = d
		if err := admin.postEvent(e); err != nil {
			log.Info("event edit err", err)
		}
	})
	save.Disable()
	grid.NewButton("dump", func() {
		// print the droplet protobuf as text for debugging
		t := d.FormatTEXT()
		log.Info(t)
	})
	return win
}

227
windowDroplets.go Normal file
View File

@ -0,0 +1,227 @@
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
// Use of this source code is governed by the GPL 3.0
package main
import (
"fmt"
"strings"
"sync"
"time"
"go.wit.com/gui"
"go.wit.com/lib/gadgets"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/log"
)
// stdDropletTableWin is a generic window that renders a protobuf table
// of droplets below a row of option buttons (see newDropletsWindow).
type stdDropletTableWin struct {
	sync.Mutex                        // guards TB while the table is torn down and rebuilt
	win    *gadgets.GenericWindow     // the machines gui window
	box    *gui.Node                  // the machines gui parent box widget
	pb     *virtpb.Droplets           // the droplets protobuf
	TB     *virtpb.DropletsTable      // the gui table buffer
	update bool                       // if the window should be updated
	Close  func()                     // this function is called when the window is closed
	admin  *adminT                    // used to post events and open droplet edit windows
}
// Toggle shows or hides the droplets window. It is safe to call on a
// nil receiver or before the window has been created.
func (w *stdDropletTableWin) Toggle() {
	if w == nil || w.win == nil {
		return
	}
	w.win.Toggle()
}
// newDropletsWindow builds the droplets window: Active / Inactive /
// Create buttons on top, and a box at the bottom that later holds the
// protobuf table (filled in by doActiveDroplets / doInactiveDroplets).
func newDropletsWindow(admin *adminT) *stdDropletTableWin {
	dwin := new(stdDropletTableWin)
	dwin.admin = admin
	dwin.win = gadgets.NewGenericWindow("virtigo current droplets", "Options")
	dwin.win.Custom = func() {
		log.Info("test delete window here")
	}
	grid := dwin.win.Group.RawGrid()
	grid.NewButton("Active", func() {
		found := virtpb.NewDroplets()
		all := admin.cluster.Droplets.All()
		for all.Scan() {
			vm := all.Next()
			// fix: vm.Current can be nil (editDropletWindow guards the
			// same field); a droplet without state is not active
			if vm.Current == nil || vm.Current.State != virtpb.DropletState_ON {
				continue
			}
			found.Append(vm)
		}
		dwin.doActiveDroplets(found)
	})
	grid.NewButton("Inactive", func() {
		found := virtpb.NewDroplets()
		all := admin.cluster.Droplets.All()
		for all.Scan() {
			vm := all.Next()
			// skip running droplets; a nil Current counts as inactive
			if vm.Current != nil && vm.Current.State == virtpb.DropletState_ON {
				continue
			}
			found.Append(vm)
		}
		dwin.doInactiveDroplets(found)
	})
	grid.NewButton("Create", func() {
		log.Info("create droplet here")
		admin.createDropletWindow()
	})
	// make a box at the bottom of the window for the protobuf table
	dwin.box = dwin.win.Bottom.Box().SetProgName("TBOX")
	return dwin
}
// doInactiveDroplets rebuilds and displays the table of powered-off
// droplets. (The old comment said "active running droplets" — this is
// the inactive table; see the "DropletsPB Off" title below.)
// Columns never read d.Current, so droplets with nil state are safe here.
func (dw *stdDropletTableWin) doInactiveDroplets(pb *virtpb.Droplets) {
	dw.Lock()
	defer dw.Unlock()
	// erase the old table
	if dw.TB != nil {
		dw.TB.Delete()
		dw.TB = nil
	}
	// init the table
	dw.pb = pb
	t := dw.pb.NewTable("DropletsPB Off")
	t.NewUuid()
	t.SetParent(dw.box)
	// "Edit" button column: opens the per-droplet edit window
	dropedit := t.AddButtonFunc("Edit", func(d *virtpb.Droplet) string {
		return "edit"
	})
	dropedit.Custom = func(d *virtpb.Droplet) {
		log.Info("edit droplet here", d.Hostname)
		dw.admin.editDropletWindow(d)
	}
	// "Start" button column: posts a POWERON event for the droplet
	dropon := t.AddButtonFunc("Start", func(d *virtpb.Droplet) string {
		return "poweron"
	})
	dropon.Custom = func(d *virtpb.Droplet) {
		log.Info("start droplet here", d.Hostname)
		log.Info("should start droplet here")
		log.Info(d.SprintHeader())
		e := new(virtpb.Event)
		e.Etype = virtpb.EventType_POWERON
		e.DropletUuid = d.Uuid
		if err := dw.admin.postEvent(e); err != nil {
			log.Info("droplet start err", err)
		}
	}
	// hostname column (button is a stub for a future config window)
	vp := t.AddButtonFunc("Verify Config", func(p *virtpb.Droplet) string {
		return p.Hostname
	})
	vp.Custom = func(d *virtpb.Droplet) {
		log.Info("open config window", d.Hostname)
	}
	t.AddMemory()
	t.AddCpus()
	// final setup and display the table; the Custom callback is
	// registered before ShowTable()
	dw.TB = t
	f := func(e *virtpb.Droplet) {
		log.Info("Triggered. do something here", e.Hostname)
		// m.Enabled = true
	}
	dw.TB.Custom(f)
	dw.TB.ShowTable()
}
// doActiveDroplets rebuilds and displays the table of running
// droplets. Several columns dereference d.Current, so callers must
// only pass droplets with state (newDropletsWindow filters on
// DropletState_ON).
func (dw *stdDropletTableWin) doActiveDroplets(pb *virtpb.Droplets) {
	dw.Lock()
	defer dw.Unlock()
	// erase the old table
	if dw.TB != nil {
		dw.TB.Delete()
		dw.TB = nil
	}
	// init the table
	dw.pb = pb
	t := dw.pb.NewTable("DropletsPB On")
	t.NewUuid()
	t.SetParent(dw.box)
	// serial console hint column
	serial := t.AddButtonFunc("serial", func(p *virtpb.Droplet) string {
		return "ttyS0"
	})
	serial.Custom = func(d *virtpb.Droplet) {
		log.Printf("run %s: socat telnet somewhere %s:%d\n", d.Hostname, argv.Server, d.SpicePort)
		log.Info("socat TCP-LISTEN:5000,reuseaddr,fork EXEC:\"virsh console myvm\"")
	}
	// spice framebuffer console column
	fb := t.AddButtonFunc("fb0 console", func(p *virtpb.Droplet) string {
		return "remmina"
	})
	fb.Custom = func(d *virtpb.Droplet) {
		log.Printf("connect to %s on %s: remmina spice://%s:%d\n", d.Hostname, d.Current.Hypervisor, argv.Server, 10000+d.SpicePort)
		data, err := gusPost(fmt.Sprintf("%d", 10000+d.SpicePort), d.Current.Hypervisor)
		log.Info("data", string(data), "err =", err)
	}
	// t.AddHostname()
	// hostname column; clicking opens the edit window
	vp := t.AddButtonFunc("Hostname", func(p *virtpb.Droplet) string {
		return p.Hostname
	})
	vp.Custom = func(d *virtpb.Droplet) {
		log.Info("edit droplet here", d.Hostname)
		dw.admin.editDropletWindow(d)
	}
	t.AddStringFunc("location", func(d *virtpb.Droplet) string {
		return d.Current.Hypervisor
	})
	t.AddMemory()
	t.AddCpus()
	t.AddSpicePort()
	t.AddTimeFunc("age", func(d *virtpb.Droplet) time.Time {
		age := d.Current.OnSince.AsTime()
		// log.Info("age", d.Hostname, virtpb.FormatDuration(time.Since(age)))
		return age
	})
	t.AddStringFunc("State", func(d *virtpb.Droplet) string {
		if d.Current.State == virtpb.DropletState_ON {
			return "ON"
		}
		if d.Current.State == virtpb.DropletState_OFF {
			return "OFF"
		}
		return "UNKNOWN"
	})
	// one mac address per attached network, newline separated
	t.AddStringFunc("mac addr", func(d *virtpb.Droplet) string {
		var macs []string
		for _, n := range d.Networks {
			macs = append(macs, n.Mac)
		}
		return strings.TrimSpace(strings.Join(macs, "\n"))
	})
	// consistency fix: register the row callback BEFORE showing the
	// table, matching doInactiveDroplets (the original called
	// ShowTable() first and attached Custom afterwards)
	dw.TB = t
	f := func(e *virtpb.Droplet) {
		log.Info("Triggered. do something here", e.Hostname)
		// m.Enabled = true
	}
	dw.TB.Custom(f)
	dw.TB.ShowTable()
}

77
windowEvents.go Normal file
View File

@ -0,0 +1,77 @@
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
// Use of this source code is governed by the GPL 3.0
package main
import (
"sync"
"go.wit.com/gui"
"go.wit.com/lib/gadgets"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/log"
)
// stdEventTableWin is a generic window that renders a protobuf table
// of events (see newEventsWindow / doStdEvents).
type stdEventTableWin struct {
	sync.Mutex                    // guards TB while the table is torn down and rebuilt
	win    *gadgets.GenericWindow // the machines gui window
	box    *gui.Node              // the machines gui parent box widget
	pb     *virtpb.Events         // the protobuf
	TB     *virtpb.EventsTable    // the gui table buffer
	update bool                   // if the window should be updated
}
// Toggle shows or hides the events window. It is safe to call on a
// nil receiver or before the window has been created.
func (w *stdEventTableWin) Toggle() {
	if w == nil || w.win == nil {
		return
	}
	w.win.Toggle()
}
// newEventsWindow builds the generic events window with an empty box
// at the bottom that later holds the protobuf table (see doStdEvents).
func newEventsWindow() *stdEventTableWin {
	ewin := &stdEventTableWin{
		win: gadgets.NewGenericWindow("virtigo current events", "things to do"),
	}
	ewin.win.Custom = func() {
		log.Info("test delete window here")
	}
	// box at the bottom of the window for the protobuf table
	ewin.box = ewin.win.Bottom.Box().SetProgName("TBOX")
	return ewin
}
// doStdEvents tears down any table currently on screen and rebuilds
// the events table from pb, then displays it.
func (dw *stdEventTableWin) doStdEvents(pb *virtpb.Events) {
	dw.Lock()
	defer dw.Unlock()

	// drop the previous table, if any
	if dw.TB != nil {
		dw.TB.Delete()
		dw.TB = nil
	}

	// build a fresh table from the protobuf
	dw.pb = pb
	tbl := dw.pb.NewTable("EventsPB Off")
	tbl.NewUuid()
	tbl.SetParent(dw.box)

	// choose the visible columns
	tbl.AddDropletName()
	tbl.AddDropletUuid()
	tbl.AddHypervisor()

	// register the row callback, then show the table
	dw.TB = tbl
	dw.TB.Custom(func(e *virtpb.Event) {
		log.Info("std EventWindow() something here", e.Droplet)
		// m.Enabled = true
	})
	dw.TB.ShowTable()
}

163
windowHypervisors.go Normal file
View File

@ -0,0 +1,163 @@
// Copyright 2017-2025 WIT.COM Inc. All rights reserved.
// Use of this source code is governed by the GPL 3.0
package main
import (
"fmt"
"sync"
"time"
"go.wit.com/gui"
"go.wit.com/lib/gadgets"
"go.wit.com/lib/protobuf/virtpb"
"go.wit.com/log"
)
// stdHypervisorTableWin is a generic window that renders a protobuf
// table of hypervisors (see newHypervisorsWindow).
type stdHypervisorTableWin struct {
	sync.Mutex                        // guards TB while the table is torn down and rebuilt
	win    *gadgets.GenericWindow     // the machines gui window
	box    *gui.Node                  // the machines gui parent box widget
	pb     *virtpb.Hypervisors        // the protobuf
	TB     *virtpb.HypervisorsTable   // the gui table buffer
	update bool                       // if the window should be updated
}
// Toggle shows or hides the hypervisors window. It is safe to call on
// a nil receiver or before the window has been created.
func (w *stdHypervisorTableWin) Toggle() {
	if w == nil || w.win == nil {
		return
	}
	w.win.Toggle()
}
// newHypervisorsWindow builds the generic hypervisors window with an
// empty box at the bottom that later holds the protobuf table.
func newHypervisorsWindow() *stdHypervisorTableWin {
	hwin := &stdHypervisorTableWin{
		win: gadgets.NewGenericWindow("virtigo current hypervisors", "things to do"),
	}
	hwin.win.Custom = func() {
		log.Info("test delete window here")
	}
	// box at the bottom of the window for the protobuf table
	hwin.box = hwin.win.Bottom.Box().SetProgName("TBOX")
	return hwin
}
// doStdHypervisors rebuilds and displays the hypervisors table from pb.
// NOTE(review): near-duplicate of doNewStdHypervisors below, which has
// the "droplets" column implemented — consider consolidating.
func (dw *stdHypervisorTableWin) doStdHypervisors(pb *virtpb.Hypervisors) {
	dw.Lock()
	defer dw.Unlock()
	// erase the old table
	if dw.TB != nil {
		dw.TB.Delete()
		dw.TB = nil
	}
	// init the table
	dw.pb = pb
	t := dw.pb.NewTable("HypervisorsPB Off")
	t.NewUuid()
	t.SetParent(dw.box)
	// pick the columns
	t.AddHostname()
	t.AddMemory()
	t.AddCpus()
	t.AddKillcount()
	// placeholder: real last-poll tracking is commented out below and
	// this column currently always shows the current time
	t.AddTimeFunc("last poll", func(h *virtpb.Hypervisor) time.Time {
		// hm := me.hmap[h]
		// tmp := hm.lastpoll
		// log.Info("poll age", h.Hostname, virtpb.FormatDuration(time.Since(tmp)))
		return time.Now()
	})
	// placeholder: droplet counting is commented out (the working
	// version lives in doNewStdHypervisors)
	t.AddStringFunc("droplets", func(h *virtpb.Hypervisor) string {
		/*
			var totalDroplets int
			var totalUnknownDroplets int
			// dur := time.Since(h.lastpoll)
			// tmp := virtpb.FormatDuration(dur)
			// fmt.Fprintln(w, h.pb.Hostname, "killcount =", h.killcount, "lastpoll:", tmp)
			hm := me.hmap[h]
			for name, _ := range hm.lastDroplets {
				totalDroplets += 1
				d := me.cluster.FindDropletByName(name)
				if d == nil {
					totalUnknownDroplets += 1
				}
			}
			log.Printf("Total Droplets %d total libvirt only droplets = %d\n", totalDroplets, totalUnknownDroplets)
			return fmt.Sprintf("%d", totalDroplets)
		*/
		return "todo"
	})
	// display the protobuf
	dw.TB = t
	f := func(e *virtpb.Hypervisor) {
		log.Info("std HypervisorWindow() something here", e.Hostname)
		// m.Enabled = true
	}
	dw.TB.Custom(f)
	dw.TB.ShowTable()
}
// doNewStdHypervisors rebuilds and displays the hypervisors table from
// pb, including a live droplet count per hypervisor. Reads the
// package-level 'me' state (me.hmap, me.cluster).
// NOTE(review): near-duplicate of doStdHypervisors above.
func (dw *stdHypervisorTableWin) doNewStdHypervisors(pb *virtpb.Hypervisors) {
	dw.Lock()
	defer dw.Unlock()
	// erase the old table
	if dw.TB != nil {
		dw.TB.Delete()
		dw.TB = nil
	}
	// init the table
	dw.pb = pb
	t := dw.pb.NewTable("HypervisorsPB Off")
	t.NewUuid()
	t.SetParent(dw.box)
	// pick the columns
	t.AddHostname()
	t.AddMemory()
	t.AddCpus()
	t.AddKillcount()
	// placeholder: real last-poll tracking is commented out below and
	// this column currently always shows the current time
	t.AddTimeFunc("last poll", func(h *virtpb.Hypervisor) time.Time {
		// hm := me.hmap[h]
		// tmp := hm.lastpoll
		// log.Info("poll age", h.Hostname, virtpb.FormatDuration(time.Since(tmp)))
		return time.Now()
	})
	// count droplets libvirt last reported on this hypervisor, and how
	// many of those the cluster protobuf does not know about
	t.AddStringFunc("droplets", func(h *virtpb.Hypervisor) string {
		var totalDroplets int
		var totalUnknownDroplets int
		// dur := time.Since(h.lastpoll)
		// tmp := virtpb.FormatDuration(dur)
		// fmt.Fprintln(w, h.pb.Hostname, "killcount =", h.killcount, "lastpoll:", tmp)
		hm := me.hmap[h]
		for name, _ := range hm.lastDroplets {
			totalDroplets += 1
			d := me.cluster.FindDropletByName(name)
			if d == nil {
				totalUnknownDroplets += 1
			}
		}
		// log.Printf("Total Droplets %d total libvirt only droplets = %d\n", totalDroplets, totalUnknownDroplets)
		return fmt.Sprintf("%d", totalDroplets)
	})
	// display the protobuf
	dw.TB = t
	f := func(e *virtpb.Hypervisor) {
		log.Info("std HypervisorWindow() something here", e.Hostname)
		// m.Enabled = true
	}
	dw.TB.Custom(f)
	dw.TB.ShowTable()
}