parent 913b18737b
commit 3c520003ed

argv.go (17 lines changed)
@@ -11,15 +11,16 @@ import "go.wit.com/log"
 var argv args
 
 type args struct {
 	Xml []string `arg:"--libvirt" help:"import qemu xml files: --libvirt /etc/libvirt/qemu/*.xml"`
-	IgnoreCpu bool `arg:"--xml-ignore-cpu" default:"true" help:"ignore non-standard libvirt xml cpus"`
-	IgnoreBr bool `arg:"--xml-ignore-net" default:"true" help:"ignore network bridge name changes"`
-	IgnDisk bool `arg:"--xml-ignore-disk" default:"false" help:"ignore duplicate disk names"`
-	Config string `arg:"env:VIRTIGO_HOME" help:"defaults to ~/.config/virtigo/"`
-	Port int `arg:"--port" default:"8080" help:"allow droplet events via http"`
-	Daemon bool `arg:"--daemon" help:"run in daemon mode"`
+	Config string `arg:"env:VIRTIGO_HOME" help:"defaults to ~/.config/virtigo/"`
+	Port int `arg:"--port" default:"8080" help:"allow droplet events via http"`
 }
 
+// Daemon bool `arg:"--daemon" help:"run in daemon mode"`
+// IgnoreCpu bool `arg:"--xml-ignore-cpu" default:"true" help:"ignore non-standard libvirt xml cpus"`
+// IgnoreBr bool `arg:"--xml-ignore-net" default:"true" help:"ignore network bridge name changes"`
+// IgnDisk bool `arg:"--xml-ignore-disk" default:"false" help:"ignore duplicate disk names"`
+
 // Save bool `arg:"--save" default:"false" help:"save protobuf config after import"`
 // Start string `arg:"--start" help:"start a droplet"`
 // Uptime bool `arg:"--uptime" default:"true" help:"allow uptime checks for things like Kuma"`
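Aside: the `arg:` tags above follow the go-arg convention (long flag name, `env:` binding, `default:`, `help:`). A minimal sketch of how such a struct is parsed, assuming the github.com/alexflint/go-arg package; virtigo's actual argv wiring may differ:

```go
// Minimal sketch, assuming a go-arg style parser; virtigo's real
// argument handling is not shown in this diff.
package main

import (
	"fmt"

	arg "github.com/alexflint/go-arg"
)

type args struct {
	Xml    []string `arg:"--libvirt" help:"import qemu xml files"`
	Config string   `arg:"env:VIRTIGO_HOME" help:"defaults to ~/.config/virtigo/"`
	Port   int      `arg:"--port" default:"8080" help:"allow droplet events via http"`
}

func main() {
	var argv args
	arg.MustParse(&argv) // fills fields from flags, env vars, and defaults
	fmt.Println("serving on port", argv.Port)
}
```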
@@ -35,7 +36,7 @@ This app talks to your hypervisors via the virtigod daemon.
 
 Import your existing libvirt xml files with:
 
-virtigo --libvirt /etc/libvirt/qemu/*.xml --save
+virtigo --libvirt /etc/libvirt/qemu/*.xml
 
 This runs a http server so you can control your virtual machines.
 For example to start a vm called 'www.wit.com' your cluster 'foo.bar.com':
@@ -71,7 +71,7 @@ func create(w http.ResponseWriter, r *http.Request) (string, error) {
 		d.Networks = append(d.Networks, newNet)
 		// d.AddDefaultNetwork(mac)
 	}
-	me.cluster.Droplets = append(me.cluster.Droplets, d)
+	me.cluster.AddDroplet(d)
 
 	result, err := startDroplet(d)
 	if err != nil {
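Note the move from a raw slice append to `me.cluster.AddDroplet(d)`. The method itself lives in go.wit.com/lib/protobuf/virtbuf and is not shown here; a plausible sketch of the encapsulation it provides, with all type and field names as stand-ins:

```go
// Hypothetical sketch of the accessor style this commit moves to;
// the real types are generated in go.wit.com/lib/protobuf/virtbuf.
package virtbuf

import "sync"

type Droplet struct{ Hostname string }
type Events struct{}      // event log container, reached as me.cluster.E
type Hypervisors struct{} // hypervisor container, reached as me.cluster.H

type NewCluster struct {
	mu       sync.Mutex
	E        *Events
	H        *Hypervisors
	droplets []*Droplet // backing slice; no longer touched directly by callers
}

// AddDroplet keeps every write behind one method so locking and
// validation can live in a single place.
func (c *NewCluster) AddDroplet(d *Droplet) {
	if d == nil {
		return // nothing to add
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	c.droplets = append(c.droplets, d)
}
```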
@@ -158,11 +158,6 @@ func startDroplet(d *pb.Droplet) (string, error) {
 	result += fmt.Sprintln("pool has", len(pool), "members", "rand =", n)
 	h := pool[n]
 
-	// update/resend the search directories to the hypervisor
-	result += fmt.Sprintln("h.sendDirs() HERE")
-	result += fmt.Sprintln("h.sendDirs() HERE")
-	h.sendDirs()
-
 	ok, output := h.start(d)
 	if ok {
 		return result + output, nil
dump.go (8 lines changed)
@@ -16,7 +16,7 @@ import (
 */
 
 func dumpCluster(w http.ResponseWriter) {
-	umap, macs, err := ValidateDroplets(me.cluster)
+	umap, macs, err := ValidateDroplets()
 	for u, hostname := range umap {
 		fmt.Fprintln(w, "uuid:", u, "hostname:", hostname)
 	}
@@ -31,7 +31,11 @@ func dumpCluster(w http.ResponseWriter) {
 
 // list running droplets and droplets that should be running
 func dumpDroplets(w http.ResponseWriter, full bool) {
-	for _, d := range me.cluster.Droplets {
+	loop := me.cluster.DropletsAll() // get the list of droplets
+	for loop.Scan() {
+		d := loop.Droplet()
+		fmt.Println(w, "Droplet UUID:", d.Uuid)
+
 		var macs []string
 		for _, n := range d.Networks {
 			macs = append(macs, n.Mac)
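This is the refactor repeated across the commit: every `range me.cluster.Droplets` becomes a `DropletsAll()` iterator. A hedged sketch of the Scan/Droplet contract the new call sites rely on, continuing the stand-in types from the sketch above; the real iterator is generated in virtbuf and its internals are assumed here:

```go
// Hedged sketch of the iterator contract; names and internals assumed.
type DropletIterator struct {
	droplets []*Droplet // snapshot taken when DropletsAll() is called
	index    int        // next position to hand out
}

// DropletsAll returns an iterator instead of exposing the raw slice.
func (c *NewCluster) DropletsAll() *DropletIterator {
	c.mu.Lock()
	defer c.mu.Unlock()
	return &DropletIterator{droplets: append([]*Droplet{}, c.droplets...)}
}

// Scan advances the iterator; false means the list is exhausted.
func (it *DropletIterator) Scan() bool {
	if it.index >= len(it.droplets) {
		return false
	}
	it.index++
	return true
}

// Droplet returns the element selected by the most recent Scan call.
func (it *DropletIterator) Droplet() *Droplet {
	return it.droplets[it.index-1]
}
```

Call sites then read `for loop.Scan() { d := loop.Droplet(); ... }`, as in dumpDroplets above, which lets the library snapshot or lock the backing slice without exposing it.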
main.go (33 lines changed)
@@ -4,6 +4,7 @@ package main
 
 import (
 	"embed"
+	"fmt"
 	"os"
 	"path/filepath"
 	"time"
@@ -33,9 +34,9 @@ func main() {
 		os.Exit(0)
 	}
 
-	if argv.Daemon {
-		log.DaemonMode(true)
-	}
+	// if argv.Daemon {
+	// 	log.DaemonMode(true)
+	// }
 
 	// set defaults
 	me.unstable = time.Now() // initialize the grid as unstable
@@ -52,20 +53,25 @@ func main() {
 	// how long the cluster must be stable before new droplets can be started
 	me.clusterStableDuration = 37 * time.Second
 
-	// read in the config file
-	me.cluster = new(pb.Cluster)
-	me.cluster.E = new(pb.Events)
+	me.cluster = pb.InitCluster()
 	if err := me.cluster.ConfigLoad(); err != nil {
 		log.Info("config load error", err)
 		os.Exit(-1)
 	}
 
-	for i, d := range me.cluster.Droplets {
+	loop := me.cluster.DropletsAll() // get the list of droplets
+	for loop.Scan() {
+		d := loop.Droplet()
+		if d == nil {
+			fmt.Println("d == nil")
+			os.Exit(-1)
+		}
+		fmt.Println("Droplet UUID:", d.Uuid)
 		if d.Current == nil {
 			d.Current = new(pb.Current)
 		}
 		d.Current.State = pb.DropletState_OFF
-		log.Info(i, "droplet", d.Hostname)
+		log.Info("droplet", d.Hostname)
 	}
 	hmm := "pihole.wit.com"
 	d := me.cluster.FindDropletByName(hmm)
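`pb.InitCluster()` collapses the hand-built `new(pb.Cluster)` / `new(pb.Events)` setup into one constructor. Judging from the accesses elsewhere in this commit (`me.cluster.E`, `me.cluster.H.Hypervisors`, `DropletsAll()`), it presumably allocates the nested containers; a sketch of that assumption, continuing the stand-ins above:

```go
// Hedged sketch of what pb.InitCluster() consolidates; the real
// constructor may also set locks, defaults, or version stamps.
func InitCluster() *NewCluster {
	return &NewCluster{
		E: new(Events),      // was: me.cluster.E = new(pb.Events)
		H: new(Hypervisors), // later reached as me.cluster.H.Hypervisors
	}
}
```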
@@ -78,11 +84,11 @@ func main() {
 	var newEvents []*pb.Event
 
 	// sanity check the cluster & droplets
-	if _, _, err := ValidateDroplets(me.cluster); err != nil {
+	if _, _, err := ValidateDroplets(); err != nil {
 		log.Info("todo: add flag to ignore. for now, fix problems in the config file.")
 		os.Exit(0)
 	}
-	newe, err := ValidateDiskFilenames(me.cluster)
+	newe, err := ValidateDiskFilenames()
 	if err != nil {
 		log.Info(err)
 		os.Exit(-1)
@@ -91,7 +97,7 @@ func main() {
 	for _, e := range newe {
 		newEvents = append(newEvents, e)
 	}
-	ValidateUniqueFilenames(me.cluster)
+	ValidateUniqueFilenames()
 
 	for _, filename := range argv.Xml {
 		domcfg, err := virtigolib.ReadXml(filename)
@@ -138,7 +144,7 @@ func main() {
 	}
 
 	// initialize each hypervisor
-	for _, pbh := range me.cluster.Hypervisors {
+	for _, pbh := range me.cluster.H.Hypervisors {
 		// this is a new unknown droplet (not in the config file)
 		var h *HyperT
 		h = new(HyperT)
@@ -155,9 +161,6 @@ func main() {
 	for _, h := range me.hypers {
 		log.Info("starting polling on", h.pb.Hostname)
 
-		// inititialize the search directories on each hypervisor
-		h.sendDirs()
-
 		// start a watchdog on each hypervisor
 		go h.NewWatchdog()
 	}
poll.go (4 lines changed)
@@ -139,7 +139,9 @@ func uptimeCheck() (bool, string) {
 	var unknown int
 	var unknownList []string
 
-	for _, d := range me.cluster.Droplets {
+	loop := me.cluster.DropletsAll() // get the list of droplets
+	for loop.Scan() {
+		d := loop.Droplet()
 		total += 1
 		if d.StartState != pb.DropletState_ON {
 			continue
@@ -0,0 +1,5 @@
+dirs: "/var/lib/libvirt/images"
+dirs: "/home/isos"
+dirs: "/home/nfs"
+dirs: "/home/ceph"
+dirs: "/home"
@@ -0,0 +1,44 @@
+droplets: {
+	hostname: "git.wit.org"
+	cpus: 16
+	memory: 103079215104
+	preferred_hypervisor: "farm04"
+	qemu_machine: "pc-q35-9.0"
+	networks: {
+		mac: "22:22:22:22:22:03"
+		name: ""
+	}
+	disks: {
+		filename: "git.wit.org.qcow2"
+		filepath: "/home/nfs3"
+	}
+}
+droplets: {
+	hostname: "go.wit.com"
+	cpus: 2
+	memory: 2147483648
+	preferred_hypervisor: "farm04"
+	qemu_machine: "pc-q35-9.0"
+	networks: {
+		mac: "22:22:22:22:22:05"
+		name: ""
+	}
+	disks: {
+		filename: "go.wit.com.qcow2"
+		filepath: "/home/nfs"
+	}
+}
+droplets: {
+	hostname: "wekan.foo.com"
+	cpus: 2
+	memory: 2147483648
+	qemu_machine: "pc-q35-9.1"
+	networks: {
+		mac: "22:22:22:22:22:08"
+		name: ""
+	}
+	disks: {
+		filename: "wekan.foo.com.qcow2"
+		filepath: "/home/nfs"
+	}
+}
@@ -0,0 +1,10 @@
+events: {
+	droplet: "www.foo.org"
+	start: {
+		seconds: 1729895589
+		nanos: 425114400
+	}
+	field_name: "Droplet.Memory"
+	orig_val: "1073741824"
+	new_val: "2147483648"
+}
@@ -0,0 +1,17 @@
+hypervisors: {
+	uuid: "11111111-2222-3333-4444-555555555555"
+	hostname: "hyper01"
+	active: true
+	cpus: 16
+	memory: 8796093022208
+	comment: "this is a fake hypervisor"
+	autoscan: true
+}
+hypervisors: {
+	hostname: "hyper02"
+	active: true
+	cpus: 16
+	memory: 8796093022208
+	comment: "this is a fake hypervisor"
+	autoscan: true
+}
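The four new files are protobuf text-format fixtures: search dirs, droplets, an event, and hypervisors. Assuming the message definitions come from virtbuf, such a fixture can be parsed with prototext; the file path and message type below are illustrative, not taken from the diff:

```go
// Hedged sketch: parse a text-format fixture like the droplets file above.
package main

import (
	"fmt"
	"os"

	"google.golang.org/protobuf/encoding/prototext"

	pb "go.wit.com/lib/protobuf/virtbuf"
)

func main() {
	data, err := os.ReadFile("testdata/droplets.text") // hypothetical path
	if err != nil {
		fmt.Println("read error:", err)
		os.Exit(1)
	}
	var c pb.Cluster // assumed: the message whose repeated fields match these fixtures
	if err := prototext.Unmarshal(data, &c); err != nil {
		fmt.Println("parse error:", err)
		os.Exit(1)
	}
	fmt.Println("parsed cluster fixture")
}
```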
start.go (5 lines changed)
@@ -101,11 +101,6 @@ func Start(name string) (string, error) {
 	result += fmt.Sprintln("pool has", len(pool), "members", "rand =", n)
 	h := pool[n]
 
-	// update/resend the search directories to the hypervisor
-	result += fmt.Sprintln("h.sendDirs() HERE")
-	result += fmt.Sprintln("h.sendDirs() HERE")
-	h.sendDirs()
-
 	ok, output := h.start(d)
 	if ok {
 		return result + output, nil
@@ -20,7 +20,8 @@ func (b *virtigoT) Enable() {
 
 // this app's variables
 type virtigoT struct {
-	cluster *pb.Cluster                // basic cluster settings
+	cluster *pb.NewCluster             // basic cluster settings
+	// newc *pb.NewCluster             // basic cluster settings
 	e       *pb.Events                 // virtbuf events
 	hmap    map[*pb.Hypervisor]*HyperT // map to the local struct
 	names   []string
validate.go (85 lines changed)
@@ -16,6 +16,7 @@ package main
 import (
 	"errors"
 	"fmt"
+	"os"
 	"path/filepath"
 	"strings"
 
@@ -26,8 +27,10 @@ import (
 )
 
 // will make sure the mac address is unique
-func ValidateUniqueMac(cluster *pb.Cluster, mac string) bool {
-	for _, d := range cluster.Droplets {
+func ValidateUniqueMac(mac string) bool {
+	loop := me.cluster.DropletsAll() // get the list of droplets
+	for loop.Scan() {
+		d := loop.Droplet()
 		for _, n := range d.Networks {
 			if n.Mac == mac {
 				log.Info("duplicate MAC", n.Mac, "in droplet", d.Hostname)
@@ -39,10 +42,10 @@ func ValidateUniqueMac(cluster *pb.Cluster, mac string) bool {
 }
 
 // records all the known paths. this should go in the protobuf
-func addClusterFilepath(cluster *pb.Cluster, dir string) *pb.Event {
+func addClusterFilepath(dir string) *pb.Event {
 	var found bool = false
 	var e *pb.Event
-	for _, d := range cluster.Dirs {
+	for _, d := range me.cluster.Dirs {
 		if d == dir {
 			// found dir
 			found = true
@@ -53,17 +56,19 @@ func addClusterFilepath(cluster *pb.Cluster, dir string) *pb.Event {
 		if dir != "." {
 			// make a new Add Event
 			e = pb.NewAddEvent(nil, "Add Cluster Directory", dir)
-			cluster.Dirs = append(cluster.Dirs, dir)
+			me.cluster.Dirs = append(me.cluster.Dirs, dir)
 		}
 	}
 	return e
 }
 
 // returns the droplet using a filename
-func lookupFilename(cluster *pb.Cluster, filename string) *pb.Droplet {
+func lookupFilename(filename string) *pb.Droplet {
 	filebase := filepath.Base(filename)
 
-	for _, d := range cluster.Droplets {
+	loop := me.cluster.DropletsAll() // get the list of droplets
+	for loop.Scan() {
+		d := loop.Droplet()
 		for _, disk := range d.Disks {
 			if filebase == disk.Filename {
 				return d
@@ -73,15 +78,17 @@ func lookupFilename(cluster *pb.Cluster, filename string) *pb.Droplet {
 	return nil
 }
 
-func ValidateUniqueFilenames(cluster *pb.Cluster) bool {
+func ValidateUniqueFilenames() bool {
 	var ok bool = true
 	var disks map[string]string
 	disks = make(map[string]string)
 
-	for _, d := range cluster.Droplets {
+	loop := me.cluster.DropletsAll() // get the list of droplets
+	for loop.Scan() {
+		d := loop.Droplet()
 		for _, disk := range d.Disks {
 			filename := disk.Filename
-			addClusterFilepath(cluster, disk.Filepath)
+			addClusterFilepath(disk.Filepath)
 			if _, ok := disks[filename]; ok {
 				/*
 					if argv.IgnDisk {
@@ -103,16 +110,18 @@ func ValidateUniqueFilenames(cluster *pb.Cluster) bool {
 	return ok
 }
 
-func ValidateDiskFilenames(cluster *pb.Cluster) ([]*pb.Event, error) {
+func ValidateDiskFilenames() ([]*pb.Event, error) {
 	var alle []*pb.Event
 
-	for _, d := range cluster.Droplets {
+	loop := me.cluster.DropletsAll() // get the list of droplets
+	for loop.Scan() {
+		d := loop.Droplet()
 		var found bool = false
 		for _, disk := range d.Disks {
 			filename := disk.Filename
 			filebase := filepath.Base(filename)
 			dir := filepath.Dir(filename)
-			addClusterFilepath(cluster, dir)
+			addClusterFilepath(dir)
 			if disk.Filename != filebase {
 				// update filename
 				e := d.NewChangeEvent("Disk.Filename", disk.Filename, filebase)
@@ -159,7 +168,9 @@ func getNewMac() string {
 	var macs map[string]string
 	macs = make(map[string]string)
 
-	for _, d := range me.cluster.Droplets {
+	loop := me.cluster.DropletsAll() // get the list of droplets
+	for loop.Scan() {
+		d := loop.Droplet()
 		for _, n := range d.Networks {
 			// log.Println("network:", n.Mac, d.Uuid, d.Hostname)
 			if _, ok := macs[n.Mac]; ok {
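getNewMac() collects every MAC in the cluster into a map and then picks one that is not taken. A compact sketch of that allocation pattern, using the `22:22:22:22:22:xx` scheme visible in the fixtures above; virtigo's real policy is not shown in this diff:

```go
// Hedged sketch of an allocate-first-free-MAC helper; the addressing
// scheme mirrors the testdata fixtures, not virtigo's actual algorithm.
package main

import "fmt"

func nextFreeMac(known map[string]string) string {
	for i := 0; i < 256; i++ {
		candidate := fmt.Sprintf("22:22:22:22:22:%02x", i)
		if _, taken := known[candidate]; !taken {
			return candidate // first address not present in the cluster
		}
	}
	return "" // pool exhausted; caller must treat this as an error
}

func main() {
	known := map[string]string{"22:22:22:22:22:03": "git.wit.org"}
	fmt.Println(nextFreeMac(known)) // prints 22:22:22:22:22:00
}
```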
@@ -189,7 +200,7 @@ func getNewMac() string {
 
 // runs on startup. dies if there are duplicates
 // the config file must then be edited by hand
-func ValidateDroplets(cluster *pb.Cluster) (map[string]string, map[string]string, error) {
+func ValidateDroplets() (map[string]string, map[string]string, error) {
 	// uuid map to check for duplicates
 	var umap map[string]string
 	umap = make(map[string]string)
@@ -198,7 +209,9 @@ func ValidateDroplets(cluster *pb.Cluster) (map[string]string, map[string]string
 	var macs map[string]string
 	macs = make(map[string]string)
 
-	for _, d := range cluster.Droplets {
+	loop := me.cluster.DropletsAll() // get the list of droplets
+	for loop.Scan() {
+		d := loop.Droplet()
 		// Generate a new UUID
 		if d.Uuid == "" {
 			u := uuid.New()
@@ -231,6 +244,26 @@ func ValidateDroplets(cluster *pb.Cluster) (map[string]string, map[string]string
 	return umap, macs, nil
 }
 
+func searchForDuplicateUUIDs() {
+	// var broken int
+}
+
+/*
+// remove from the slice
+func deleteDroplet(bad int) {
+	var all *pb.Droplets
+	all = me.cluster.DeleteDroplet(b *db.Droplet)
+
+	fmt.Println("deleting", bad, all.Droplets[bad].Hostname)
+
+	// Check if the index is within bounds
+	if bad >= 0 && bad < len(all.Droplets) {
+		// Remove element at targetIndex
+		all.Droplets = append(all.Droplets[:bad], all.Droplets[bad+1:]...)
+	}
+}
+*/
+
 // checks a droplet right before a start event
 // verify ethernet mac address
 // verify uuid (but probably can ignore this since it's not used)
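The commented-out deleteDroplet() above would not compile as written: `me.cluster.DeleteDroplet(b *db.Droplet)` mixes a function call with a parameter declaration. If it is ever revived, the bounds-checked removal it sketches would look roughly like this, with stand-in types:

```go
// Hedged sketch of the index-based removal the commented-out code aims
// at; the iterator API added in this commit may make such direct slice
// surgery unnecessary.
package main

type Droplet struct{ Hostname string }

// deleteDropletAt removes the element at index bad, preserving order.
func deleteDropletAt(droplets []*Droplet, bad int) []*Droplet {
	if bad < 0 || bad >= len(droplets) {
		return droplets // index out of bounds: nothing to delete
	}
	return append(droplets[:bad], droplets[bad+1:]...)
}
```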
@@ -239,7 +272,9 @@ func ValidateDroplets(cluster *pb.Cluster) (map[string]string, map[string]string
 // check filenames
 func ValidateDroplet(check *pb.Droplet) error {
 	// check for duplicate uuid's
-	for _, d := range me.cluster.Droplets {
+	loop := me.cluster.DropletsAll() // get the list of droplets
+	for loop.Scan() {
+		d := loop.Droplet()
 		if check == d {
 			continue
 		}
@@ -247,13 +282,21 @@ func ValidateDroplet(check *pb.Droplet) error {
 			// UUID already exists
 			log.Info("duplicate UUID", d.Uuid, d.Hostname)
 			log.Info("duplicate UUID", d.Uuid, check.Hostname)
+			// d.Archive = new(pb.DropletArchive)
+			if d.Archive == nil {
+				log.Info("d.Archive == nil")
+				os.Exit(-1)
+			}
+			d.Archive.Reason = pb.DropletArchive_DUP
 			return errors.New("duplicate UUID: " + d.Uuid)
 		}
 	}
 
 	// check for duplicate mac addresses
 	for _, checkn := range check.Networks {
-		for _, d := range me.cluster.Droplets {
+		loop := me.cluster.DropletsAll() // get the list of droplets
+		for loop.Scan() {
+			d := loop.Droplet()
 			if check == d {
 				continue
 			}
@@ -280,7 +323,9 @@ func setUniqueSpicePort(check *pb.Droplet) error {
 
 	// check spice ports
 	// checkn.SpicePort = getUniqueSpicePort()
-	for _, d := range me.cluster.Droplets {
+	loop := me.cluster.DropletsAll() // get the list of droplets
+	for loop.Scan() {
+		d := loop.Droplet()
 		if d.SpicePort == 0 {
 			continue
 		}
@@ -330,7 +375,7 @@ func setUniqueSpicePort(check *pb.Droplet) error {
 	// generate change port event
 	log.Info("going to try port", start, "on", check.Hostname)
 	e := check.NewChangeEvent("SpicePort", check.SpicePort, start)
-	me.cluster.E.Events = append(me.cluster.E.Events, e)
+	me.cluster.AddEvent(e)
 
 	// set port to start
 	check.SpicePort = start
watchdog.go (24 lines changed)
@@ -4,7 +4,6 @@ import (
 	"fmt"
 	"time"
 
-	pb "go.wit.com/lib/protobuf/virtbuf"
 	"go.wit.com/log"
 )
 
@@ -15,29 +14,6 @@ func TimeFunction(f func()) time.Duration {
 	return time.Since(startTime) // Calculate the elapsed time
 }
 
-func (h *HyperT) sendDirs() {
-	url := "http://" + h.pb.Hostname + ":2520/cluster"
-	var msg string
-	var data []byte
-
-	var c *pb.Cluster
-	c = new(pb.Cluster)
-	for _, dir := range me.cluster.Dirs {
-		c.Dirs = append(c.Dirs, dir)
-	}
-	msg = c.FormatJSON()
-	data = []byte(msg) // Convert the string to []byte
-	req, err := httpPost(url, data)
-	if err != nil {
-		log.Info("error:", err)
-		return
-	}
-	// log.Info("http post url:", url)
-	// log.Info("http post data:", msg)
-
-	log.Info("EVENT start droplet response: " + string(req))
-}
-
 func (h *HyperT) NewWatchdog() {
 	h.dog = time.NewTicker(me.hyperPollDelay)
 	defer h.dog.Stop()
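The deleted sendDirs() pushed the cluster's search directories to each hypervisor by POSTing JSON to `:2520/cluster`; this commit drops it and both call sites, presumably because the directory list now travels with the cluster config (see the `dirs:` fixture above). For reference, a minimal sketch of the kind of helper it called; `httpPost` appears in the deleted code, but this body is assumed:

```go
// Hedged sketch of an httpPost helper matching the deleted call site
// `req, err := httpPost(url, data)`; virtigo's actual helper, headers,
// and timeouts are not shown in this diff.
package main

import (
	"bytes"
	"io"
	"net/http"
)

func httpPost(url string, data []byte) ([]byte, error) {
	resp, err := http.Post(url, "application/json", bytes.NewReader(data))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body) // body is the hypervisor's reply
}
```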